/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

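/*
 * The helpers below push a configuration change (weight, bps or iops limit)
 * down to the policy that owns the blkio_group; other registered policies
 * are skipped.
 */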
static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
				int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
								blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
			unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
								blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
				bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
						struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

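/* Link @blkg to @blkcg under blkcg->lock and record its device and policy. */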
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev,
		enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

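/*
 * Clear all stats of every group in the cgroup. Queued counters and the
 * idling/waiting/empty markers are preserved so in-flight accounting
 * stays consistent.
 */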
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}

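/*
 * Parse a "major:minor value" rule written to a policy file and fill
 * @newpn accordingly; the value is interpreted based on @plid and @fileid.
 */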
static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;
	u64 bps, iops;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputing too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		ret = strict_strtoul(s[1], 10, &temp);
		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
			temp > BLKIO_WEIGHT_MAX)
			return -EINVAL;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch(fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			ret = strict_strtoull(s[1], 10, &bps);
			if (ret)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			ret = strict_strtoull(s[1], 10, &iops);
			if (ret)
				return -EINVAL;

			if (iops > THROTL_IOPS_MAX)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)iops;
			break;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		return pn->val.weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
					struct blkio_policy_node *newpn)
{
	switch(oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch(newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}

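/* Common write handler for all per-device rule files (weight_device, throttle.*). */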
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch(name){
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

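/* Walk all groups in the cgroup and emit the requested stat for each device. */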
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb, enum stat_type type,
		bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			spin_lock_irq(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, cb, blkg->dev,
						type);
			spin_unlock_irq(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SECTORS, 0);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICED, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_MERGED, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_UNACCOUNTED_TIME, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_AVG_QUEUE_SIZE, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_GROUP_WAIT_TIME, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch(name){
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICED, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

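/*
 * Update the cgroup's default weight and propagate it to all groups that
 * do not have a per-device weight rule overriding it.
 */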
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) {
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

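/* Control files exported by the blkio controller in each cgroup directory. */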
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

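/*
 * Cgroup is going away: unlink every blkio_group and notify the owning
 * policy so it can drop its reference, then free the per-device rules.
 */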
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no mean to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");