/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

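/*
 * Worked example (illustrative only, not referenced by the code below):
 * for a hypothetical value priv = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 * BLKIO_THROTL_read_bps_device), the policy id lives in the upper 16 bits
 * and the attribute in the lower 16 bits, so BLKIOFILE_POLICY(priv)
 * recovers BLKIO_POLICY_THROTL and BLKIOFILE_ATTR(priv) recovers
 * BLKIO_THROTL_read_bps_device. Every cftype below packs its identity
 * this way into cft->private.
 */
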
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach_task = blkiocg_can_attach_task,
	.attach_task = blkiocg_attach_task,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
							blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
							blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
							blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
							blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				   unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

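/*
 * Timeline sketch of the two deltas accounted above (derived from the
 * arguments, not an addition to the accounting): start_time marks when
 * the request entered the queue and io_start_time when it was dispatched
 * to the device, so
 *
 *	wait time    = io_start_time - start_time   (queued -> dispatched)
 *	service time = now - io_start_time          (dispatched -> completed)
 *
 * The time_after64() checks simply drop a sample whose timestamps ran
 * backwards, e.g. across CPUs with unsynchronized sched_clock().
 */
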
/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
			direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * This function allocates the per cpu stats for blkio_group. Should be called
 * from sleepable context as alloc_percpu() requires that.
 */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
	/* Allocate memory for per cpu stats */
	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (!blkg->stats_cpu)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);

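/*
 * Note (not part of the original comments): alloc_percpu() pairs with
 * free_percpu(), so the per cpu area allocated here is presumably
 * released by whichever policy owns and frees the blkio_group itself.
 */
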
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev,
		enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;
	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format\n");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

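/*
 * Example output of the helper above (illustrative): for dev = MKDEV(8, 0)
 * and type == BLKIO_STAT_READ, str becomes "8:0 Read"; with
 * diskname_only == true only the "8:0" part is emitted. These keys form
 * the left-hand column of the per-device stat files.
 */
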
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

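/*
 * Reader side of the u64_stats seqcount (a clarifying note, not new
 * behaviour): the do/while above pairs with the u64_stats_update_begin()/
 * u64_stats_update_end() calls in the update paths, retrying the snapshot
 * whenever a writer raced with us on a 32-bit machine. On 64-bit the
 * fetch_begin/fetch_retry calls are expected to compile away, since 64-bit
 * loads are already atomic there.
 */
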
static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}

static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor;
	int i = 0;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent input of too many arguments */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = strict_strtoull(s[1], 10, &temp);
	if (ret)
		return -EINVAL;

	/* For rule removal, do not check for device presence. */
	if (temp) {
		ret = blkio_check_dev_num(dev);
		if (ret)
			return ret;
	}

	newpn->dev = dev;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			return -EINVAL;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = temp;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)temp;
			break;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

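/*
 * Accepted input format, as parsed above: "<major>:<minor> <value>".
 * For example (hypothetical device numbers), writing
 *
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * installs a 1MB/s read limit for device 8:16, while writing a value of
 * 0 removes the rule again (see blkio_delete_rule_command() below).
 */
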
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		return pn->val.weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}

/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
				     struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			if (pcpu)
				cgroup_total += blkio_get_stat_cpu(blkg, cb,
						blkg->dev, type);
			else {
				spin_lock_irq(&blkg->stats_lock);
				cgroup_total += blkio_get_stat(blkg, cb,
						blkg->dev, type);
				spin_unlock_irq(&blkg->stats_lock);
			}
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

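/*
 * Usage sketch (hypothetical values): writes to blkio.weight end up here
 * and set the cgroup-wide default, while a per-device rule written to
 * blkio.weight_device takes precedence for that device (see
 * blkcg_get_weight() above). E.g., assuming both values lie within
 * [BLKIO_WEIGHT_MIN, BLKIO_WEIGHT_MAX]:
 *
 *	echo 500 > blkio.weight
 *	echo "8:16 300" > blkio.weight_device
 *
 * leaves device 8:16 at weight 300 and every other device at 500.
 */
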
static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

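/*
 * Registration sketch (callback names are hypothetical; only fields
 * actually dereferenced in this file are shown): an IO control policy
 * advertises itself roughly like
 *
 *	static struct blkio_policy_type my_policy = {
 *		.ops = {
 *			.blkio_unlink_group_fn = my_unlink_group,
 *			.blkio_update_group_weight_fn = my_update_weight,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *	};
 *
 *	blkio_policy_register(&my_policy);
 *
 * after which the update helpers near the top of this file start calling
 * back into it for blkgs whose plid matches.
 */
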
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");