1/*
2 * Common Block IO controller cgroup interface
3 *
4 * Based on ideas and code from CFQ, CFS and BFQ:
5 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
6 *
7 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
8 * Paolo Valente <paolo.valente@unimore.it>
9 *
10 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
11 * Nauman Rafique <nauman@google.com>
12 */
13#include <linux/ioprio.h>
14#include <linux/seq_file.h>
15#include <linux/kdev_t.h>
16#include <linux/module.h>
17#include <linux/err.h>
18#include <linux/blkdev.h>
19#include <linux/slab.h>
20#include <linux/genhd.h>
21#include <linux/delay.h>
22#include "blk-cgroup.h"
23#include "blk.h"
24
25#define MAX_KEY_LEN 100
26
27static DEFINE_SPINLOCK(blkio_list_lock);
28static LIST_HEAD(blkio_list);
29
30struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
31EXPORT_SYMBOL_GPL(blkio_root_cgroup);
32
33static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];
34
35static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
36 struct cgroup *);
37static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
38 struct cgroup_taskset *);
39static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
40 struct cgroup_taskset *);
41static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
42static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
43static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
44
45/* for encoding cft->private value on file */
46#define BLKIOFILE_PRIVATE(x, val) (((x) << 16) | (val))
47/* What policy owns the file, proportional or throttle */
48#define BLKIOFILE_POLICY(val) (((val) >> 16) & 0xffff)
49#define BLKIOFILE_ATTR(val) ((val) & 0xffff)
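/*
 * Illustrative example (values taken from the cftype tables below): for
 * the throttle read_bps file, cft->private is set to
 * BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device),
 * i.e. the policy id in the upper 16 bits and the file id in the lower
 * 16 bits.  BLKIOFILE_POLICY() and BLKIOFILE_ATTR() recover the two
 * halves when a file handler runs.
 */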
50
51struct cgroup_subsys blkio_subsys = {
52 .name = "blkio",
53 .create = blkiocg_create,
54 .can_attach = blkiocg_can_attach,
55 .attach = blkiocg_attach,
56 .pre_destroy = blkiocg_pre_destroy,
57 .destroy = blkiocg_destroy,
58 .populate = blkiocg_populate,
59 .subsys_id = blkio_subsys_id,
60 .module = THIS_MODULE,
61};
62EXPORT_SYMBOL_GPL(blkio_subsys);
63
64struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
65{
66 return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
67 struct blkio_cgroup, css);
68}
69EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
70
71struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
72{
73 return container_of(task_subsys_state(tsk, blkio_subsys_id),
74 struct blkio_cgroup, css);
75}
76EXPORT_SYMBOL_GPL(task_blkio_cgroup);
77
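/*
 * The three helpers below push a configuration change (weight, bps limit
 * or iops limit) to the policy that owns the blkg.  Each one walks
 * blkio_list and invokes the matching per-policy callback, skipping
 * policies whose plid does not match the group.
 */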
78static inline void
79blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
80{
81 struct blkio_policy_type *blkiop;
82
83 list_for_each_entry(blkiop, &blkio_list, list) {
84 /* If this policy does not own the blkg, do not send updates */
85 if (blkiop->plid != blkg->plid)
86 continue;
87 if (blkiop->ops.blkio_update_group_weight_fn)
88 blkiop->ops.blkio_update_group_weight_fn(blkg->q,
89 blkg, weight);
90 }
91}
92
93static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
94 int fileid)
95{
96 struct blkio_policy_type *blkiop;
97
98 list_for_each_entry(blkiop, &blkio_list, list) {
99
100 /* If this policy does not own the blkg, do not send updates */
101 if (blkiop->plid != blkg->plid)
102 continue;
103
104 if (fileid == BLKIO_THROTL_read_bps_device
105 && blkiop->ops.blkio_update_group_read_bps_fn)
106 blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
107 blkg, bps);
108
109 if (fileid == BLKIO_THROTL_write_bps_device
110 && blkiop->ops.blkio_update_group_write_bps_fn)
111 blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
112 blkg, bps);
113 }
114}
115
116static inline void blkio_update_group_iops(struct blkio_group *blkg,
117 unsigned int iops, int fileid)
118{
119 struct blkio_policy_type *blkiop;
120
121 list_for_each_entry(blkiop, &blkio_list, list) {
122
123 /* If this policy does not own the blkg, do not send updates */
124 if (blkiop->plid != blkg->plid)
125 continue;
126
127 if (fileid == BLKIO_THROTL_read_iops_device
128 && blkiop->ops.blkio_update_group_read_iops_fn)
129 blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
130 blkg, iops);
131
132 if (fileid == BLKIO_THROTL_write_iops_device
133 && blkiop->ops.blkio_update_group_write_iops_fn)
134 blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
135						blkg, iops);
136 }
137}
138
139/*
140 * Add to the appropriate stat variable depending on the request type.
141 * This should be called with the blkg->stats_lock held.
142 */
143static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
144 bool sync)
145{
146 if (direction)
147 stat[BLKIO_STAT_WRITE] += add;
148 else
149 stat[BLKIO_STAT_READ] += add;
150 if (sync)
151 stat[BLKIO_STAT_SYNC] += add;
152 else
153 stat[BLKIO_STAT_ASYNC] += add;
154}
155
156/*
157 * Decrements the appropriate stat variable depending on the request
158 * type. BUGs if the value is already zero.
159 * This should be called with the blkg->stats_lock held.
160 */
161static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
162{
163 if (direction) {
164 BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
165 stat[BLKIO_STAT_WRITE]--;
166 } else {
167 BUG_ON(stat[BLKIO_STAT_READ] == 0);
168 stat[BLKIO_STAT_READ]--;
169 }
170 if (sync) {
171 BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
172 stat[BLKIO_STAT_SYNC]--;
173 } else {
174 BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
175 stat[BLKIO_STAT_ASYNC]--;
176 }
177}
178
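/*
 * The stats below (group wait time, idle time, empty time, dequeue count
 * and average queue size) are only maintained under
 * CONFIG_DEBUG_BLK_CGROUP; the #else branch further down provides empty
 * stubs for the two helpers that are used unconditionally.
 */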
179#ifdef CONFIG_DEBUG_BLK_CGROUP
180/* This should be called with the blkg->stats_lock held. */
181static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
182 struct blkio_group *curr_blkg)
183{
184 if (blkio_blkg_waiting(&blkg->stats))
185 return;
186 if (blkg == curr_blkg)
187 return;
188 blkg->stats.start_group_wait_time = sched_clock();
189 blkio_mark_blkg_waiting(&blkg->stats);
190}
191
192/* This should be called with the blkg->stats_lock held. */
193static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
194{
195 unsigned long long now;
196
197 if (!blkio_blkg_waiting(stats))
198 return;
199
200 now = sched_clock();
201 if (time_after64(now, stats->start_group_wait_time))
202 stats->group_wait_time += now - stats->start_group_wait_time;
203 blkio_clear_blkg_waiting(stats);
204}
205
206/* This should be called with the blkg->stats_lock held. */
207static void blkio_end_empty_time(struct blkio_group_stats *stats)
208{
209 unsigned long long now;
210
211 if (!blkio_blkg_empty(stats))
212 return;
213
214 now = sched_clock();
215 if (time_after64(now, stats->start_empty_time))
216 stats->empty_time += now - stats->start_empty_time;
217 blkio_clear_blkg_empty(stats);
218}
219
220void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
221{
222 unsigned long flags;
223
224 spin_lock_irqsave(&blkg->stats_lock, flags);
225 BUG_ON(blkio_blkg_idling(&blkg->stats));
226 blkg->stats.start_idle_time = sched_clock();
227 blkio_mark_blkg_idling(&blkg->stats);
228 spin_unlock_irqrestore(&blkg->stats_lock, flags);
229}
230EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
231
232void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
233{
234 unsigned long flags;
235 unsigned long long now;
236 struct blkio_group_stats *stats;
237
238 spin_lock_irqsave(&blkg->stats_lock, flags);
239 stats = &blkg->stats;
240 if (blkio_blkg_idling(stats)) {
241 now = sched_clock();
242 if (time_after64(now, stats->start_idle_time))
243 stats->idle_time += now - stats->start_idle_time;
244 blkio_clear_blkg_idling(stats);
245 }
246 spin_unlock_irqrestore(&blkg->stats_lock, flags);
247}
248EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
249
250void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
251{
252 unsigned long flags;
253 struct blkio_group_stats *stats;
254
255 spin_lock_irqsave(&blkg->stats_lock, flags);
256 stats = &blkg->stats;
257 stats->avg_queue_size_sum +=
258 stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
259 stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
260 stats->avg_queue_size_samples++;
261 blkio_update_group_wait_time(stats);
262 spin_unlock_irqrestore(&blkg->stats_lock, flags);
263}
264EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
265
266void blkiocg_set_start_empty_time(struct blkio_group *blkg)
267{
268 unsigned long flags;
269 struct blkio_group_stats *stats;
270
271 spin_lock_irqsave(&blkg->stats_lock, flags);
272 stats = &blkg->stats;
273
274 if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
275 stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
276 spin_unlock_irqrestore(&blkg->stats_lock, flags);
277 return;
278 }
279
280	/*
281	 * The group is already marked empty. This can happen if a cfqq got a
282	 * new request in the parent group and moved to this group while being
283	 * added to the service tree. Just ignore the event and move on.
284	 */
285	if (blkio_blkg_empty(stats)) {
286 spin_unlock_irqrestore(&blkg->stats_lock, flags);
287 return;
288 }
289
290 stats->start_empty_time = sched_clock();
291 blkio_mark_blkg_empty(stats);
292 spin_unlock_irqrestore(&blkg->stats_lock, flags);
293}
294EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
295
296void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
297 unsigned long dequeue)
298{
299 blkg->stats.dequeue += dequeue;
300}
301EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
302#else
303static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
304 struct blkio_group *curr_blkg) {}
305static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
306#endif
307
308void blkiocg_update_io_add_stats(struct blkio_group *blkg,
309 struct blkio_group *curr_blkg, bool direction,
310 bool sync)
311{
312 unsigned long flags;
313
314 spin_lock_irqsave(&blkg->stats_lock, flags);
315 blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
316 sync);
317 blkio_end_empty_time(&blkg->stats);
318 blkio_set_start_group_wait_time(blkg, curr_blkg);
319 spin_unlock_irqrestore(&blkg->stats_lock, flags);
320}
321EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
322
323void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
324 bool direction, bool sync)
325{
326 unsigned long flags;
327
328 spin_lock_irqsave(&blkg->stats_lock, flags);
329 blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
330 direction, sync);
331 spin_unlock_irqrestore(&blkg->stats_lock, flags);
332}
333EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
334
335void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
336 unsigned long unaccounted_time)
337{
338 unsigned long flags;
339
340 spin_lock_irqsave(&blkg->stats_lock, flags);
341 blkg->stats.time += time;
342#ifdef CONFIG_DEBUG_BLK_CGROUP
343 blkg->stats.unaccounted_time += unaccounted_time;
344#endif
345 spin_unlock_irqrestore(&blkg->stats_lock, flags);
346}
347EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
348
349/*
350 * should be called under rcu read lock or queue lock to make sure blkg pointer
351 * is valid.
352 */
353void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
354 uint64_t bytes, bool direction, bool sync)
355{
356 struct blkio_group_stats_cpu *stats_cpu;
357 unsigned long flags;
358
359 /*
360 * Disabling interrupts to provide mutual exclusion between two
361 * writes on same cpu. It probably is not needed for 64bit. Not
362 * optimizing that case yet.
363 */
364 local_irq_save(flags);
365
366 stats_cpu = this_cpu_ptr(blkg->stats_cpu);
367
368 u64_stats_update_begin(&stats_cpu->syncp);
369 stats_cpu->sectors += bytes >> 9;
370 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
371 1, direction, sync);
372 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
373 bytes, direction, sync);
374 u64_stats_update_end(&stats_cpu->syncp);
375 local_irq_restore(flags);
376}
377EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
378
379void blkiocg_update_completion_stats(struct blkio_group *blkg,
380 uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
381{
382 struct blkio_group_stats *stats;
383 unsigned long flags;
384 unsigned long long now = sched_clock();
385
386 spin_lock_irqsave(&blkg->stats_lock, flags);
387 stats = &blkg->stats;
388 if (time_after64(now, io_start_time))
389 blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
390 now - io_start_time, direction, sync);
391 if (time_after64(io_start_time, start_time))
392 blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
393 io_start_time - start_time, direction, sync);
394 spin_unlock_irqrestore(&blkg->stats_lock, flags);
395}
396EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
397
398/* Merged stats are per cpu. */
399void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
400 bool sync)
401{
402 struct blkio_group_stats_cpu *stats_cpu;
403 unsigned long flags;
404
405 /*
406 * Disabling interrupts to provide mutual exclusion between two
407 * writes on same cpu. It probably is not needed for 64bit. Not
408 * optimizing that case yet.
409 */
410 local_irq_save(flags);
411
412 stats_cpu = this_cpu_ptr(blkg->stats_cpu);
413
414 u64_stats_update_begin(&stats_cpu->syncp);
415 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
416 direction, sync);
417 u64_stats_update_end(&stats_cpu->syncp);
418 local_irq_restore(flags);
419}
420EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
421
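/*
 * blkg_lookup_create - look up the blkg for @blkcg on @q and @plid,
 * creating and linking it if it does not exist yet.
 *
 * Must be called with the rcu read lock and q->queue_lock held.  Because
 * the percpu stats allocation needs GFP_KERNEL context (see the FIXME in
 * the body), both locks are dropped and re-acquired around the
 * allocation, and the bypass/dead checks and the lookup are redone
 * afterwards.  Returns the existing or newly linked group, or an ERR_PTR
 * on a bypassing/dead queue or on allocation failure.
 *
 * A typical caller (see blkio_policy_parse_and_set() below) does roughly:
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q, plid, false);
 *	spin_unlock_irq(q->queue_lock);
 *	...
 *	rcu_read_unlock();
 */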
422struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
423 struct request_queue *q,
424 enum blkio_policy_id plid,
425 bool for_root)
426 __releases(q->queue_lock) __acquires(q->queue_lock)
427{
428 struct blkio_policy_type *pol = blkio_policy[plid];
429 struct blkio_group *blkg, *new_blkg;
430
431 WARN_ON_ONCE(!rcu_read_lock_held());
432 lockdep_assert_held(q->queue_lock);
433
434 /*
435 * This could be the first entry point of blkcg implementation and
436 * we shouldn't allow anything to go through for a bypassing queue.
437 * The following can be removed if blkg lookup is guaranteed to
438 * fail on a bypassing queue.
439 */
440 if (unlikely(blk_queue_bypass(q)) && !for_root)
441 return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
442
443 blkg = blkg_lookup(blkcg, q, plid);
444 if (blkg)
445 return blkg;
446
447 /* blkg holds a reference to blkcg */
448 if (!css_tryget(&blkcg->css))
449 return ERR_PTR(-EINVAL);
450
451 /*
452 * Allocate and initialize.
453 *
454 * FIXME: The following is broken. Percpu memory allocation
455 * requires %GFP_KERNEL context and can't be performed from IO
456 * path. Allocation here should inherently be atomic and the
457 * following lock dancing can be removed once the broken percpu
458 * allocation is fixed.
459 */
460 spin_unlock_irq(q->queue_lock);
461 rcu_read_unlock();
462
463 new_blkg = pol->ops.blkio_alloc_group_fn(q, blkcg);
464 if (new_blkg) {
465 new_blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
466
467 spin_lock_init(&new_blkg->stats_lock);
468 rcu_assign_pointer(new_blkg->q, q);
469 new_blkg->blkcg = blkcg;
470 new_blkg->plid = plid;
471 cgroup_path(blkcg->css.cgroup, new_blkg->path,
472 sizeof(new_blkg->path));
473 } else {
474 css_put(&blkcg->css);
475 }
476
477 rcu_read_lock();
478 spin_lock_irq(q->queue_lock);
479
480	/* did bypass get turned on in the meantime? */
481 if (unlikely(blk_queue_bypass(q)) && !for_root) {
482 blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
483 goto out;
484 }
485
486 /* did someone beat us to it? */
487 blkg = blkg_lookup(blkcg, q, plid);
488 if (unlikely(blkg))
489 goto out;
490
491 /* did alloc fail? */
492 if (unlikely(!new_blkg || !new_blkg->stats_cpu)) {
493 blkg = ERR_PTR(-ENOMEM);
494 goto out;
495 }
496
497 /* insert */
498 spin_lock(&blkcg->lock);
499 swap(blkg, new_blkg);
500 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
501 pol->ops.blkio_link_group_fn(q, blkg);
502 spin_unlock(&blkcg->lock);
503out:
504 if (new_blkg) {
505 free_percpu(new_blkg->stats_cpu);
506 kfree(new_blkg);
507 css_put(&blkcg->css);
508 }
509 return blkg;
510}
511EXPORT_SYMBOL_GPL(blkg_lookup_create);
512
513static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
514{
515 hlist_del_init_rcu(&blkg->blkcg_node);
516}
517
518/*
519 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
520 * returns 1, i.e. the blkio_group was unhashed by the time we got to it.
521 */
522int blkiocg_del_blkio_group(struct blkio_group *blkg)
523{
524 struct blkio_cgroup *blkcg = blkg->blkcg;
525 unsigned long flags;
526 int ret = 1;
527
528 spin_lock_irqsave(&blkcg->lock, flags);
529 if (!hlist_unhashed(&blkg->blkcg_node)) {
530 __blkiocg_del_blkio_group(blkg);
531 ret = 0;
532 }
533 spin_unlock_irqrestore(&blkcg->lock, flags);
534
535 return ret;
536}
537EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
538
539/* called under rcu_read_lock(). */
540struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
541 struct request_queue *q,
542 enum blkio_policy_id plid)
543{
544 struct blkio_group *blkg;
545 struct hlist_node *n;
546
547 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
548 if (blkg->q == q && blkg->plid == plid)
549 return blkg;
550 return NULL;
551}
552EXPORT_SYMBOL_GPL(blkg_lookup);
553
554void blkg_destroy_all(struct request_queue *q)
555{
556 struct blkio_policy_type *pol;
557
558 while (true) {
559 bool done = true;
560
561 spin_lock(&blkio_list_lock);
562 spin_lock_irq(q->queue_lock);
563
564 /*
565 * clear_queue_fn() might return with non-empty group list
566 * if it raced cgroup removal and lost. cgroup removal is
567 * guaranteed to make forward progress and retrying after a
568 * while is enough. This ugliness is scheduled to be
569 * removed after locking update.
570 */
571 list_for_each_entry(pol, &blkio_list, list)
572 if (!pol->ops.blkio_clear_queue_fn(q))
573 done = false;
574
575 spin_unlock_irq(q->queue_lock);
576 spin_unlock(&blkio_list_lock);
577
578 if (done)
579 break;
580
581 msleep(10); /* just some random duration I like */
582 }
583}
584
585static void blkio_reset_stats_cpu(struct blkio_group *blkg)
586{
587 struct blkio_group_stats_cpu *stats_cpu;
588 int i, j, k;
589 /*
590	 * Note: on a 64-bit arch this should not be an issue. On a 32-bit
591	 * arch this may return an inconsistent value, since a 64-bit update
592	 * on 32 bits is not atomic. Taking care of that corner case would
593	 * make the code very complicated (sending IPIs to cpus, handling
594	 * stats of offline cpus, etc.).
595	 *
596	 * Resetting stats is more of a debug feature anyway and this sounds
597	 * like a corner case, so the code is not being complicated until
598	 * this becomes a real issue.
599 */
600 for_each_possible_cpu(i) {
601 stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
602 stats_cpu->sectors = 0;
603		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
604 for (k = 0; k < BLKIO_STAT_TOTAL; k++)
605 stats_cpu->stat_arr_cpu[j][k] = 0;
606 }
607}
608
609static int
610blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
611{
612 struct blkio_cgroup *blkcg;
613 struct blkio_group *blkg;
614 struct blkio_group_stats *stats;
615 struct hlist_node *n;
616 uint64_t queued[BLKIO_STAT_TOTAL];
617 int i;
618#ifdef CONFIG_DEBUG_BLK_CGROUP
619 bool idling, waiting, empty;
620 unsigned long long now = sched_clock();
621#endif
622
623 blkcg = cgroup_to_blkio_cgroup(cgroup);
624 spin_lock_irq(&blkcg->lock);
625 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
626 spin_lock(&blkg->stats_lock);
627 stats = &blkg->stats;
628#ifdef CONFIG_DEBUG_BLK_CGROUP
629 idling = blkio_blkg_idling(stats);
630 waiting = blkio_blkg_waiting(stats);
631 empty = blkio_blkg_empty(stats);
632#endif
633 for (i = 0; i < BLKIO_STAT_TOTAL; i++)
634 queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
635 memset(stats, 0, sizeof(struct blkio_group_stats));
636 for (i = 0; i < BLKIO_STAT_TOTAL; i++)
637 stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
638#ifdef CONFIG_DEBUG_BLK_CGROUP
639 if (idling) {
640 blkio_mark_blkg_idling(stats);
641 stats->start_idle_time = now;
642 }
643 if (waiting) {
644 blkio_mark_blkg_waiting(stats);
645 stats->start_group_wait_time = now;
646 }
647 if (empty) {
648 blkio_mark_blkg_empty(stats);
649 stats->start_empty_time = now;
650 }
651#endif
652 spin_unlock(&blkg->stats_lock);
653
654 /* Reset Per cpu stats which don't take blkg->stats_lock */
655 blkio_reset_stats_cpu(blkg);
656 }
657
658 spin_unlock_irq(&blkcg->lock);
659 return 0;
660}
661
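/*
 * Build the key used in the map-style stat files: the device name
 * followed, unless diskname_only is set, by the request type suffix
 * (" Read", " Write", " Sync", " Async" or " Total").
 */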
662static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
663 char *str, int chars_left, bool diskname_only)
664{
665 snprintf(str, chars_left, "%s", dname);
666 chars_left -= strlen(str);
667 if (chars_left <= 0) {
668 printk(KERN_WARNING
669		"Possibly incorrect cgroup stat display format\n");
670 return;
671 }
672 if (diskname_only)
673 return;
674 switch (type) {
675 case BLKIO_STAT_READ:
676 strlcat(str, " Read", chars_left);
677 break;
678 case BLKIO_STAT_WRITE:
679 strlcat(str, " Write", chars_left);
680 break;
681 case BLKIO_STAT_SYNC:
682 strlcat(str, " Sync", chars_left);
683 break;
684 case BLKIO_STAT_ASYNC:
685 strlcat(str, " Async", chars_left);
686 break;
687 case BLKIO_STAT_TOTAL:
688 strlcat(str, " Total", chars_left);
689 break;
690 default:
691 strlcat(str, " Invalid", chars_left);
692 }
693}
694
695static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
696 struct cgroup_map_cb *cb, const char *dname)
697{
698 blkio_get_key_name(0, dname, str, chars_left, true);
699 cb->fill(cb, str, val);
700 return val;
701}
702
703
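/*
 * Sum one per-cpu counter over all possible cpus.  The
 * u64_stats_fetch_begin/retry pair guards against torn reads of the
 * 64-bit counters on 32-bit architectures.
 */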
704static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
705 enum stat_type_cpu type, enum stat_sub_type sub_type)
706{
707 int cpu;
708 struct blkio_group_stats_cpu *stats_cpu;
709 u64 val = 0, tval;
710
711 for_each_possible_cpu(cpu) {
712 unsigned int start;
713 stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
714
715 do {
716 start = u64_stats_fetch_begin(&stats_cpu->syncp);
717 if (type == BLKIO_STAT_CPU_SECTORS)
718 tval = stats_cpu->sectors;
719 else
720 tval = stats_cpu->stat_arr_cpu[type][sub_type];
721		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));
722
723 val += tval;
724 }
725
726 return val;
727}
728
729static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
730 struct cgroup_map_cb *cb, const char *dname,
731 enum stat_type_cpu type)
732{
733 uint64_t disk_total, val;
734 char key_str[MAX_KEY_LEN];
735 enum stat_sub_type sub_type;
736
737 if (type == BLKIO_STAT_CPU_SECTORS) {
738 val = blkio_read_stat_cpu(blkg, type, 0);
739 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
740 dname);
741 }
742
743 for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
744 sub_type++) {
745 blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
746 false);
747 val = blkio_read_stat_cpu(blkg, type, sub_type);
748 cb->fill(cb, key_str, val);
749 }
750
751 disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
752 blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
753
754 blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
755 false);
756 cb->fill(cb, key_str, disk_total);
757 return disk_total;
758}
759
760/* This should be called with blkg->stats_lock held */
761static uint64_t blkio_get_stat(struct blkio_group *blkg,
762 struct cgroup_map_cb *cb, const char *dname,
763 enum stat_type type)
764{
765 uint64_t disk_total;
766 char key_str[MAX_KEY_LEN];
767 enum stat_sub_type sub_type;
768
769 if (type == BLKIO_STAT_TIME)
770 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
771 blkg->stats.time, cb, dname);
772#ifdef CONFIG_DEBUG_BLK_CGROUP
773 if (type == BLKIO_STAT_UNACCOUNTED_TIME)
774 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
775 blkg->stats.unaccounted_time, cb, dname);
776 if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
777 uint64_t sum = blkg->stats.avg_queue_size_sum;
778 uint64_t samples = blkg->stats.avg_queue_size_samples;
779 if (samples)
780 do_div(sum, samples);
781 else
782 sum = 0;
783 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
784 sum, cb, dname);
785 }
786 if (type == BLKIO_STAT_GROUP_WAIT_TIME)
787 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
788 blkg->stats.group_wait_time, cb, dname);
789 if (type == BLKIO_STAT_IDLE_TIME)
790 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
791 blkg->stats.idle_time, cb, dname);
792 if (type == BLKIO_STAT_EMPTY_TIME)
793 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
794 blkg->stats.empty_time, cb, dname);
795 if (type == BLKIO_STAT_DEQUEUE)
796 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
797 blkg->stats.dequeue, cb, dname);
798#endif
799
800 for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
801 sub_type++) {
802 blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
803 false);
804 cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
805 }
806 disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
807 blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
808 blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
809 false);
810 cb->fill(cb, key_str, disk_total);
811 return disk_total;
812}
813
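/*
 * Parse a "MAJOR:MINOR VALUE" string written to one of the per-device
 * cgroup files and apply it to the matching group.  For example
 * (illustrative device and value):
 *
 *	# echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * limits reads on device 8:16 to 1MB/s for this cgroup.
 */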
814static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
815 int fileid, struct blkio_cgroup *blkcg)
816{
817 struct gendisk *disk = NULL;
818 struct blkio_group *blkg = NULL;
819 char *s[4], *p, *major_s = NULL, *minor_s = NULL;
820 unsigned long major, minor;
821 int i = 0, ret = -EINVAL;
822 int part;
823 dev_t dev;
824 u64 temp;
825
826 memset(s, 0, sizeof(s));
827
828 while ((p = strsep(&buf, " ")) != NULL) {
829 if (!*p)
830 continue;
831
832 s[i++] = p;
833
834		/* Prevent the user from inputting too many fields */
835 if (i == 3)
836 break;
837 }
838
839 if (i != 2)
840 goto out;
841
842 p = strsep(&s[0], ":");
843 if (p != NULL)
844 major_s = p;
845 else
846 goto out;
847
848 minor_s = s[0];
849 if (!minor_s)
850 goto out;
851
852 if (strict_strtoul(major_s, 10, &major))
853 goto out;
854
855 if (strict_strtoul(minor_s, 10, &minor))
856 goto out;
857
858 dev = MKDEV(major, minor);
859
860 if (strict_strtoull(s[1], 10, &temp))
861 goto out;
862
863 disk = get_gendisk(dev, &part);
864 if (!disk || part)
865 goto out;
866
867 rcu_read_lock();
868
869 spin_lock_irq(disk->queue->queue_lock);
870 blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
871 spin_unlock_irq(disk->queue->queue_lock);
872
873 if (IS_ERR(blkg)) {
874 ret = PTR_ERR(blkg);
875 goto out_unlock;
876 }
877
878 switch (plid) {
879 case BLKIO_POLICY_PROP:
880 if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
881 temp > BLKIO_WEIGHT_MAX)
882 goto out_unlock;
883
884 blkg->conf.weight = temp;
885 blkio_update_group_weight(blkg, temp ?: blkcg->weight);
886 break;
887 case BLKIO_POLICY_THROTL:
888		switch (fileid) {
889 case BLKIO_THROTL_read_bps_device:
890 blkg->conf.bps[READ] = temp;
891 blkio_update_group_bps(blkg, temp ?: -1, fileid);
892 break;
893 case BLKIO_THROTL_write_bps_device:
894 blkg->conf.bps[WRITE] = temp;
895 blkio_update_group_bps(blkg, temp ?: -1, fileid);
896 break;
897 case BLKIO_THROTL_read_iops_device:
898 if (temp > THROTL_IOPS_MAX)
899 goto out_unlock;
900 blkg->conf.iops[READ] = temp;
901 blkio_update_group_iops(blkg, temp ?: -1, fileid);
902 break;
903 case BLKIO_THROTL_write_iops_device:
904 if (temp > THROTL_IOPS_MAX)
905 goto out_unlock;
906 blkg->conf.iops[WRITE] = temp;
907 blkio_update_group_iops(blkg, temp ?: -1, fileid);
908 break;
909 }
910 break;
911 default:
912 BUG();
913 }
914 ret = 0;
915out_unlock:
916 rcu_read_unlock();
917out:
918 put_disk(disk);
919
920 /*
921 * If queue was bypassing, we should retry. Do so after a short
922 * msleep(). It isn't strictly necessary but queue can be
923 * bypassing for some time and it's always nice to avoid busy
924 * looping.
925 */
926 if (ret == -EBUSY) {
927 msleep(10);
928 return restart_syscall();
929 }
930 return ret;
931}
932
933static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
934 const char *buffer)
935{
936 int ret = 0;
937 char *buf;
938 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
939 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
940 int fileid = BLKIOFILE_ATTR(cft->private);
941
942 buf = kstrdup(buffer, GFP_KERNEL);
943 if (!buf)
944 return -ENOMEM;
945
946 ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
947 kfree(buf);
948 return ret;
949}
950
951static const char *blkg_dev_name(struct blkio_group *blkg)
952{
953 /* some drivers (floppy) instantiate a queue w/o disk registered */
954 if (blkg->q->backing_dev_info.dev)
955 return dev_name(blkg->q->backing_dev_info.dev);
956 return NULL;
957}
958
959static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
960 struct seq_file *m)
961{
962 const char *dname = blkg_dev_name(blkg);
963 int fileid = BLKIOFILE_ATTR(cft->private);
964 int rw = WRITE;
965
966 if (!dname)
967 return;
968
969 switch (blkg->plid) {
970 case BLKIO_POLICY_PROP:
971 if (blkg->conf.weight)
972 seq_printf(m, "%s\t%u\n",
973 dname, blkg->conf.weight);
974 break;
975 case BLKIO_POLICY_THROTL:
976 switch (fileid) {
977 case BLKIO_THROTL_read_bps_device:
978			rw = READ;	/* fall through */
979 case BLKIO_THROTL_write_bps_device:
980 if (blkg->conf.bps[rw])
981 seq_printf(m, "%s\t%llu\n",
982 dname, blkg->conf.bps[rw]);
983 break;
984 case BLKIO_THROTL_read_iops_device:
985			rw = READ;	/* fall through */
986 case BLKIO_THROTL_write_iops_device:
987 if (blkg->conf.iops[rw])
988 seq_printf(m, "%s\t%u\n",
989 dname, blkg->conf.iops[rw]);
990 break;
991 }
992 break;
993 default:
994 BUG();
995 }
996}
997
998/* cgroup files which read their data from policy nodes end up here */
999static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
1000 struct seq_file *m)
1001{
1002 struct blkio_group *blkg;
1003 struct hlist_node *n;
1004
1005 spin_lock_irq(&blkcg->lock);
1006 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
1007 if (BLKIOFILE_POLICY(cft->private) == blkg->plid)
1008 blkio_print_group_conf(cft, blkg, m);
1009 spin_unlock_irq(&blkcg->lock);
1010}
1011
1012static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
1013 struct seq_file *m)
1014{
1015 struct blkio_cgroup *blkcg;
1016 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1017 int name = BLKIOFILE_ATTR(cft->private);
1018
1019 blkcg = cgroup_to_blkio_cgroup(cgrp);
1020
1021	switch (plid) {
1022	case BLKIO_POLICY_PROP:
1023		switch (name) {
1024 case BLKIO_PROP_weight_device:
1025 blkio_read_conf(cft, blkcg, m);
1026 return 0;
1027 default:
1028 BUG();
1029 }
1030 break;
1031 case BLKIO_POLICY_THROTL:
1032		switch (name) {
1033 case BLKIO_THROTL_read_bps_device:
1034 case BLKIO_THROTL_write_bps_device:
1035 case BLKIO_THROTL_read_iops_device:
1036 case BLKIO_THROTL_write_iops_device:
1037 blkio_read_conf(cft, blkcg, m);
1038 return 0;
1039 default:
1040 BUG();
1041 }
1042 break;
1043 default:
1044 BUG();
1045 }
1046
1047 return 0;
1048}
1049
1050static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
1051 struct cftype *cft, struct cgroup_map_cb *cb,
1052 enum stat_type type, bool show_total, bool pcpu)
1053{
1054 struct blkio_group *blkg;
1055 struct hlist_node *n;
1056 uint64_t cgroup_total = 0;
1057
1058 rcu_read_lock();
1059 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
1060 const char *dname = blkg_dev_name(blkg);
1061
1062 if (!dname || BLKIOFILE_POLICY(cft->private) != blkg->plid)
1063 continue;
1064 if (pcpu)
1065 cgroup_total += blkio_get_stat_cpu(blkg, cb, dname,
1066 type);
1067 else {
1068 spin_lock_irq(&blkg->stats_lock);
1069 cgroup_total += blkio_get_stat(blkg, cb, dname, type);
1070 spin_unlock_irq(&blkg->stats_lock);
1071 }
1072 }
1073 if (show_total)
1074 cb->fill(cb, "Total", cgroup_total);
1075 rcu_read_unlock();
1076 return 0;
1077}
1078
1079/* All map-type cgroup files are serviced by this function */
1080static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
1081 struct cgroup_map_cb *cb)
1082{
1083 struct blkio_cgroup *blkcg;
1084 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1085 int name = BLKIOFILE_ATTR(cft->private);
1086
1087 blkcg = cgroup_to_blkio_cgroup(cgrp);
1088
1089	switch (plid) {
1090	case BLKIO_POLICY_PROP:
1091		switch (name) {
1092 case BLKIO_PROP_time:
1093 return blkio_read_blkg_stats(blkcg, cft, cb,
1094 BLKIO_STAT_TIME, 0, 0);
1095 case BLKIO_PROP_sectors:
1096 return blkio_read_blkg_stats(blkcg, cft, cb,
1097 BLKIO_STAT_CPU_SECTORS, 0, 1);
1098 case BLKIO_PROP_io_service_bytes:
1099 return blkio_read_blkg_stats(blkcg, cft, cb,
1100 BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1101 case BLKIO_PROP_io_serviced:
1102 return blkio_read_blkg_stats(blkcg, cft, cb,
1103 BLKIO_STAT_CPU_SERVICED, 1, 1);
1104 case BLKIO_PROP_io_service_time:
1105 return blkio_read_blkg_stats(blkcg, cft, cb,
1106 BLKIO_STAT_SERVICE_TIME, 1, 0);
1107 case BLKIO_PROP_io_wait_time:
1108 return blkio_read_blkg_stats(blkcg, cft, cb,
1109 BLKIO_STAT_WAIT_TIME, 1, 0);
1110 case BLKIO_PROP_io_merged:
1111 return blkio_read_blkg_stats(blkcg, cft, cb,
1112 BLKIO_STAT_CPU_MERGED, 1, 1);
1113 case BLKIO_PROP_io_queued:
1114 return blkio_read_blkg_stats(blkcg, cft, cb,
1115 BLKIO_STAT_QUEUED, 1, 0);
1116#ifdef CONFIG_DEBUG_BLK_CGROUP
1117 case BLKIO_PROP_unaccounted_time:
1118 return blkio_read_blkg_stats(blkcg, cft, cb,
1119 BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
1120 case BLKIO_PROP_dequeue:
1121 return blkio_read_blkg_stats(blkcg, cft, cb,
1122 BLKIO_STAT_DEQUEUE, 0, 0);
1123 case BLKIO_PROP_avg_queue_size:
1124 return blkio_read_blkg_stats(blkcg, cft, cb,
1125 BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
1126 case BLKIO_PROP_group_wait_time:
1127 return blkio_read_blkg_stats(blkcg, cft, cb,
1128 BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
1129 case BLKIO_PROP_idle_time:
1130 return blkio_read_blkg_stats(blkcg, cft, cb,
1131 BLKIO_STAT_IDLE_TIME, 0, 0);
1132 case BLKIO_PROP_empty_time:
1133 return blkio_read_blkg_stats(blkcg, cft, cb,
1134 BLKIO_STAT_EMPTY_TIME, 0, 0);
1135#endif
1136 default:
1137 BUG();
1138 }
1139 break;
1140 case BLKIO_POLICY_THROTL:
1141		switch (name) {
1142 case BLKIO_THROTL_io_service_bytes:
1143 return blkio_read_blkg_stats(blkcg, cft, cb,
1144 BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1145 case BLKIO_THROTL_io_serviced:
1146 return blkio_read_blkg_stats(blkcg, cft, cb,
1147 BLKIO_STAT_CPU_SERVICED, 1, 1);
1148 default:
1149 BUG();
1150 }
1151 break;
1152 default:
1153 BUG();
1154 }
1155
1156 return 0;
1157}
1158
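/*
 * Update the cgroup-wide default weight and propagate it to every group
 * of the given policy that has no per-device weight override.
 */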
1159static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
1160{
1161 struct blkio_group *blkg;
1162 struct hlist_node *n;
1163
1164 if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
1165 return -EINVAL;
1166
1167 spin_lock(&blkio_list_lock);
1168 spin_lock_irq(&blkcg->lock);
1169 blkcg->weight = (unsigned int)val;
1170
1171 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
1172 if (blkg->plid == plid && !blkg->conf.weight)
1173 blkio_update_group_weight(blkg, blkcg->weight);
1174
1175 spin_unlock_irq(&blkcg->lock);
1176 spin_unlock(&blkio_list_lock);
1177 return 0;
1178}
1179
1180static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft) {
1181 struct blkio_cgroup *blkcg;
1182 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1183 int name = BLKIOFILE_ATTR(cft->private);
1184
1185 blkcg = cgroup_to_blkio_cgroup(cgrp);
1186
1187	switch (plid) {
1188	case BLKIO_POLICY_PROP:
1189		switch (name) {
1190 case BLKIO_PROP_weight:
1191 return (u64)blkcg->weight;
1192 }
1193 break;
1194 default:
1195 BUG();
1196 }
1197 return 0;
1198}
1199
1200static int
1201blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1202{
1203 struct blkio_cgroup *blkcg;
1204 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1205 int name = BLKIOFILE_ATTR(cft->private);
1206
1207 blkcg = cgroup_to_blkio_cgroup(cgrp);
1208
1209	switch (plid) {
1210	case BLKIO_POLICY_PROP:
1211		switch (name) {
1212 case BLKIO_PROP_weight:
1213 return blkio_weight_write(blkcg, plid, val);
1214 }
1215 break;
1216 default:
1217 BUG();
1218 }
1219
1220 return 0;
1221}
1222
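/*
 * Control files exported in each blkio cgroup directory.  cft->private
 * encodes the owning policy and the file id via BLKIOFILE_PRIVATE() so
 * that the shared read/write handlers above can dispatch on them.
 */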
1223struct cftype blkio_files[] = {
1224 {
1225 .name = "weight_device",
1226 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1227 BLKIO_PROP_weight_device),
1228 .read_seq_string = blkiocg_file_read,
1229 .write_string = blkiocg_file_write,
1230 .max_write_len = 256,
1231 },
1232 {
1233 .name = "weight",
1234 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1235 BLKIO_PROP_weight),
1236 .read_u64 = blkiocg_file_read_u64,
1237 .write_u64 = blkiocg_file_write_u64,
1238 },
1239 {
1240 .name = "time",
1241 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1242 BLKIO_PROP_time),
1243 .read_map = blkiocg_file_read_map,
1244 },
1245 {
1246 .name = "sectors",
1247 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1248 BLKIO_PROP_sectors),
1249 .read_map = blkiocg_file_read_map,
1250 },
1251 {
1252 .name = "io_service_bytes",
1253 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1254 BLKIO_PROP_io_service_bytes),
1255 .read_map = blkiocg_file_read_map,
1256 },
1257 {
1258 .name = "io_serviced",
1259 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1260 BLKIO_PROP_io_serviced),
1261 .read_map = blkiocg_file_read_map,
1262 },
1263 {
1264 .name = "io_service_time",
1265 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1266 BLKIO_PROP_io_service_time),
1267 .read_map = blkiocg_file_read_map,
1268 },
1269 {
1270 .name = "io_wait_time",
1271 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1272 BLKIO_PROP_io_wait_time),
1273 .read_map = blkiocg_file_read_map,
1274 },
1275 {
1276 .name = "io_merged",
1277 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1278 BLKIO_PROP_io_merged),
1279 .read_map = blkiocg_file_read_map,
1280 },
1281 {
1282 .name = "io_queued",
1283 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1284 BLKIO_PROP_io_queued),
1285 .read_map = blkiocg_file_read_map,
1286 },
1287 {
1288 .name = "reset_stats",
1289 .write_u64 = blkiocg_reset_stats,
1290 },
1291#ifdef CONFIG_BLK_DEV_THROTTLING
1292 {
1293 .name = "throttle.read_bps_device",
1294 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1295 BLKIO_THROTL_read_bps_device),
1296 .read_seq_string = blkiocg_file_read,
1297 .write_string = blkiocg_file_write,
1298 .max_write_len = 256,
1299 },
1300
1301 {
1302 .name = "throttle.write_bps_device",
1303 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1304 BLKIO_THROTL_write_bps_device),
1305 .read_seq_string = blkiocg_file_read,
1306 .write_string = blkiocg_file_write,
1307 .max_write_len = 256,
1308 },
1309
1310 {
1311 .name = "throttle.read_iops_device",
1312 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1313 BLKIO_THROTL_read_iops_device),
1314 .read_seq_string = blkiocg_file_read,
1315 .write_string = blkiocg_file_write,
1316 .max_write_len = 256,
1317 },
1318
1319 {
1320 .name = "throttle.write_iops_device",
1321 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1322 BLKIO_THROTL_write_iops_device),
1323 .read_seq_string = blkiocg_file_read,
1324 .write_string = blkiocg_file_write,
1325 .max_write_len = 256,
1326 },
1327 {
1328 .name = "throttle.io_service_bytes",
1329 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1330 BLKIO_THROTL_io_service_bytes),
1331 .read_map = blkiocg_file_read_map,
1332 },
1333 {
1334 .name = "throttle.io_serviced",
1335 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1336 BLKIO_THROTL_io_serviced),
1337 .read_map = blkiocg_file_read_map,
1338 },
1339#endif /* CONFIG_BLK_DEV_THROTTLING */
1340
1341#ifdef CONFIG_DEBUG_BLK_CGROUP
1342 {
1343 .name = "avg_queue_size",
1344 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1345 BLKIO_PROP_avg_queue_size),
1346 .read_map = blkiocg_file_read_map,
1347 },
1348 {
1349 .name = "group_wait_time",
1350 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1351 BLKIO_PROP_group_wait_time),
1352 .read_map = blkiocg_file_read_map,
1353 },
1354 {
1355 .name = "idle_time",
1356 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1357 BLKIO_PROP_idle_time),
1358 .read_map = blkiocg_file_read_map,
1359 },
1360 {
1361 .name = "empty_time",
1362 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1363 BLKIO_PROP_empty_time),
1364 .read_map = blkiocg_file_read_map,
1365 },
1366 {
1367 .name = "dequeue",
1368 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1369 BLKIO_PROP_dequeue),
1370 .read_map = blkiocg_file_read_map,
1371 },
1372 {
1373 .name = "unaccounted_time",
1374 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1375 BLKIO_PROP_unaccounted_time),
1376 .read_map = blkiocg_file_read_map,
1377 },
1378#endif
1379};
1380
1381static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1382{
1383 return cgroup_add_files(cgroup, subsys, blkio_files,
1384 ARRAY_SIZE(blkio_files));
1385}
1386
1387static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
1388 struct cgroup *cgroup)
1389{
1390 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1391 unsigned long flags;
1392 struct blkio_group *blkg;
1393 struct request_queue *q;
1394 struct blkio_policy_type *blkiop;
1395
1396 rcu_read_lock();
1397
1398 do {
1399 spin_lock_irqsave(&blkcg->lock, flags);
1400
1401 if (hlist_empty(&blkcg->blkg_list)) {
1402 spin_unlock_irqrestore(&blkcg->lock, flags);
1403 break;
1404 }
1405
1406 blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
1407 blkcg_node);
1408 q = rcu_dereference(blkg->q);
1409 __blkiocg_del_blkio_group(blkg);
1410
1411 spin_unlock_irqrestore(&blkcg->lock, flags);
1412
1413 /*
1414 * This blkio_group is being unlinked as associated cgroup is
1415 * going away. Let all the IO controlling policies know about
1416 * this event.
1417 */
1418 spin_lock(&blkio_list_lock);
1419 list_for_each_entry(blkiop, &blkio_list, list) {
1420 if (blkiop->plid != blkg->plid)
1421 continue;
1422 blkiop->ops.blkio_unlink_group_fn(q, blkg);
1423 }
1424 spin_unlock(&blkio_list_lock);
1425 } while (1);
1426
1427 rcu_read_unlock();
1428
1429 return 0;
1430}
1431
1432static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1433{
1434 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1435
1436 if (blkcg != &blkio_root_cgroup)
1437 kfree(blkcg);
1438}
1439
1440static struct cgroup_subsys_state *
1441blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1442{
1443 struct blkio_cgroup *blkcg;
1444 struct cgroup *parent = cgroup->parent;
1445
1446 if (!parent) {
1447 blkcg = &blkio_root_cgroup;
1448 goto done;
1449 }
1450
1451 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1452 if (!blkcg)
1453 return ERR_PTR(-ENOMEM);
1454
1455 blkcg->weight = BLKIO_WEIGHT_DEFAULT;
1456done:
1457 spin_lock_init(&blkcg->lock);
1458 INIT_HLIST_HEAD(&blkcg->blkg_list);
1459
1460 return &blkcg->css;
1461}
1462
1463/**
1464 * blkcg_init_queue - initialize blkcg part of request queue
1465 * @q: request_queue to initialize
1466 *
1467 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1468 * part of new request_queue @q.
1469 *
1470 * RETURNS:
1471 * 0 on success, -errno on failure.
1472 */
1473int blkcg_init_queue(struct request_queue *q)
1474{
1475 might_sleep();
1476
1477 return blk_throtl_init(q);
1478}
1479
1480/**
1481 * blkcg_drain_queue - drain blkcg part of request_queue
1482 * @q: request_queue to drain
1483 *
1484 * Called from blk_drain_queue(). Responsible for draining blkcg part.
1485 */
1486void blkcg_drain_queue(struct request_queue *q)
1487{
1488 lockdep_assert_held(q->queue_lock);
1489
1490 blk_throtl_drain(q);
1491}
1492
1493/**
1494 * blkcg_exit_queue - exit and release blkcg part of request_queue
1495 * @q: request_queue being released
1496 *
1497 * Called from blk_release_queue(). Responsible for exiting blkcg part.
1498 */
1499void blkcg_exit_queue(struct request_queue *q)
1500{
1501 blk_throtl_exit(q);
1502}
1503
1504/*
1505 * We cannot support shared io contexts, as we have no means to support
1506 * two tasks with the same ioc in two different groups without major rework
1507 * of the main cic data structures. For now we allow a task to change
1508 * its cgroup only if it's the only owner of its ioc.
1509 */
1510static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
1511 struct cgroup_taskset *tset)
1512{
1513 struct task_struct *task;
1514 struct io_context *ioc;
1515 int ret = 0;
1516
1517 /* task_lock() is needed to avoid races with exit_io_context() */
1518 cgroup_taskset_for_each(task, cgrp, tset) {
1519 task_lock(task);
1520 ioc = task->io_context;
1521 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1522 ret = -EINVAL;
1523 task_unlock(task);
1524 if (ret)
1525 break;
1526 }
1527 return ret;
1528}
1529
1530static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
1531 struct cgroup_taskset *tset)
1532{
1533 struct task_struct *task;
1534 struct io_context *ioc;
1535
1536 cgroup_taskset_for_each(task, cgrp, tset) {
1537 /* we don't lose anything even if ioc allocation fails */
1538 ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
1539 if (ioc) {
1540 ioc_cgroup_changed(ioc);
1541 put_io_context(ioc);
1542 }
1543 }
1544}
1545
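/*
 * Registration interface for IO control policies.  In this tree the
 * proportional-weight scheduler (CFQ) and the block throttling code are
 * the expected users; each registers a blkio_policy_type for its plid at
 * init time and unregisters it on exit.
 */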
1546void blkio_policy_register(struct blkio_policy_type *blkiop)
1547{
1548 spin_lock(&blkio_list_lock);
1549
1550 BUG_ON(blkio_policy[blkiop->plid]);
1551 blkio_policy[blkiop->plid] = blkiop;
1552 list_add_tail(&blkiop->list, &blkio_list);
1553
1554 spin_unlock(&blkio_list_lock);
1555}
1556EXPORT_SYMBOL_GPL(blkio_policy_register);
1557
1558void blkio_policy_unregister(struct blkio_policy_type *blkiop)
1559{
1560 spin_lock(&blkio_list_lock);
1561
1562 BUG_ON(blkio_policy[blkiop->plid] != blkiop);
1563 blkio_policy[blkiop->plid] = NULL;
1564 list_del_init(&blkiop->list);
1565
1566 spin_unlock(&blkio_list_lock);
1567}
1568EXPORT_SYMBOL_GPL(blkio_policy_unregister);