/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"
#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
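
/*
 * Example (illustrative only): for the throttle read-bps file,
 * BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device)
 * packs the policy id into bits 16-31 and the attribute into bits 0-15;
 * BLKIOFILE_POLICY() and BLKIOFILE_ATTR() recover the two halves from
 * cft->private.
 */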
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}
static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}
/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}
/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}
/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);
static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
							blkg, weight);
	}
}
static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								blkg, bps);
	}
}
static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								blkg, iops);
	}
}
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
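
/*
 * For example, a synchronous write of @add units bumps both
 * stat[BLKIO_STAT_WRITE] and stat[BLKIO_STAT_SYNC]; the read/write and
 * sync/async breakdowns therefore each sum to the same total.
 */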
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				   unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
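
/*
 * Note: the per-cpu counters above are written inside a u64_stats syncp
 * section; the reader side (see blkio_read_stat_cpu() below) retries with
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() so that 64-bit values
 * stay consistent on 32-bit machines.
 */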
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
				direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg, *new_blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q, plid);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 *
	 * FIXME: The following is broken. Percpu memory allocation
	 * requires %GFP_KERNEL context and can't be performed from IO
	 * path. Allocation here should inherently be atomic and the
	 * following lock dancing can be removed once the broken percpu
	 * allocation is fixed.
	 */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	new_blkg = pol->ops.blkio_alloc_group_fn(q, blkcg);
	if (new_blkg) {
		new_blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);

		spin_lock_init(&new_blkg->stats_lock);
		rcu_assign_pointer(new_blkg->q, q);
		new_blkg->blkcg_id = css_id(&blkcg->css);
		new_blkg->plid = plid;
		cgroup_path(blkcg->css.cgroup, new_blkg->path,
			    sizeof(new_blkg->path));
	}

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	css_put(&blkcg->css);

	/* did bypass get turned on in between? */
	if (unlikely(blk_queue_bypass(q)) && !for_root) {
		blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
		goto out;
	}

	/* did someone beat us to it? */
	blkg = blkg_lookup(blkcg, q, plid);
	if (unlikely(blkg))
		goto out;

	/* did alloc fail? */
	if (unlikely(!new_blkg || !new_blkg->stats_cpu)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	swap(blkg, new_blkg);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	pol->ops.blkio_link_group_fn(q, blkg);
	spin_unlock(&blkcg->lock);
out:
	if (new_blkg) {
		free_percpu(new_blkg->stats_cpu);
		kfree(new_blkg);
	}
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
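
/*
 * Illustrative caller pattern (not taken from this file): a policy is
 * expected to enter blkg_lookup_create() with the RCU read lock and
 * q->queue_lock held, e.g.
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_PROP, false);
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 *
 * and must check the result with IS_ERR(), since failures are returned
 * as ERR_PTR() values rather than NULL.
 */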
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
}
/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q,
				enum blkio_policy_id plid)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q && blkg->plid == plid)
			return blkg;

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);
void blkg_destroy_all(struct request_queue *q)
{
	struct blkio_policy_type *pol;

	while (true) {
		bool done = true;

		spin_lock(&blkio_list_lock);
		spin_lock_irq(q->queue_lock);

		/*
		 * clear_queue_fn() might return with non-empty group list
		 * if it raced cgroup removal and lost. cgroup removal is
		 * guaranteed to make forward progress and retrying after a
		 * while is enough. This ugliness is scheduled to be
		 * removed after locking update.
		 */
		list_for_each_entry(pol, &blkio_list, list)
			if (!pol->ops.blkio_clear_queue_fn(q))
				done = false;

		spin_unlock_irq(q->queue_lock);
		spin_unlock(&blkio_list_lock);

		if (done)
			break;

		msleep(10);	/* just some random duration I like */
	}
}
static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;

	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
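
/*
 * The keys produced above look like "8:16 Read", "8:16 Write", ...,
 * "8:16 Total" for a disk with dev_t 8:16, or just "8:16" when
 * diskname_only is set (e.g. for the time and sectors files).
 */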
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}
static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	struct gendisk *disk = NULL;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	/* For rule removal, do not check for device presence. */
	if (temp) {
		disk = get_gendisk(dev, &part);
		if (!disk || part) {
			ret = -ENODEV;
			goto out;
		}
	}

	newpn->dev = dev;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = temp;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)temp;
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out:
	put_disk(disk);
	return ret;
}
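
/*
 * The accepted input is "major:minor value", for example (assuming a
 * throttle rule for device 8:16):
 *
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * A value of 0 is treated as a rule-removal request by
 * blkio_delete_rule_command() below.
 */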
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int weight;

	spin_lock_irqsave(&blkcg->lock, flags);

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		weight = pn->val.weight;
	else
		weight = blkcg->weight;

	spin_unlock_irqrestore(&blkcg->lock, flags);

	return weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);
uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}
uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}
unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}
unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}
/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
			break;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
					struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
			break;
		}
		break;
	default:
		BUG();
	}
}
/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}
/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		kfree(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}
static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}
/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}
static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			if (pcpu)
				cgroup_total += blkio_get_stat_cpu(blkg, cb,
						blkg->dev, type);
			else {
				spin_lock_irq(&blkg->stats_lock);
				cgroup_total += blkio_get_stat(blkg, cb,
						blkg->dev, type);
				spin_unlock_irq(&blkg->stats_lock);
			}
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}
/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
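
/*
 * Writes to the blkio.weight file land here via blkiocg_file_write_u64();
 * values outside [BLKIO_WEIGHT_MIN, BLKIO_WEIGHT_MAX] are rejected, and a
 * per-device rule from blkio.weight_device (if present) continues to
 * override the new default for that device.
 */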
static u64
blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
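
/*
 * The files declared in blkio_files[] show up under the blkio cgroup
 * hierarchy with the subsystem prefix, e.g. (illustrative paths)
 * /sys/fs/cgroup/blkio/blkio.weight and
 * /sys/fs/cgroup/blkio/blkio.throttle.read_bps_device.
 */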
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	struct request_queue *q;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		q = rcu_dereference(blkg->q);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(q, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);