/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"
#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);
struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
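
/*
 * Illustrative example of the encoding (just the macros above applied to
 * existing constants): a throttle file created with
 *
 *	.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 *				     BLKIO_THROTL_read_bps_device)
 *
 * stores the policy id in the high 16 bits and the attribute in the low
 * 16 bits, so the handlers can recover both with
 *
 *	plid   = BLKIOFILE_POLICY(cft->private);  (BLKIO_POLICY_THROTL)
 *	fileid = BLKIOFILE_ATTR(cft->private);    (BLKIO_THROTL_read_bps_device)
 */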
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);
static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
								 blkg, weight);
	}
}
static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								   blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								    blkg, bps);
	}
}
static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   int plid, unsigned int iops,
					   int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								    blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								     blkg, iops);
	}
}
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
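
/*
 * For example, a synchronous read bumps both stat[BLKIO_STAT_READ] and
 * stat[BLKIO_STAT_SYNC] by @add, while an async write bumps
 * stat[BLKIO_STAT_WRITE] and stat[BLKIO_STAT_ASYNC]; the READ/WRITE pair
 * and the SYNC/ASYNC pair therefore always sum to the same total.
 */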
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&pd->stats));
	pd->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&pd->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	pd->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&pd->stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	pd->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	pd->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
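
/*
 * Informally, for a request queued at @start_time, dispatched to the device
 * at @io_start_time and completed at "now":
 *
 *	wait_time    += io_start_time - start_time;	(time spent queued)
 *	service_time += now - io_start_time;		(time on the device)
 *
 * with either increment skipped if the clocks appear to have gone backwards.
 */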
/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
			direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}
/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 *
 * FIXME: Should be called with queue locked but currently isn't due to
 *        percpu stat breakage.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->q, q);
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* broken, read comment in the callsite */
		pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
		if (!pd->stats_cpu) {
			blkg_free(blkg);
			return NULL;
		}
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg, *new_blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 *
	 * FIXME: The following is broken.  Percpu memory allocation
	 * requires %GFP_KERNEL context and can't be performed from IO
	 * path.  Allocation here should inherently be atomic and the
	 * following lock dancing can be removed once the broken percpu
	 * allocation is fixed.
	 */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	new_blkg = blkg_alloc(blkcg, q);

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	/* did bypass get turned on in between? */
	if (unlikely(blk_queue_bypass(q)) && !for_root) {
		blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
		goto out;
	}

	/* did someone beat us to it? */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(blkg))
		goto out;

	/* did alloc fail? */
	if (unlikely(!new_blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	swap(blkg, new_blkg);

	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	q->nr_blkgs++;

	spin_unlock(&blkcg->lock);
out:
	blkg_free(new_blkg);
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
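
/*
 * Illustrative call pattern (blkio_policy_parse_and_set() below is the real
 * call site): the caller holds the RCU read lock and the queue lock and lets
 * blkg_lookup_create() do the lock dance described above, e.g.
 *
 *	rcu_read_lock();
 *	spin_lock_irq(disk->queue->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
 *	spin_unlock_irq(disk->queue->queue_lock);
 *	rcu_read_unlock();
 *	if (IS_ERR(blkg))
 *		return PTR_ERR(blkg);
 */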
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
}
/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg = blkg->blkcg;
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;

	lockdep_assert_held(q->queue_lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	list_del_init(&blkg->q_node);

	WARN_ON_ONCE(q->nr_blkgs <= 0);
	q->nr_blkgs--;

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}
/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down.  This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	while (true) {
		bool done = true;

		spin_lock_irq(q->queue_lock);

		list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
			/* skip root? */
			if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
				continue;

			/*
			 * If cgroup removal path got to blk_group first
			 * and removed it from cgroup list, then it will
			 * take care of destroying cfqg also.
			 */
			if (!blkiocg_del_blkio_group(blkg))
				blkg_destroy(blkg);
			else
				done = false;
		}

		spin_unlock_irq(q->queue_lock);

		/*
		 * Group list may not be empty if we raced cgroup removal
		 * and lost.  cgroup removal is guaranteed to make forward
		 * progress and retrying after a while is enough.  This
		 * ugliness is scheduled to be removed after locking
		 * simplification.
		 */
		if (done)
			break;

		msleep(10);	/* just some random duration I like */
	}
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);
static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}
void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;

	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(pd->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];

			spin_lock(&blkg->stats_lock);
			stats = &pd->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
			idling = blkio_blkg_idling(stats);
			waiting = blkio_blkg_waiting(stats);
			empty = blkio_blkg_empty(stats);
#endif
			for (i = 0; i < BLKIO_STAT_TOTAL; i++)
				queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
			memset(stats, 0, sizeof(struct blkio_group_stats));
			for (i = 0; i < BLKIO_STAT_TOTAL; i++)
				stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
			if (idling) {
				blkio_mark_blkg_idling(stats);
				stats->start_idle_time = now;
			}
			if (waiting) {
				blkio_mark_blkg_waiting(stats);
				stats->start_group_wait_time = now;
			}
			if (empty) {
				blkio_mark_blkg_empty(stats);
				stats->start_empty_time = now;
			}
#endif
			spin_unlock(&blkg->stats_lock);

			/* Reset Per cpu stats which don't take blkg->stats_lock */
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
			       char *str, int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%s", dname);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
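
/*
 * For example, with dname "8:16" this produces keys such as "8:16 Read",
 * "8:16 Write", "8:16 Sync", "8:16 Async" and "8:16 Total", or just "8:16"
 * when diskname_only is set (as blkio_fill_stat() below does for scalar
 * stats like time and sectors).
 */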
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, const char *dname)
{
	blkio_get_key_name(0, dname, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
				    enum stat_type_cpu type,
				    enum stat_sub_type sub_type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}
static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
				   struct cgroup_map_cb *cb, const char *dname,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, plid, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
				       dname);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
		     blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
			       struct cgroup_map_cb *cb, const char *dname,
			       enum stat_type type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.time, cb, dname);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.unaccounted_time, cb, dname);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = pd->stats.avg_queue_size_sum;
		uint64_t samples = pd->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					sum, cb, dname);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.group_wait_time, cb, dname);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.idle_time, cb, dname);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.empty_time, cb, dname);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.dequeue, cb, dname);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		cb->fill(cb, key_str, pd->stats.stat_arr[type][sub_type]);
	}

	disk_total = pd->stats.stat_arr[type][BLKIO_STAT_READ] +
			pd->stats.stat_arr[type][BLKIO_STAT_WRITE];

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	struct blkg_policy_data *pd;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	pd = blkg->pd[plid];

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		pd->conf.weight = temp;
		blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			pd->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			pd->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If queue was bypassing, we should retry.  Do so after a short
	 * msleep().  It isn't strictly necessary but queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}
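
/*
 * The strings parsed above are the usual "MAJOR:MINOR VALUE" device rules,
 * e.g. (illustrative shell usage, not part of this file):
 *
 *	echo "8:16 500" > blkio.weight_device
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * A value of 0 clears the per-device setting, so the cgroup-wide weight
 * (or "no throttling limit") applies to that device again.
 */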
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}
static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}
static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
				   struct seq_file *m)
{
	int plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);
	struct blkg_policy_data *pd = blkg->pd[plid];
	const char *dname = blkg_dev_name(blkg);
	int rw = WRITE;

	if (!dname)
		return;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if (pd->conf.weight)
			seq_printf(m, "%s\t%u\n",
				   dname, pd->conf.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			rw = READ;
		case BLKIO_THROTL_write_bps_device:
			if (pd->conf.bps[rw])
				seq_printf(m, "%s\t%llu\n",
					   dname, pd->conf.bps[rw]);
			break;
		case BLKIO_THROTL_read_iops_device:
			rw = READ;
		case BLKIO_THROTL_write_iops_device:
			if (pd->conf.iops[rw])
				seq_printf(m, "%s\t%u\n",
					   dname, pd->conf.iops[rw]);
			break;
		}
		break;
	default:
		BUG();
	}
}
/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
			    struct seq_file *m)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		blkio_print_group_conf(cft, blkg, m);
	spin_unlock_irq(&blkcg->lock);
}
static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
				 struct cftype *cft, struct cgroup_map_cb *cb,
				 enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = blkg_dev_name(blkg);
		int plid = BLKIOFILE_POLICY(cft->private);

		if (!dname)
			continue;
		if (pcpu) {
			cgroup_total += blkio_get_stat_cpu(blkg, plid,
							   cb, dname, type);
		} else {
			spin_lock_irq(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, plid,
						       cb, dname, type);
			spin_unlock_irq(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);

	rcu_read_unlock();
	return 0;
}
/* All map-type cgroup files get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[plid];

		if (!pd->conf.weight)
			blkio_update_group_weight(blkg, plid, blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, plid, val);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
			       struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	struct request_queue *q;

	rcu_read_lock();

	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		q = rcu_dereference(blkg->q);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		spin_lock_irqsave(q->queue_lock, flags);
		blkg_destroy(blkg);
		spin_unlock_irqrestore(q->queue_lock, flags);
		spin_unlock(&blkio_list_lock);
	} while (1);

	rcu_read_unlock();

	return 0;
}
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}
/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}
/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}
static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);