/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);
struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
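
/*
 * Worked example (editor's note, not part of the original source): for the
 * throttle read-bps file declared later in blkio_files[], cft->private is
 * built as
 *
 *	BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device)
 *
 * which packs the policy id into the upper 16 bits and the attribute into
 * the lower 16.  BLKIOFILE_POLICY() then recovers BLKIO_POLICY_THROTL and
 * BLKIOFILE_ATTR() recovers BLKIO_THROTL_read_bps_device.
 */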
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
								 blkg, weight);
	}
}
static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								   blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								    blkg, bps);
	}
}
static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   int plid, unsigned int iops,
					   int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								    blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								     blkg, iops);
	}
}
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with queue_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
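
/*
 * Worked example (editor's note, not part of the original source): queueing
 * one synchronous write bumps both the WRITE and the SYNC buckets,
 *
 *	blkio_add_stat(stats->stat_arr[BLKIO_STAT_QUEUED], 1, true, true);
 *
 * while BLKIO_STAT_READ and BLKIO_STAT_ASYNC are left untouched.
 */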
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type.  Panics on value being zero.
 * This should be called with the queue_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the queue_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}
/* This should be called with the queue_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}
/* This should be called with the queue_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);
	BUG_ON(blkio_blkg_idling(stats));

	stats->start_idle_time = sched_clock();
	blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkio_blkg_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time)) {
			u64_stats_update_begin(&stats->syncp);
			stats->idle_time += now - stats->start_idle_time;
			u64_stats_update_end(&stats->syncp);
		}
		blkio_clear_blkg_idling(stats);
	}
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
	    stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE])
		return;

	/*
	 * The group may already be marked empty. This can happen if cfqq got
	 * a new request in the parent group and moved to this group while
	 * being added to the service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	lockdep_assert_held(blkg->q->queue_lock);

	pd->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else	/* CONFIG_DEBUG_BLK_CGROUP */
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_QUEUED], 1, direction, sync);
	blkio_end_empty_time(stats);
	u64_stats_update_end(&stats->syncp);

	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	blkio_check_and_dec_stat(stats->stat_arr[BLKIO_STAT_QUEUED], direction,
				 sync);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	stats->time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	stats->unaccounted_time += unaccounted_time;
#endif
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (pd->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
		       1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
		       bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
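
/*
 * Usage sketch (editor's note, not part of the original source): a policy
 * dispatching a request might account it roughly as
 *
 *	rcu_read_lock();
 *	blkiocg_update_dispatch_stats(blkg, pol, blk_rq_bytes(rq),
 *				      rq_data_dir(rq), rq_is_sync(rq));
 *	rcu_read_unlock();
 *
 * where blkg and pol are the caller's group and policy; the per-cpu buffers
 * above make taking the queue_lock unnecessary on this path.
 */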
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	unsigned long long now = sched_clock();

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
			       now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
			       io_start_time - start_time, direction, sync);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
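
/*
 * Worked example (editor's note, not part of the original source): for a
 * request queued at start_time = 100, dispatched at io_start_time = 140 and
 * completed at now = 190 (all from sched_clock()), the code above adds
 * 190 - 140 = 50 to BLKIO_STAT_SERVICE_TIME and 140 - 100 = 40 to
 * BLKIO_STAT_WAIT_TIME.  The time_after64() checks merely drop samples
 * whose clocks appear to have gone backwards.
 */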
/* Merged stats live in the group's stat_arr and are updated under queue_lock. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_MERGED], 1, direction, sync);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
					   msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
					alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}
		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	if (!empty)
		goto alloc_stats;
}
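
/*
 * Editor's note (not part of the original source): blkg_lookup_create() runs
 * under queue_lock, where the potentially-sleeping alloc_percpu() cannot be
 * called, so new groups park themselves on alloc_list and this worker fills
 * in pd->stats_cpu later.  The buffers are allocated outside the locks and
 * handed over with swap() under them; anything left in pcpu_stats[] is
 * reused for the next group.
 */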
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}
/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
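
/*
 * Usage sketch (editor's note, not part of the original source), mirroring
 * the call in blkio_policy_parse_and_set() below:
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_PROP, false);
 *	spin_unlock_irq(q->queue_lock);
 *	if (IS_ERR(blkg))
 *		ret = PTR_ERR(blkg);
 *	rcu_read_unlock();
 *
 * -EBUSY here means the queue was bypassing; callers are expected to retry.
 */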
/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}
/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down.  This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);
/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);
static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner.  But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid.  For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;

	if (pd->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		sc->sectors = 0;
		memset(sc->stat_arr_cpu, 0, sizeof(sc->stat_arr_cpu));
	}
}
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;
	int i;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];
			struct blkio_group_stats *stats = &pd->stats;

			/* queued stats shouldn't be cleared */
			for (i = 0; i < ARRAY_SIZE(stats->stat_arr); i++)
				if (i != BLKIO_STAT_QUEUED)
					memset(stats->stat_arr[i], 0,
					       sizeof(stats->stat_arr[i]));
			stats->time = 0;
#ifdef CONFIG_DEBUG_BLK_CGROUP
			memset((void *)stats + BLKG_STATS_DEBUG_CLEAR_START, 0,
			       BLKG_STATS_DEBUG_CLEAR_SIZE);
#endif
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
			       char *str, int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%s", dname);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
		       "Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
				    enum stat_type_cpu type,
				    enum stat_sub_type sub_type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;
	unsigned int start;
	int cpu;

	if (pd->stats_cpu == NULL)
		return val;

	for_each_possible_cpu(cpu) {
		stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}
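
/*
 * Editor's note (not part of the original source): the loop above is the
 * standard u64_stats reader pattern.  On 32bit SMP a 64bit counter can be
 * observed half-written, so the reader snapshots under a seqcount and
 * retries whenever a writer's update_begin/update_end window overlapped it:
 *
 *	do {
 *		start = u64_stats_fetch_begin(&syncp);
 *		snap = counter;
 *	} while (u64_stats_fetch_retry(&syncp, start));
 *
 * On 64bit builds the seqcount compiles away and this is a plain load.
 */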
static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
				   struct cgroup_map_cb *cb, const char *dname,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, plid, type, 0);
		blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
		cb->fill(cb, key_str, val);
		return val;
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
		blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
			       struct cgroup_map_cb *cb, const char *dname,
			       enum stat_type type)
{
	struct blkio_group_stats *stats = &blkg->pd[plid]->stats;
	uint64_t v = 0, disk_total = 0;
	char key_str[MAX_KEY_LEN];
	unsigned int sync_start;
	enum stat_sub_type st;

	if (type >= BLKIO_STAT_ARR_NR) {
		do {
			sync_start = u64_stats_fetch_begin(&stats->syncp);
			switch (type) {
			case BLKIO_STAT_TIME:
				v = stats->time;
				break;
#ifdef CONFIG_DEBUG_BLK_CGROUP
			case BLKIO_STAT_UNACCOUNTED_TIME:
				v = stats->unaccounted_time;
				break;
			case BLKIO_STAT_AVG_QUEUE_SIZE: {
				uint64_t samples = stats->avg_queue_size_samples;

				if (samples) {
					v = stats->avg_queue_size_sum;
					do_div(v, samples);
				}
				break;
			}
			case BLKIO_STAT_IDLE_TIME:
				v = stats->idle_time;
				break;
			case BLKIO_STAT_EMPTY_TIME:
				v = stats->empty_time;
				break;
			case BLKIO_STAT_DEQUEUE:
				v = stats->dequeue;
				break;
			case BLKIO_STAT_GROUP_WAIT_TIME:
				v = stats->group_wait_time;
				break;
#endif
			default:
				WARN_ON_ONCE(1);
			}
		} while (u64_stats_fetch_retry(&stats->syncp, sync_start));

		blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
		cb->fill(cb, key_str, v);
		return v;
	}

	for (st = BLKIO_STAT_READ; st < BLKIO_STAT_TOTAL; st++) {
		do {
			sync_start = u64_stats_fetch_begin(&stats->syncp);
			v = stats->stat_arr[type][st];
		} while (u64_stats_fetch_retry(&stats->syncp, sync_start));

		blkio_get_key_name(st, dname, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, v);
		if (st == BLKIO_STAT_READ || st == BLKIO_STAT_WRITE)
			disk_total += v;
	}

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	struct blkg_policy_data *pd;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	pd = blkg->pd[plid];

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		    temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		pd->conf.weight = temp;
		blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			pd->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			pd->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If queue was bypassing, we should retry.  Do so after a short
	 * msleep().  It isn't strictly necessary but queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}
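
/*
 * Example input (editor's note, not part of the original source):
 *
 *	echo "8:16 500" > blkio.weight_device
 *
 * parses to major 8, minor 16 and temp = 500, looks up (or creates) the
 * blkg for that device's queue and sets pd->conf.weight = 500.  Writing
 * "8:16 0" clears the per-device setting so blkcg->weight applies again,
 * via the "temp ?: ..." fallbacks above.
 */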
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}
static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}
static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
				   struct seq_file *m)
{
	int plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);
	struct blkg_policy_data *pd = blkg->pd[plid];
	const char *dname = blkg_dev_name(blkg);
	int rw = WRITE;

	if (!dname)
		return;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if (pd->conf.weight)
			seq_printf(m, "%s\t%u\n",
				   dname, pd->conf.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			rw = READ;
			/* fall through to share the bps printing below */
		case BLKIO_THROTL_write_bps_device:
			if (pd->conf.bps[rw])
				seq_printf(m, "%s\t%llu\n",
					   dname, pd->conf.bps[rw]);
			break;
		case BLKIO_THROTL_read_iops_device:
			rw = READ;
			/* fall through to share the iops printing below */
		case BLKIO_THROTL_write_iops_device:
			if (pd->conf.iops[rw])
				seq_printf(m, "%s\t%u\n",
					   dname, pd->conf.iops[rw]);
			break;
		}
		break;
	default:
		BUG();
	}
}
/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
			    struct seq_file *m)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		blkio_print_group_conf(cft, blkg, m);
	spin_unlock_irq(&blkcg->lock);
}
static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
				 struct cftype *cft, struct cgroup_map_cb *cb,
				 enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = blkg_dev_name(blkg);
		int plid = BLKIOFILE_POLICY(cft->private);

		if (!dname)
			continue;
		if (pcpu)
			cgroup_total += blkio_get_stat_cpu(blkg, plid,
							   cb, dname, type);
		else
			cgroup_total += blkio_get_stat(blkg, plid,
						       cb, dname, type);
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
/* All map-type cgroup files are serviced by this function. */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_MERGED, 1, 0);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[plid];

		if (!pd->conf.weight)
			blkio_update_group_weight(blkg, plid, blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, plid, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_DEBUG_BLK_CGROUP */
};
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @subsys: cgroup subsys
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.  blkgs
 * should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock
 * dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
			       struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
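
/*
 * Editor's note (not part of the original source): the trylock dance above
 * exists because the established nesting order is queue_lock first, then
 * blkcg->lock, while this function starts out holding only blkcg->lock.
 * Blocking on q->queue_lock here could deadlock against a path that already
 * holds it and wants blkcg->lock, so on contention the blkcg lock is
 * dropped, the CPU relaxed, and the loop retried.
 */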
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}
/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}
/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}
static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);