block/blk-cgroup.c
1 /*
2 * Common Block IO controller cgroup interface
3 *
4 * Based on ideas and code from CFQ, CFS and BFQ:
5 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
6 *
7 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
8 * Paolo Valente <paolo.valente@unimore.it>
9 *
10 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
11 * Nauman Rafique <nauman@google.com>
12 */
13 #include <linux/ioprio.h>
14 #include <linux/seq_file.h>
15 #include <linux/kdev_t.h>
16 #include <linux/module.h>
17 #include <linux/err.h>
18 #include <linux/blkdev.h>
19 #include <linux/slab.h>
20 #include <linux/genhd.h>
21 #include <linux/delay.h>
22 #include <linux/atomic.h>
23 #include "blk-cgroup.h"
24 #include "blk.h"
25
26 #define MAX_KEY_LEN 100
27
28 static DEFINE_SPINLOCK(blkio_list_lock);
29 static LIST_HEAD(blkio_list);
30
31 static DEFINE_MUTEX(all_q_mutex);
32 static LIST_HEAD(all_q_list);
33
34 /* List of groups pending per cpu stats allocation */
35 static DEFINE_SPINLOCK(alloc_list_lock);
36 static LIST_HEAD(alloc_list);
37
38 static void blkio_stat_alloc_fn(struct work_struct *);
39 static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);
40
41 struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
42 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
43
44 static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];
45
46 /* for encoding cft->private value on file */
47 #define BLKIOFILE_PRIVATE(x, val) (((x) << 16) | (val))
48 /* What policy owns the file, proportional or throttle */
49 #define BLKIOFILE_POLICY(val) (((val) >> 16) & 0xffff)
50 #define BLKIOFILE_ATTR(val) ((val) & 0xffff)
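/*
 * Illustrative example: the "throttle.read_bps_device" cftype below sets
 *
 *	.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 *				     BLKIO_THROTL_read_bps_device),
 *
 * so BLKIOFILE_POLICY(cft->private) recovers BLKIO_POLICY_THROTL from the
 * high 16 bits and BLKIOFILE_ATTR(cft->private) recovers the file id from
 * the low 16 bits.
 */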
51
52 struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
53 {
54 return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
55 struct blkio_cgroup, css);
56 }
57 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
58
59 static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
60 {
61 return container_of(task_subsys_state(tsk, blkio_subsys_id),
62 struct blkio_cgroup, css);
63 }
64
65 struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
66 {
67 if (bio && bio->bi_css)
68 return container_of(bio->bi_css, struct blkio_cgroup, css);
69 return task_blkio_cgroup(current);
70 }
71 EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
72
73 static inline void blkio_update_group_weight(struct blkio_group *blkg,
74 int plid, unsigned int weight)
75 {
76 struct blkio_policy_type *blkiop;
77
78 list_for_each_entry(blkiop, &blkio_list, list) {
79 /* If this policy does not own the blkg, do not send updates */
80 if (blkiop->plid != plid)
81 continue;
82 if (blkiop->ops.blkio_update_group_weight_fn)
83 blkiop->ops.blkio_update_group_weight_fn(blkg->q,
84 blkg, weight);
85 }
86 }
87
88 static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
89 u64 bps, int fileid)
90 {
91 struct blkio_policy_type *blkiop;
92
93 list_for_each_entry(blkiop, &blkio_list, list) {
94
95 /* If this policy does not own the blkg, do not send updates */
96 if (blkiop->plid != plid)
97 continue;
98
99 if (fileid == BLKIO_THROTL_read_bps_device
100 && blkiop->ops.blkio_update_group_read_bps_fn)
101 blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
102 blkg, bps);
103
104 if (fileid == BLKIO_THROTL_write_bps_device
105 && blkiop->ops.blkio_update_group_write_bps_fn)
106 blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
107 blkg, bps);
108 }
109 }
110
111 static inline void blkio_update_group_iops(struct blkio_group *blkg,
112 int plid, unsigned int iops,
113 int fileid)
114 {
115 struct blkio_policy_type *blkiop;
116
117 list_for_each_entry(blkiop, &blkio_list, list) {
118
119 /* If this policy does not own the blkg, do not send updates */
120 if (blkiop->plid != plid)
121 continue;
122
123 if (fileid == BLKIO_THROTL_read_iops_device
124 && blkiop->ops.blkio_update_group_read_iops_fn)
125 blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
126 blkg, iops);
127
128 if (fileid == BLKIO_THROTL_write_iops_device
129 && blkiop->ops.blkio_update_group_write_iops_fn)
130 blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
131 blkg, iops);
132 }
133 }
134
135 /*
136 * Add to the appropriate stat variable depending on the request type.
137 * This should be called with queue_lock held.
138 */
139 static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
140 bool sync)
141 {
142 if (direction)
143 stat[BLKIO_STAT_WRITE] += add;
144 else
145 stat[BLKIO_STAT_READ] += add;
146 if (sync)
147 stat[BLKIO_STAT_SYNC] += add;
148 else
149 stat[BLKIO_STAT_ASYNC] += add;
150 }
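/*
 * For example, blkio_add_stat(stat, 1, true, true) accounts one synchronous
 * write: both stat[BLKIO_STAT_WRITE] and stat[BLKIO_STAT_SYNC] go up by one.
 */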
151
152 /*
153  * Decrements the appropriate stat variable depending on the request
154  * type.  BUGs if the value is already zero.
155 * This should be called with the queue_lock held.
156 */
157 static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
158 {
159 if (direction) {
160 BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
161 stat[BLKIO_STAT_WRITE]--;
162 } else {
163 BUG_ON(stat[BLKIO_STAT_READ] == 0);
164 stat[BLKIO_STAT_READ]--;
165 }
166 if (sync) {
167 BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
168 stat[BLKIO_STAT_SYNC]--;
169 } else {
170 BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
171 stat[BLKIO_STAT_ASYNC]--;
172 }
173 }
174
175 #ifdef CONFIG_DEBUG_BLK_CGROUP
176 /* This should be called with the queue_lock held. */
177 static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
178 struct blkio_policy_type *pol,
179 struct blkio_group *curr_blkg)
180 {
181 struct blkg_policy_data *pd = blkg->pd[pol->plid];
182
183 if (blkio_blkg_waiting(&pd->stats))
184 return;
185 if (blkg == curr_blkg)
186 return;
187 pd->stats.start_group_wait_time = sched_clock();
188 blkio_mark_blkg_waiting(&pd->stats);
189 }
190
191 /* This should be called with the queue_lock held. */
192 static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
193 {
194 unsigned long long now;
195
196 if (!blkio_blkg_waiting(stats))
197 return;
198
199 now = sched_clock();
200 if (time_after64(now, stats->start_group_wait_time))
201 stats->group_wait_time += now - stats->start_group_wait_time;
202 blkio_clear_blkg_waiting(stats);
203 }
204
205 /* This should be called with the queue_lock held. */
206 static void blkio_end_empty_time(struct blkio_group_stats *stats)
207 {
208 unsigned long long now;
209
210 if (!blkio_blkg_empty(stats))
211 return;
212
213 now = sched_clock();
214 if (time_after64(now, stats->start_empty_time))
215 stats->empty_time += now - stats->start_empty_time;
216 blkio_clear_blkg_empty(stats);
217 }
218
219 void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
220 struct blkio_policy_type *pol)
221 {
222 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
223
224 lockdep_assert_held(blkg->q->queue_lock);
225 BUG_ON(blkio_blkg_idling(stats));
226
227 stats->start_idle_time = sched_clock();
228 blkio_mark_blkg_idling(stats);
229 }
230 EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
231
232 void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
233 struct blkio_policy_type *pol)
234 {
235 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
236
237 lockdep_assert_held(blkg->q->queue_lock);
238
239 if (blkio_blkg_idling(stats)) {
240 unsigned long long now = sched_clock();
241
242 if (time_after64(now, stats->start_idle_time)) {
243 u64_stats_update_begin(&stats->syncp);
244 stats->idle_time += now - stats->start_idle_time;
245 u64_stats_update_end(&stats->syncp);
246 }
247 blkio_clear_blkg_idling(stats);
248 }
249 }
250 EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
251
252 void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
253 struct blkio_policy_type *pol)
254 {
255 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
256
257 lockdep_assert_held(blkg->q->queue_lock);
258
259 u64_stats_update_begin(&stats->syncp);
260 stats->avg_queue_size_sum +=
261 stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
262 stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
263 stats->avg_queue_size_samples++;
264 blkio_update_group_wait_time(stats);
265 u64_stats_update_end(&stats->syncp);
266 }
267 EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
268
269 void blkiocg_set_start_empty_time(struct blkio_group *blkg,
270 struct blkio_policy_type *pol)
271 {
272 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
273
274 lockdep_assert_held(blkg->q->queue_lock);
275
276 if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
277 stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE])
278 return;
279
280 /*
281  * The group is already marked empty.  This can happen if a cfqq got a
282  * new request in the parent group and moved to this group while being
283  * added to the service tree.  Just ignore the event and move on.
284 */
285 if (blkio_blkg_empty(stats))
286 return;
287
288 stats->start_empty_time = sched_clock();
289 blkio_mark_blkg_empty(stats);
290 }
291 EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
292
293 void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
294 struct blkio_policy_type *pol,
295 unsigned long dequeue)
296 {
297 struct blkg_policy_data *pd = blkg->pd[pol->plid];
298
299 lockdep_assert_held(blkg->q->queue_lock);
300
301 pd->stats.dequeue += dequeue;
302 }
303 EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
304 #else
305 static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
306 struct blkio_policy_type *pol,
307 struct blkio_group *curr_blkg) { }
308 static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
309 #endif
310
311 void blkiocg_update_io_add_stats(struct blkio_group *blkg,
312 struct blkio_policy_type *pol,
313 struct blkio_group *curr_blkg, bool direction,
314 bool sync)
315 {
316 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
317
318 lockdep_assert_held(blkg->q->queue_lock);
319
320 u64_stats_update_begin(&stats->syncp);
321 blkio_add_stat(stats->stat_arr[BLKIO_STAT_QUEUED], 1, direction, sync);
322 blkio_end_empty_time(stats);
323 u64_stats_update_end(&stats->syncp);
324
325 blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
326 }
327 EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
328
329 void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
330 struct blkio_policy_type *pol,
331 bool direction, bool sync)
332 {
333 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
334
335 lockdep_assert_held(blkg->q->queue_lock);
336
337 u64_stats_update_begin(&stats->syncp);
338 blkio_check_and_dec_stat(stats->stat_arr[BLKIO_STAT_QUEUED], direction,
339 sync);
340 u64_stats_update_end(&stats->syncp);
341 }
342 EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
343
344 void blkiocg_update_timeslice_used(struct blkio_group *blkg,
345 struct blkio_policy_type *pol,
346 unsigned long time,
347 unsigned long unaccounted_time)
348 {
349 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
350
351 lockdep_assert_held(blkg->q->queue_lock);
352
353 u64_stats_update_begin(&stats->syncp);
354 stats->time += time;
355 #ifdef CONFIG_DEBUG_BLK_CGROUP
356 stats->unaccounted_time += unaccounted_time;
357 #endif
358 u64_stats_update_end(&stats->syncp);
359 }
360 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
361
362 /*
363 * should be called under rcu read lock or queue lock to make sure blkg pointer
364 * is valid.
365 */
366 void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
367 struct blkio_policy_type *pol,
368 uint64_t bytes, bool direction, bool sync)
369 {
370 struct blkg_policy_data *pd = blkg->pd[pol->plid];
371 struct blkio_group_stats_cpu *stats_cpu;
372 unsigned long flags;
373
374 /* If per cpu stats are not allocated yet, don't do any accounting. */
375 if (pd->stats_cpu == NULL)
376 return;
377
378 /*
379 * Disabling interrupts to provide mutual exclusion between two
380 * writes on same cpu. It probably is not needed for 64bit. Not
381 * optimizing that case yet.
382 */
383 local_irq_save(flags);
384
385 stats_cpu = this_cpu_ptr(pd->stats_cpu);
386
387 u64_stats_update_begin(&stats_cpu->syncp);
388 stats_cpu->sectors += bytes >> 9;
389 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
390 1, direction, sync);
391 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
392 bytes, direction, sync);
393 u64_stats_update_end(&stats_cpu->syncp);
394 local_irq_restore(flags);
395 }
396 EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
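/*
 * Illustrative call from a policy (a sketch, mirroring how CFQ and the
 * throttling code account a dispatched request):
 *
 *	blkiocg_update_dispatch_stats(blkg, pol, blk_rq_bytes(rq),
 *				      rq_data_dir(rq), rq_is_sync(rq));
 *
 * The bytes >> 9 above converts the byte count into 512-byte sectors.
 */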
397
398 void blkiocg_update_completion_stats(struct blkio_group *blkg,
399 struct blkio_policy_type *pol,
400 uint64_t start_time,
401 uint64_t io_start_time, bool direction,
402 bool sync)
403 {
404 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
405 unsigned long long now = sched_clock();
406
407 lockdep_assert_held(blkg->q->queue_lock);
408
409 u64_stats_update_begin(&stats->syncp);
410 if (time_after64(now, io_start_time))
411 blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
412 now - io_start_time, direction, sync);
413 if (time_after64(io_start_time, start_time))
414 blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
415 io_start_time - start_time, direction, sync);
416 u64_stats_update_end(&stats->syncp);
417 }
418 EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
419
420 /* Merged stats are updated under the queue_lock. */
421 void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
422 struct blkio_policy_type *pol,
423 bool direction, bool sync)
424 {
425 struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
426
427 lockdep_assert_held(blkg->q->queue_lock);
428
429 u64_stats_update_begin(&stats->syncp);
430 blkio_add_stat(stats->stat_arr[BLKIO_STAT_MERGED], 1, direction, sync);
431 u64_stats_update_end(&stats->syncp);
432 }
433 EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
434
435 /*
436 * Worker for allocating per cpu stat for blk groups. This is scheduled on
437 * the system_nrt_wq once there are some groups on the alloc_list waiting
438 * for allocation.
439 */
440 static void blkio_stat_alloc_fn(struct work_struct *work)
441 {
442 static void *pcpu_stats[BLKIO_NR_POLICIES];
443 struct delayed_work *dwork = to_delayed_work(work);
444 struct blkio_group *blkg;
445 int i;
446 bool empty = false;
447
448 alloc_stats:
449 for (i = 0; i < BLKIO_NR_POLICIES; i++) {
450 if (pcpu_stats[i] != NULL)
451 continue;
452
453 pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);
454
455 /* Allocation failed. Try again after some time. */
456 if (pcpu_stats[i] == NULL) {
457 queue_delayed_work(system_nrt_wq, dwork,
458 msecs_to_jiffies(10));
459 return;
460 }
461 }
462
463 spin_lock_irq(&blkio_list_lock);
464 spin_lock(&alloc_list_lock);
465
466 /* alloc_list may be empty if the cgroup got deleted or the queue exited. */
467 if (!list_empty(&alloc_list)) {
468 blkg = list_first_entry(&alloc_list, struct blkio_group,
469 alloc_node);
470 for (i = 0; i < BLKIO_NR_POLICIES; i++) {
471 struct blkg_policy_data *pd = blkg->pd[i];
472
473 if (blkio_policy[i] && pd && !pd->stats_cpu)
474 swap(pd->stats_cpu, pcpu_stats[i]);
475 }
476
477 list_del_init(&blkg->alloc_node);
478 }
479
480 empty = list_empty(&alloc_list);
481
482 spin_unlock(&alloc_list_lock);
483 spin_unlock_irq(&blkio_list_lock);
484
485 if (!empty)
486 goto alloc_stats;
487 }
488
489 /**
490 * blkg_free - free a blkg
491 * @blkg: blkg to free
492 *
493 * Free @blkg which may be partially allocated.
494 */
495 static void blkg_free(struct blkio_group *blkg)
496 {
497 int i;
498
499 if (!blkg)
500 return;
501
502 for (i = 0; i < BLKIO_NR_POLICIES; i++) {
503 struct blkg_policy_data *pd = blkg->pd[i];
504
505 if (pd) {
506 free_percpu(pd->stats_cpu);
507 kfree(pd);
508 }
509 }
510
511 kfree(blkg);
512 }
513
514 /**
515 * blkg_alloc - allocate a blkg
516 * @blkcg: block cgroup the new blkg is associated with
517 * @q: request_queue the new blkg is associated with
518 *
519  * Allocate a new blkg associating @blkcg and @q.
520 */
521 static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
522 struct request_queue *q)
523 {
524 struct blkio_group *blkg;
525 int i;
526
527 /* alloc and init base part */
528 blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
529 if (!blkg)
530 return NULL;
531
532 blkg->q = q;
533 INIT_LIST_HEAD(&blkg->q_node);
534 INIT_LIST_HEAD(&blkg->alloc_node);
535 blkg->blkcg = blkcg;
536 blkg->refcnt = 1;
537 cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
538
539 for (i = 0; i < BLKIO_NR_POLICIES; i++) {
540 struct blkio_policy_type *pol = blkio_policy[i];
541 struct blkg_policy_data *pd;
542
543 if (!pol)
544 continue;
545
546 /* alloc per-policy data and attach it to blkg */
547 pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
548 q->node);
549 if (!pd) {
550 blkg_free(blkg);
551 return NULL;
552 }
553
554 blkg->pd[i] = pd;
555 pd->blkg = blkg;
556 }
557
558 /* invoke per-policy init */
559 for (i = 0; i < BLKIO_NR_POLICIES; i++) {
560 struct blkio_policy_type *pol = blkio_policy[i];
561
562 if (pol)
563 pol->ops.blkio_init_group_fn(blkg);
564 }
565
566 return blkg;
567 }
568
569 struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
570 struct request_queue *q,
571 enum blkio_policy_id plid,
572 bool for_root)
573 __releases(q->queue_lock) __acquires(q->queue_lock)
574 {
575 struct blkio_group *blkg;
576
577 WARN_ON_ONCE(!rcu_read_lock_held());
578 lockdep_assert_held(q->queue_lock);
579
580 /*
581 * This could be the first entry point of blkcg implementation and
582 * we shouldn't allow anything to go through for a bypassing queue.
583 * The following can be removed if blkg lookup is guaranteed to
584 * fail on a bypassing queue.
585 */
586 if (unlikely(blk_queue_bypass(q)) && !for_root)
587 return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
588
589 blkg = blkg_lookup(blkcg, q);
590 if (blkg)
591 return blkg;
592
593 /* blkg holds a reference to blkcg */
594 if (!css_tryget(&blkcg->css))
595 return ERR_PTR(-EINVAL);
596
597 /*
598 * Allocate and initialize.
599 */
600 blkg = blkg_alloc(blkcg, q);
601
602 /* did alloc fail? */
603 if (unlikely(!blkg)) {
604 blkg = ERR_PTR(-ENOMEM);
605 goto out;
606 }
607
608 /* insert */
609 spin_lock(&blkcg->lock);
610 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
611 list_add(&blkg->q_node, &q->blkg_list);
612 spin_unlock(&blkcg->lock);
613
614 spin_lock(&alloc_list_lock);
615 list_add(&blkg->alloc_node, &alloc_list);
616 /* Queue per cpu stat allocation to the worker thread. */
617 queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
618 spin_unlock(&alloc_list_lock);
619 out:
620 return blkg;
621 }
622 EXPORT_SYMBOL_GPL(blkg_lookup_create);
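/*
 * Illustrative caller pattern (see blkio_policy_parse_and_set() below for
 * the real thing):
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q, plid, false);
 *	spin_unlock_irq(q->queue_lock);
 *	...
 *	rcu_read_unlock();
 *
 * An -EBUSY return from a bypassing queue is typically handled by retrying.
 */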
623
624 /* called under rcu_read_lock(). */
625 struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
626 struct request_queue *q)
627 {
628 struct blkio_group *blkg;
629 struct hlist_node *n;
630
631 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
632 if (blkg->q == q)
633 return blkg;
634 return NULL;
635 }
636 EXPORT_SYMBOL_GPL(blkg_lookup);
637
638 static void blkg_destroy(struct blkio_group *blkg)
639 {
640 struct request_queue *q = blkg->q;
641 struct blkio_cgroup *blkcg = blkg->blkcg;
642
643 lockdep_assert_held(q->queue_lock);
644 lockdep_assert_held(&blkcg->lock);
645
646 /* Something wrong if we are trying to remove same group twice */
647 WARN_ON_ONCE(list_empty(&blkg->q_node));
648 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
649 list_del_init(&blkg->q_node);
650 hlist_del_init_rcu(&blkg->blkcg_node);
651
652 spin_lock(&alloc_list_lock);
653 list_del_init(&blkg->alloc_node);
654 spin_unlock(&alloc_list_lock);
655
656 /*
657 * Put the reference taken at the time of creation so that when all
658 * queues are gone, group can be destroyed.
659 */
660 blkg_put(blkg);
661 }
662
663 /*
664 * XXX: This updates blkg policy data in-place for root blkg, which is
665 * necessary across elevator switch and policy registration as root blkgs
666 * aren't shot down. This broken and racy implementation is temporary.
667 * Eventually, blkg shoot down will be replaced by proper in-place update.
668 */
669 void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
670 {
671 struct blkio_policy_type *pol = blkio_policy[plid];
672 struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
673 struct blkg_policy_data *pd;
674
675 if (!blkg)
676 return;
677
678 kfree(blkg->pd[plid]);
679 blkg->pd[plid] = NULL;
680
681 if (!pol)
682 return;
683
684 	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
685 	if (WARN_ON_ONCE(!pd))
		return;
686
687 pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
688 WARN_ON_ONCE(!pd->stats_cpu);
689
690 blkg->pd[plid] = pd;
691 pd->blkg = blkg;
692 pol->ops.blkio_init_group_fn(blkg);
693 }
694 EXPORT_SYMBOL_GPL(update_root_blkg_pd);
695
696 /**
697 * blkg_destroy_all - destroy all blkgs associated with a request_queue
698 * @q: request_queue of interest
699 * @destroy_root: whether to destroy root blkg or not
700 *
701 * Destroy blkgs associated with @q. If @destroy_root is %true, all are
702 * destroyed; otherwise, root blkg is left alone.
703 */
704 void blkg_destroy_all(struct request_queue *q, bool destroy_root)
705 {
706 struct blkio_group *blkg, *n;
707
708 spin_lock_irq(q->queue_lock);
709
710 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
711 struct blkio_cgroup *blkcg = blkg->blkcg;
712
713 /* skip root? */
714 if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
715 continue;
716
717 spin_lock(&blkcg->lock);
718 blkg_destroy(blkg);
719 spin_unlock(&blkcg->lock);
720 }
721
722 spin_unlock_irq(q->queue_lock);
723 }
724 EXPORT_SYMBOL_GPL(blkg_destroy_all);
725
726 static void blkg_rcu_free(struct rcu_head *rcu_head)
727 {
728 blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
729 }
730
731 void __blkg_release(struct blkio_group *blkg)
732 {
733 /* release the extra blkcg reference this blkg has been holding */
734 css_put(&blkg->blkcg->css);
735
736 /*
737 * A group is freed in rcu manner. But having an rcu lock does not
738 * mean that one can access all the fields of blkg and assume these
739 * are valid. For example, don't try to follow throtl_data and
740 * request queue links.
741 *
742	 * Having a reference to blkg under an rcu allows access only to
743	 * values local to groups, like group stats and group rate limits.
744 */
745 call_rcu(&blkg->rcu_head, blkg_rcu_free);
746 }
747 EXPORT_SYMBOL_GPL(__blkg_release);
748
749 static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
750 {
751 struct blkg_policy_data *pd = blkg->pd[plid];
752 int cpu;
753
754 if (pd->stats_cpu == NULL)
755 return;
756
757 for_each_possible_cpu(cpu) {
758 struct blkio_group_stats_cpu *sc =
759 per_cpu_ptr(pd->stats_cpu, cpu);
760
761 sc->sectors = 0;
762 memset(sc->stat_arr_cpu, 0, sizeof(sc->stat_arr_cpu));
763 }
764 }
765
766 static int
767 blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
768 {
769 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
770 struct blkio_group *blkg;
771 struct hlist_node *n;
772 int i;
773
774 spin_lock(&blkio_list_lock);
775 spin_lock_irq(&blkcg->lock);
776
777 /*
778 * Note that stat reset is racy - it doesn't synchronize against
779 * stat updates. This is a debug feature which shouldn't exist
780 * anyway. If you get hit by a race, retry.
781 */
782 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
783 struct blkio_policy_type *pol;
784
785 list_for_each_entry(pol, &blkio_list, list) {
786 struct blkg_policy_data *pd = blkg->pd[pol->plid];
787 struct blkio_group_stats *stats = &pd->stats;
788
789 /* queued stats shouldn't be cleared */
790 for (i = 0; i < ARRAY_SIZE(stats->stat_arr); i++)
791 if (i != BLKIO_STAT_QUEUED)
792 memset(stats->stat_arr[i], 0,
793 sizeof(stats->stat_arr[i]));
794 stats->time = 0;
795 #ifdef CONFIG_DEBUG_BLK_CGROUP
796 memset((void *)stats + BLKG_STATS_DEBUG_CLEAR_START, 0,
797 BLKG_STATS_DEBUG_CLEAR_SIZE);
798 #endif
799 blkio_reset_stats_cpu(blkg, pol->plid);
800 }
801 }
802
803 spin_unlock_irq(&blkcg->lock);
804 spin_unlock(&blkio_list_lock);
805 return 0;
806 }
807
808 static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
809 char *str, int chars_left, bool diskname_only)
810 {
811 snprintf(str, chars_left, "%s", dname);
812 chars_left -= strlen(str);
813 if (chars_left <= 0) {
814 		printk(KERN_WARNING
815 		       "Possibly incorrect cgroup stat display format\n");
816 return;
817 }
818 if (diskname_only)
819 return;
820 switch (type) {
821 case BLKIO_STAT_READ:
822 strlcat(str, " Read", chars_left);
823 break;
824 case BLKIO_STAT_WRITE:
825 strlcat(str, " Write", chars_left);
826 break;
827 case BLKIO_STAT_SYNC:
828 strlcat(str, " Sync", chars_left);
829 break;
830 case BLKIO_STAT_ASYNC:
831 strlcat(str, " Async", chars_left);
832 break;
833 case BLKIO_STAT_TOTAL:
834 strlcat(str, " Total", chars_left);
835 break;
836 default:
837 strlcat(str, " Invalid", chars_left);
838 }
839 }
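/*
 * For example, with dname "8:16" and type BLKIO_STAT_READ this builds the
 * key "8:16 Read", which is how per-device entries appear in the blkio
 * stat files.
 */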
840
841 static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
842 enum stat_type_cpu type, enum stat_sub_type sub_type)
843 {
844 struct blkg_policy_data *pd = blkg->pd[plid];
845 int cpu;
846 struct blkio_group_stats_cpu *stats_cpu;
847 u64 val = 0, tval;
848
849 if (pd->stats_cpu == NULL)
850 return val;
851
852 for_each_possible_cpu(cpu) {
853 unsigned int start;
854 stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);
855
856 do {
857 start = u64_stats_fetch_begin(&stats_cpu->syncp);
858 if (type == BLKIO_STAT_CPU_SECTORS)
859 tval = stats_cpu->sectors;
860 else
861 tval = stats_cpu->stat_arr_cpu[type][sub_type];
862 		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));
863
864 val += tval;
865 }
866
867 return val;
868 }
869
870 static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
871 struct cgroup_map_cb *cb, const char *dname,
872 enum stat_type_cpu type)
873 {
874 uint64_t disk_total, val;
875 char key_str[MAX_KEY_LEN];
876 enum stat_sub_type sub_type;
877
878 if (type == BLKIO_STAT_CPU_SECTORS) {
879 val = blkio_read_stat_cpu(blkg, plid, type, 0);
880 blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
881 cb->fill(cb, key_str, val);
882 return val;
883 }
884
885 for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
886 sub_type++) {
887 blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
888 false);
889 val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
890 cb->fill(cb, key_str, val);
891 }
892
893 disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
894 blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);
895
896 blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
897 false);
898 cb->fill(cb, key_str, disk_total);
899 return disk_total;
900 }
901
902 static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
903 struct cgroup_map_cb *cb, const char *dname,
904 enum stat_type type)
905 {
906 struct blkio_group_stats *stats = &blkg->pd[plid]->stats;
907 uint64_t v = 0, disk_total = 0;
908 char key_str[MAX_KEY_LEN];
909 unsigned int sync_start;
910 int st;
911
912 if (type >= BLKIO_STAT_ARR_NR) {
913 do {
914 sync_start = u64_stats_fetch_begin(&stats->syncp);
915 switch (type) {
916 case BLKIO_STAT_TIME:
917 v = stats->time;
918 break;
919 #ifdef CONFIG_DEBUG_BLK_CGROUP
920 case BLKIO_STAT_UNACCOUNTED_TIME:
921 v = stats->unaccounted_time;
922 break;
923 case BLKIO_STAT_AVG_QUEUE_SIZE: {
924 uint64_t samples = stats->avg_queue_size_samples;
925
926 if (samples) {
927 v = stats->avg_queue_size_sum;
928 do_div(v, samples);
929 }
930 break;
931 }
932 case BLKIO_STAT_IDLE_TIME:
933 v = stats->idle_time;
934 break;
935 case BLKIO_STAT_EMPTY_TIME:
936 v = stats->empty_time;
937 break;
938 case BLKIO_STAT_DEQUEUE:
939 v = stats->dequeue;
940 break;
941 case BLKIO_STAT_GROUP_WAIT_TIME:
942 v = stats->group_wait_time;
943 break;
944 #endif
945 default:
946 WARN_ON_ONCE(1);
947 }
948 } while (u64_stats_fetch_retry(&stats->syncp, sync_start));
949
950 blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
951 cb->fill(cb, key_str, v);
952 return v;
953 }
954
955 for (st = BLKIO_STAT_READ; st < BLKIO_STAT_TOTAL; st++) {
956 do {
957 sync_start = u64_stats_fetch_begin(&stats->syncp);
958 v = stats->stat_arr[type][st];
959 } while (u64_stats_fetch_retry(&stats->syncp, sync_start));
960
961 blkio_get_key_name(st, dname, key_str, MAX_KEY_LEN, false);
962 cb->fill(cb, key_str, v);
963 if (st == BLKIO_STAT_READ || st == BLKIO_STAT_WRITE)
964 disk_total += v;
965 }
966
967 blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
968 false);
969 cb->fill(cb, key_str, disk_total);
970 return disk_total;
971 }
972
973 static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
974 int fileid, struct blkio_cgroup *blkcg)
975 {
976 struct gendisk *disk = NULL;
977 struct blkio_group *blkg = NULL;
978 struct blkg_policy_data *pd;
979 char *s[4], *p, *major_s = NULL, *minor_s = NULL;
980 unsigned long major, minor;
981 int i = 0, ret = -EINVAL;
982 int part;
983 dev_t dev;
984 u64 temp;
985
986 memset(s, 0, sizeof(s));
987
988 while ((p = strsep(&buf, " ")) != NULL) {
989 if (!*p)
990 continue;
991
992 s[i++] = p;
993
994 		/* Prevent inputting too many values */
995 if (i == 3)
996 break;
997 }
998
999 if (i != 2)
1000 goto out;
1001
1002 p = strsep(&s[0], ":");
1003 if (p != NULL)
1004 major_s = p;
1005 else
1006 goto out;
1007
1008 minor_s = s[0];
1009 if (!minor_s)
1010 goto out;
1011
1012 if (strict_strtoul(major_s, 10, &major))
1013 goto out;
1014
1015 if (strict_strtoul(minor_s, 10, &minor))
1016 goto out;
1017
1018 dev = MKDEV(major, minor);
1019
1020 if (strict_strtoull(s[1], 10, &temp))
1021 goto out;
1022
1023 disk = get_gendisk(dev, &part);
1024 if (!disk || part)
1025 goto out;
1026
1027 rcu_read_lock();
1028
1029 spin_lock_irq(disk->queue->queue_lock);
1030 blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
1031 spin_unlock_irq(disk->queue->queue_lock);
1032
1033 if (IS_ERR(blkg)) {
1034 ret = PTR_ERR(blkg);
1035 goto out_unlock;
1036 }
1037
1038 pd = blkg->pd[plid];
1039
1040 switch (plid) {
1041 case BLKIO_POLICY_PROP:
1042 if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
1043 temp > BLKIO_WEIGHT_MAX)
1044 goto out_unlock;
1045
1046 pd->conf.weight = temp;
1047 blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
1048 break;
1049 case BLKIO_POLICY_THROTL:
1050 switch(fileid) {
1051 case BLKIO_THROTL_read_bps_device:
1052 pd->conf.bps[READ] = temp;
1053 blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
1054 break;
1055 case BLKIO_THROTL_write_bps_device:
1056 pd->conf.bps[WRITE] = temp;
1057 blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
1058 break;
1059 case BLKIO_THROTL_read_iops_device:
1060 if (temp > THROTL_IOPS_MAX)
1061 goto out_unlock;
1062 pd->conf.iops[READ] = temp;
1063 blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
1064 break;
1065 case BLKIO_THROTL_write_iops_device:
1066 if (temp > THROTL_IOPS_MAX)
1067 goto out_unlock;
1068 pd->conf.iops[WRITE] = temp;
1069 blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
1070 break;
1071 }
1072 break;
1073 default:
1074 BUG();
1075 }
1076 ret = 0;
1077 out_unlock:
1078 rcu_read_unlock();
1079 out:
1080 put_disk(disk);
1081
1082 /*
1083	 * If the queue was bypassing, we should retry.  Do so after a short
1084	 * msleep().  It isn't strictly necessary but the queue can be
1085	 * bypassing for some time and it's always nice to avoid busy
1086	 * looping.
1087 */
1088 if (ret == -EBUSY) {
1089 msleep(10);
1090 return restart_syscall();
1091 }
1092 return ret;
1093 }
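/*
 * The accepted input is "<major>:<minor> <value>".  For example
 * (illustrative), writing "8:16 1048576" to blkio.throttle.read_bps_device
 * caps reads on device 8:16 at 1 MiB/s, while writing "8:16 0" clears the
 * limit (the temp ?: -1 above maps 0 to "unlimited" for the policy).
 */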
1094
1095 static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
1096 const char *buffer)
1097 {
1098 int ret = 0;
1099 char *buf;
1100 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
1101 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1102 int fileid = BLKIOFILE_ATTR(cft->private);
1103
1104 buf = kstrdup(buffer, GFP_KERNEL);
1105 if (!buf)
1106 return -ENOMEM;
1107
1108 ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
1109 kfree(buf);
1110 return ret;
1111 }
1112
1113 static const char *blkg_dev_name(struct blkio_group *blkg)
1114 {
1115 /* some drivers (floppy) instantiate a queue w/o disk registered */
1116 if (blkg->q->backing_dev_info.dev)
1117 return dev_name(blkg->q->backing_dev_info.dev);
1118 return NULL;
1119 }
1120
1121 static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
1122 struct seq_file *m)
1123 {
1124 int plid = BLKIOFILE_POLICY(cft->private);
1125 int fileid = BLKIOFILE_ATTR(cft->private);
1126 struct blkg_policy_data *pd = blkg->pd[plid];
1127 const char *dname = blkg_dev_name(blkg);
1128 int rw = WRITE;
1129
1130 if (!dname)
1131 return;
1132
1133 switch (plid) {
1134 case BLKIO_POLICY_PROP:
1135 if (pd->conf.weight)
1136 seq_printf(m, "%s\t%u\n",
1137 dname, pd->conf.weight);
1138 break;
1139 case BLKIO_POLICY_THROTL:
1140 switch (fileid) {
1141 case BLKIO_THROTL_read_bps_device:
1142 rw = READ;
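		/* fall through */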
1143 case BLKIO_THROTL_write_bps_device:
1144 if (pd->conf.bps[rw])
1145 seq_printf(m, "%s\t%llu\n",
1146 dname, pd->conf.bps[rw]);
1147 break;
1148 case BLKIO_THROTL_read_iops_device:
1149 rw = READ;
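		/* fall through */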
1150 case BLKIO_THROTL_write_iops_device:
1151 if (pd->conf.iops[rw])
1152 seq_printf(m, "%s\t%u\n",
1153 dname, pd->conf.iops[rw]);
1154 break;
1155 }
1156 break;
1157 default:
1158 BUG();
1159 }
1160 }
1161
1162 /* cgroup files which read their data from policy nodes end up here */
1163 static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
1164 struct seq_file *m)
1165 {
1166 struct blkio_group *blkg;
1167 struct hlist_node *n;
1168
1169 spin_lock_irq(&blkcg->lock);
1170 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
1171 blkio_print_group_conf(cft, blkg, m);
1172 spin_unlock_irq(&blkcg->lock);
1173 }
1174
1175 static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
1176 struct seq_file *m)
1177 {
1178 struct blkio_cgroup *blkcg;
1179 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1180 int name = BLKIOFILE_ATTR(cft->private);
1181
1182 blkcg = cgroup_to_blkio_cgroup(cgrp);
1183
1184 switch(plid) {
1185 case BLKIO_POLICY_PROP:
1186 switch(name) {
1187 case BLKIO_PROP_weight_device:
1188 blkio_read_conf(cft, blkcg, m);
1189 return 0;
1190 default:
1191 BUG();
1192 }
1193 break;
1194 case BLKIO_POLICY_THROTL:
1195 switch(name){
1196 case BLKIO_THROTL_read_bps_device:
1197 case BLKIO_THROTL_write_bps_device:
1198 case BLKIO_THROTL_read_iops_device:
1199 case BLKIO_THROTL_write_iops_device:
1200 blkio_read_conf(cft, blkcg, m);
1201 return 0;
1202 default:
1203 BUG();
1204 }
1205 break;
1206 default:
1207 BUG();
1208 }
1209
1210 return 0;
1211 }
1212
1213 static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
1214 struct cftype *cft, struct cgroup_map_cb *cb,
1215 enum stat_type type, bool show_total, bool pcpu)
1216 {
1217 struct blkio_group *blkg;
1218 struct hlist_node *n;
1219 uint64_t cgroup_total = 0;
1220
1221 spin_lock_irq(&blkcg->lock);
1222
1223 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1224 const char *dname = blkg_dev_name(blkg);
1225 int plid = BLKIOFILE_POLICY(cft->private);
1226
1227 if (!dname)
1228 continue;
1229 if (pcpu)
1230 cgroup_total += blkio_get_stat_cpu(blkg, plid,
1231 cb, dname, type);
1232 else
1233 cgroup_total += blkio_get_stat(blkg, plid,
1234 cb, dname, type);
1235 }
1236 if (show_total)
1237 cb->fill(cb, "Total", cgroup_total);
1238
1239 spin_unlock_irq(&blkcg->lock);
1240 return 0;
1241 }
1242
1243 /* All map-type cgroup files are serviced by this function */
1244 static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
1245 struct cgroup_map_cb *cb)
1246 {
1247 struct blkio_cgroup *blkcg;
1248 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1249 int name = BLKIOFILE_ATTR(cft->private);
1250
1251 blkcg = cgroup_to_blkio_cgroup(cgrp);
1252
1253 switch(plid) {
1254 case BLKIO_POLICY_PROP:
1255 switch(name) {
1256 case BLKIO_PROP_time:
1257 return blkio_read_blkg_stats(blkcg, cft, cb,
1258 BLKIO_STAT_TIME, 0, 0);
1259 case BLKIO_PROP_sectors:
1260 return blkio_read_blkg_stats(blkcg, cft, cb,
1261 BLKIO_STAT_CPU_SECTORS, 0, 1);
1262 case BLKIO_PROP_io_service_bytes:
1263 return blkio_read_blkg_stats(blkcg, cft, cb,
1264 BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1265 case BLKIO_PROP_io_serviced:
1266 return blkio_read_blkg_stats(blkcg, cft, cb,
1267 BLKIO_STAT_CPU_SERVICED, 1, 1);
1268 case BLKIO_PROP_io_service_time:
1269 return blkio_read_blkg_stats(blkcg, cft, cb,
1270 BLKIO_STAT_SERVICE_TIME, 1, 0);
1271 case BLKIO_PROP_io_wait_time:
1272 return blkio_read_blkg_stats(blkcg, cft, cb,
1273 BLKIO_STAT_WAIT_TIME, 1, 0);
1274 case BLKIO_PROP_io_merged:
1275 return blkio_read_blkg_stats(blkcg, cft, cb,
1276 BLKIO_STAT_MERGED, 1, 0);
1277 case BLKIO_PROP_io_queued:
1278 return blkio_read_blkg_stats(blkcg, cft, cb,
1279 BLKIO_STAT_QUEUED, 1, 0);
1280 #ifdef CONFIG_DEBUG_BLK_CGROUP
1281 case BLKIO_PROP_unaccounted_time:
1282 return blkio_read_blkg_stats(blkcg, cft, cb,
1283 BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
1284 case BLKIO_PROP_dequeue:
1285 return blkio_read_blkg_stats(blkcg, cft, cb,
1286 BLKIO_STAT_DEQUEUE, 0, 0);
1287 case BLKIO_PROP_avg_queue_size:
1288 return blkio_read_blkg_stats(blkcg, cft, cb,
1289 BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
1290 case BLKIO_PROP_group_wait_time:
1291 return blkio_read_blkg_stats(blkcg, cft, cb,
1292 BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
1293 case BLKIO_PROP_idle_time:
1294 return blkio_read_blkg_stats(blkcg, cft, cb,
1295 BLKIO_STAT_IDLE_TIME, 0, 0);
1296 case BLKIO_PROP_empty_time:
1297 return blkio_read_blkg_stats(blkcg, cft, cb,
1298 BLKIO_STAT_EMPTY_TIME, 0, 0);
1299 #endif
1300 default:
1301 BUG();
1302 }
1303 break;
1304 case BLKIO_POLICY_THROTL:
1305 switch(name){
1306 case BLKIO_THROTL_io_service_bytes:
1307 return blkio_read_blkg_stats(blkcg, cft, cb,
1308 BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1309 case BLKIO_THROTL_io_serviced:
1310 return blkio_read_blkg_stats(blkcg, cft, cb,
1311 BLKIO_STAT_CPU_SERVICED, 1, 1);
1312 default:
1313 BUG();
1314 }
1315 break;
1316 default:
1317 BUG();
1318 }
1319
1320 return 0;
1321 }
1322
1323 static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
1324 {
1325 struct blkio_group *blkg;
1326 struct hlist_node *n;
1327
1328 if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
1329 return -EINVAL;
1330
1331 spin_lock(&blkio_list_lock);
1332 spin_lock_irq(&blkcg->lock);
1333 blkcg->weight = (unsigned int)val;
1334
1335 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1336 struct blkg_policy_data *pd = blkg->pd[plid];
1337
1338 if (!pd->conf.weight)
1339 blkio_update_group_weight(blkg, plid, blkcg->weight);
1340 }
1341
1342 spin_unlock_irq(&blkcg->lock);
1343 spin_unlock(&blkio_list_lock);
1344 return 0;
1345 }
1346
1347 static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft) {
1348 struct blkio_cgroup *blkcg;
1349 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1350 int name = BLKIOFILE_ATTR(cft->private);
1351
1352 blkcg = cgroup_to_blkio_cgroup(cgrp);
1353
1354 switch(plid) {
1355 case BLKIO_POLICY_PROP:
1356 switch(name) {
1357 case BLKIO_PROP_weight:
1358 return (u64)blkcg->weight;
1359 }
1360 break;
1361 default:
1362 BUG();
1363 }
1364 return 0;
1365 }
1366
1367 static int
1368 blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1369 {
1370 struct blkio_cgroup *blkcg;
1371 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1372 int name = BLKIOFILE_ATTR(cft->private);
1373
1374 blkcg = cgroup_to_blkio_cgroup(cgrp);
1375
1376 switch(plid) {
1377 case BLKIO_POLICY_PROP:
1378 switch(name) {
1379 case BLKIO_PROP_weight:
1380 return blkio_weight_write(blkcg, plid, val);
1381 }
1382 break;
1383 default:
1384 BUG();
1385 }
1386
1387 return 0;
1388 }
1389
1390 struct cftype blkio_files[] = {
1391 {
1392 .name = "weight_device",
1393 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1394 BLKIO_PROP_weight_device),
1395 .read_seq_string = blkiocg_file_read,
1396 .write_string = blkiocg_file_write,
1397 .max_write_len = 256,
1398 },
1399 {
1400 .name = "weight",
1401 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1402 BLKIO_PROP_weight),
1403 .read_u64 = blkiocg_file_read_u64,
1404 .write_u64 = blkiocg_file_write_u64,
1405 },
1406 {
1407 .name = "time",
1408 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1409 BLKIO_PROP_time),
1410 .read_map = blkiocg_file_read_map,
1411 },
1412 {
1413 .name = "sectors",
1414 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1415 BLKIO_PROP_sectors),
1416 .read_map = blkiocg_file_read_map,
1417 },
1418 {
1419 .name = "io_service_bytes",
1420 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1421 BLKIO_PROP_io_service_bytes),
1422 .read_map = blkiocg_file_read_map,
1423 },
1424 {
1425 .name = "io_serviced",
1426 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1427 BLKIO_PROP_io_serviced),
1428 .read_map = blkiocg_file_read_map,
1429 },
1430 {
1431 .name = "io_service_time",
1432 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1433 BLKIO_PROP_io_service_time),
1434 .read_map = blkiocg_file_read_map,
1435 },
1436 {
1437 .name = "io_wait_time",
1438 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1439 BLKIO_PROP_io_wait_time),
1440 .read_map = blkiocg_file_read_map,
1441 },
1442 {
1443 .name = "io_merged",
1444 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1445 BLKIO_PROP_io_merged),
1446 .read_map = blkiocg_file_read_map,
1447 },
1448 {
1449 .name = "io_queued",
1450 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1451 BLKIO_PROP_io_queued),
1452 .read_map = blkiocg_file_read_map,
1453 },
1454 {
1455 .name = "reset_stats",
1456 .write_u64 = blkiocg_reset_stats,
1457 },
1458 #ifdef CONFIG_BLK_DEV_THROTTLING
1459 {
1460 .name = "throttle.read_bps_device",
1461 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1462 BLKIO_THROTL_read_bps_device),
1463 .read_seq_string = blkiocg_file_read,
1464 .write_string = blkiocg_file_write,
1465 .max_write_len = 256,
1466 },
1467
1468 {
1469 .name = "throttle.write_bps_device",
1470 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1471 BLKIO_THROTL_write_bps_device),
1472 .read_seq_string = blkiocg_file_read,
1473 .write_string = blkiocg_file_write,
1474 .max_write_len = 256,
1475 },
1476
1477 {
1478 .name = "throttle.read_iops_device",
1479 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1480 BLKIO_THROTL_read_iops_device),
1481 .read_seq_string = blkiocg_file_read,
1482 .write_string = blkiocg_file_write,
1483 .max_write_len = 256,
1484 },
1485
1486 {
1487 .name = "throttle.write_iops_device",
1488 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1489 BLKIO_THROTL_write_iops_device),
1490 .read_seq_string = blkiocg_file_read,
1491 .write_string = blkiocg_file_write,
1492 .max_write_len = 256,
1493 },
1494 {
1495 .name = "throttle.io_service_bytes",
1496 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1497 BLKIO_THROTL_io_service_bytes),
1498 .read_map = blkiocg_file_read_map,
1499 },
1500 {
1501 .name = "throttle.io_serviced",
1502 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1503 BLKIO_THROTL_io_serviced),
1504 .read_map = blkiocg_file_read_map,
1505 },
1506 #endif /* CONFIG_BLK_DEV_THROTTLING */
1507
1508 #ifdef CONFIG_DEBUG_BLK_CGROUP
1509 {
1510 .name = "avg_queue_size",
1511 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1512 BLKIO_PROP_avg_queue_size),
1513 .read_map = blkiocg_file_read_map,
1514 },
1515 {
1516 .name = "group_wait_time",
1517 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1518 BLKIO_PROP_group_wait_time),
1519 .read_map = blkiocg_file_read_map,
1520 },
1521 {
1522 .name = "idle_time",
1523 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1524 BLKIO_PROP_idle_time),
1525 .read_map = blkiocg_file_read_map,
1526 },
1527 {
1528 .name = "empty_time",
1529 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1530 BLKIO_PROP_empty_time),
1531 .read_map = blkiocg_file_read_map,
1532 },
1533 {
1534 .name = "dequeue",
1535 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1536 BLKIO_PROP_dequeue),
1537 .read_map = blkiocg_file_read_map,
1538 },
1539 {
1540 .name = "unaccounted_time",
1541 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1542 BLKIO_PROP_unaccounted_time),
1543 .read_map = blkiocg_file_read_map,
1544 },
1545 #endif
1546 { } /* terminate */
1547 };
1548
1549 /**
1550 * blkiocg_pre_destroy - cgroup pre_destroy callback
1551 * @cgroup: cgroup of interest
1552 *
1553  * This function is called when @cgroup is about to go away and is responsible
1554 * for shooting down all blkgs associated with @cgroup. blkgs should be
1555 * removed while holding both q and blkcg locks. As blkcg lock is nested
1556 * inside q lock, this function performs reverse double lock dancing.
1557 *
1558 * This is the blkcg counterpart of ioc_release_fn().
1559 */
1560 static int blkiocg_pre_destroy(struct cgroup *cgroup)
1561 {
1562 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1563
1564 spin_lock_irq(&blkcg->lock);
1565
1566 while (!hlist_empty(&blkcg->blkg_list)) {
1567 struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
1568 struct blkio_group, blkcg_node);
1569 struct request_queue *q = blkg->q;
1570
1571 if (spin_trylock(q->queue_lock)) {
1572 blkg_destroy(blkg);
1573 spin_unlock(q->queue_lock);
1574 } else {
1575 spin_unlock_irq(&blkcg->lock);
1576 cpu_relax();
1577 spin_lock_irq(&blkcg->lock);
1578 }
1579 }
1580
1581 spin_unlock_irq(&blkcg->lock);
1582 return 0;
1583 }
1584
1585 static void blkiocg_destroy(struct cgroup *cgroup)
1586 {
1587 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1588
1589 if (blkcg != &blkio_root_cgroup)
1590 kfree(blkcg);
1591 }
1592
1593 static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
1594 {
1595 static atomic64_t id_seq = ATOMIC64_INIT(0);
1596 struct blkio_cgroup *blkcg;
1597 struct cgroup *parent = cgroup->parent;
1598
1599 if (!parent) {
1600 blkcg = &blkio_root_cgroup;
1601 goto done;
1602 }
1603
1604 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1605 if (!blkcg)
1606 return ERR_PTR(-ENOMEM);
1607
1608 blkcg->weight = BLKIO_WEIGHT_DEFAULT;
1609 blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
1610 done:
1611 spin_lock_init(&blkcg->lock);
1612 INIT_HLIST_HEAD(&blkcg->blkg_list);
1613
1614 return &blkcg->css;
1615 }
1616
1617 /**
1618 * blkcg_init_queue - initialize blkcg part of request queue
1619 * @q: request_queue to initialize
1620 *
1621 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1622 * part of new request_queue @q.
1623 *
1624 * RETURNS:
1625 * 0 on success, -errno on failure.
1626 */
1627 int blkcg_init_queue(struct request_queue *q)
1628 {
1629 int ret;
1630
1631 might_sleep();
1632
1633 ret = blk_throtl_init(q);
1634 if (ret)
1635 return ret;
1636
1637 mutex_lock(&all_q_mutex);
1638 INIT_LIST_HEAD(&q->all_q_node);
1639 list_add_tail(&q->all_q_node, &all_q_list);
1640 mutex_unlock(&all_q_mutex);
1641
1642 return 0;
1643 }
1644
1645 /**
1646 * blkcg_drain_queue - drain blkcg part of request_queue
1647 * @q: request_queue to drain
1648 *
1649 * Called from blk_drain_queue(). Responsible for draining blkcg part.
1650 */
1651 void blkcg_drain_queue(struct request_queue *q)
1652 {
1653 lockdep_assert_held(q->queue_lock);
1654
1655 blk_throtl_drain(q);
1656 }
1657
1658 /**
1659 * blkcg_exit_queue - exit and release blkcg part of request_queue
1660 * @q: request_queue being released
1661 *
1662 * Called from blk_release_queue(). Responsible for exiting blkcg part.
1663 */
1664 void blkcg_exit_queue(struct request_queue *q)
1665 {
1666 mutex_lock(&all_q_mutex);
1667 list_del_init(&q->all_q_node);
1668 mutex_unlock(&all_q_mutex);
1669
1670 blkg_destroy_all(q, true);
1671
1672 blk_throtl_exit(q);
1673 }
1674
1675 /*
1676  * We cannot support shared io contexts, as we have no means to support
1677 * two tasks with the same ioc in two different groups without major rework
1678 * of the main cic data structures. For now we allow a task to change
1679 * its cgroup only if it's the only owner of its ioc.
1680 */
1681 static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1682 {
1683 struct task_struct *task;
1684 struct io_context *ioc;
1685 int ret = 0;
1686
1687 /* task_lock() is needed to avoid races with exit_io_context() */
1688 cgroup_taskset_for_each(task, cgrp, tset) {
1689 task_lock(task);
1690 ioc = task->io_context;
1691 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1692 ret = -EINVAL;
1693 task_unlock(task);
1694 if (ret)
1695 break;
1696 }
1697 return ret;
1698 }
1699
1700 static void blkcg_bypass_start(void)
1701 __acquires(&all_q_mutex)
1702 {
1703 struct request_queue *q;
1704
1705 mutex_lock(&all_q_mutex);
1706
1707 list_for_each_entry(q, &all_q_list, all_q_node) {
1708 blk_queue_bypass_start(q);
1709 blkg_destroy_all(q, false);
1710 }
1711 }
1712
1713 static void blkcg_bypass_end(void)
1714 __releases(&all_q_mutex)
1715 {
1716 struct request_queue *q;
1717
1718 list_for_each_entry(q, &all_q_list, all_q_node)
1719 blk_queue_bypass_end(q);
1720
1721 mutex_unlock(&all_q_mutex);
1722 }
1723
1724 struct cgroup_subsys blkio_subsys = {
1725 .name = "blkio",
1726 .create = blkiocg_create,
1727 .can_attach = blkiocg_can_attach,
1728 .pre_destroy = blkiocg_pre_destroy,
1729 .destroy = blkiocg_destroy,
1730 .subsys_id = blkio_subsys_id,
1731 .base_cftypes = blkio_files,
1732 .module = THIS_MODULE,
1733 };
1734 EXPORT_SYMBOL_GPL(blkio_subsys);
1735
1736 void blkio_policy_register(struct blkio_policy_type *blkiop)
1737 {
1738 struct request_queue *q;
1739
1740 blkcg_bypass_start();
1741 spin_lock(&blkio_list_lock);
1742
1743 BUG_ON(blkio_policy[blkiop->plid]);
1744 blkio_policy[blkiop->plid] = blkiop;
1745 list_add_tail(&blkiop->list, &blkio_list);
1746
1747 spin_unlock(&blkio_list_lock);
1748 list_for_each_entry(q, &all_q_list, all_q_node)
1749 update_root_blkg_pd(q, blkiop->plid);
1750 blkcg_bypass_end();
1751 }
1752 EXPORT_SYMBOL_GPL(blkio_policy_register);
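/*
 * For example (illustrative), the proportional-weight scheduler registers a
 * struct blkio_policy_type with .plid == BLKIO_POLICY_PROP and the block
 * throttling code one with .plid == BLKIO_POLICY_THROTL, each calling
 * blkio_policy_register() at init time and blkio_policy_unregister() on
 * exit.
 */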
1753
1754 void blkio_policy_unregister(struct blkio_policy_type *blkiop)
1755 {
1756 struct request_queue *q;
1757
1758 blkcg_bypass_start();
1759 spin_lock(&blkio_list_lock);
1760
1761 BUG_ON(blkio_policy[blkiop->plid] != blkiop);
1762 blkio_policy[blkiop->plid] = NULL;
1763 list_del_init(&blkiop->list);
1764
1765 spin_unlock(&blkio_list_lock);
1766 list_for_each_entry(q, &all_q_list, all_q_node)
1767 update_root_blkg_pd(q, blkiop->plid);
1768 blkcg_bypass_end();
1769 }
1770 EXPORT_SYMBOL_GPL(blkio_policy_unregister);