block/blk-cgroup.c
1 /*
2 * Common Block IO controller cgroup interface
3 *
4 * Based on ideas and code from CFQ, CFS and BFQ:
5 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
6 *
7 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
8 * Paolo Valente <paolo.valente@unimore.it>
9 *
10 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
11 * Nauman Rafique <nauman@google.com>
12 */
13 #include <linux/ioprio.h>
14 #include <linux/seq_file.h>
15 #include <linux/kdev_t.h>
16 #include <linux/module.h>
17 #include <linux/err.h>
18 #include <linux/blkdev.h>
19 #include <linux/slab.h>
20 #include <linux/genhd.h>
21 #include <linux/delay.h>
22 #include "blk-cgroup.h"
23
24 #define MAX_KEY_LEN 100
25
26 static DEFINE_SPINLOCK(blkio_list_lock);
27 static LIST_HEAD(blkio_list);
28
29 struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
30 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
31
32 static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];
33
34 static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
35 struct cgroup *);
36 static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
37 struct cgroup_taskset *);
38 static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
39 struct cgroup_taskset *);
40 static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
41 static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
42
43 /* for encoding cft->private value on file */
44 #define BLKIOFILE_PRIVATE(x, val) (((x) << 16) | (val))
45 /* What policy owns the file, proportional or throttle */
46 #define BLKIOFILE_POLICY(val) (((val) >> 16) & 0xffff)
47 #define BLKIOFILE_ATTR(val) ((val) & 0xffff)
48
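/*
 * Illustrative sketch, not part of the original source: the encoding above
 * lets one read/write handler serve files owned by different policies.  The
 * owning policy id lands in the high 16 bits of cft->private and the
 * per-policy file id in the low 16 bits, e.g.:
 */
static inline void blkio_example_decode_private(void)
{
	int priv = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				     BLKIO_THROTL_read_bps_device);

	/* BLKIOFILE_POLICY(priv) == BLKIO_POLICY_THROTL */
	/* BLKIOFILE_ATTR(priv) == BLKIO_THROTL_read_bps_device */
	(void)priv;
}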
49 struct cgroup_subsys blkio_subsys = {
50 .name = "blkio",
51 .create = blkiocg_create,
52 .can_attach = blkiocg_can_attach,
53 .attach = blkiocg_attach,
54 .destroy = blkiocg_destroy,
55 .populate = blkiocg_populate,
56 .subsys_id = blkio_subsys_id,
57 .use_id = 1,
58 .module = THIS_MODULE,
59 };
60 EXPORT_SYMBOL_GPL(blkio_subsys);
61
62 static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
63 struct blkio_policy_node *pn)
64 {
65 list_add(&pn->node, &blkcg->policy_list);
66 }
67
68 static inline bool cftype_blkg_same_policy(struct cftype *cft,
69 struct blkio_group *blkg)
70 {
71 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
72
73 if (blkg->plid == plid)
74 return 1;
75
76 return 0;
77 }
78
79 /* Determines if policy node matches cgroup file being accessed */
80 static inline bool pn_matches_cftype(struct cftype *cft,
81 struct blkio_policy_node *pn)
82 {
83 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
84 int fileid = BLKIOFILE_ATTR(cft->private);
85
86 return (plid == pn->plid && fileid == pn->fileid);
87 }
88
89 /* Must be called with blkcg->lock held */
90 static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
91 {
92 list_del(&pn->node);
93 }
94
95 /* Must be called with blkcg->lock held */
96 static struct blkio_policy_node *
97 blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
98 enum blkio_policy_id plid, int fileid)
99 {
100 struct blkio_policy_node *pn;
101
102 list_for_each_entry(pn, &blkcg->policy_list, node) {
103 if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
104 return pn;
105 }
106
107 return NULL;
108 }
109
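/*
 * Illustrative sketch, not part of the original source: policy rules are
 * keyed by (device, policy id, file id), so looking up the per-device
 * weight rule for a hypothetical device 8:16 would look roughly like this,
 * with blkcg->lock held.
 */
static inline struct blkio_policy_node *
blkio_example_find_weight_rule(struct blkio_cgroup *blkcg)
{
	return blkio_policy_search_node(blkcg, MKDEV(8, 16),
					BLKIO_POLICY_PROP,
					BLKIO_PROP_weight_device);
}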
110 struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
111 {
112 return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
113 struct blkio_cgroup, css);
114 }
115 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
116
117 struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
118 {
119 return container_of(task_subsys_state(tsk, blkio_subsys_id),
120 struct blkio_cgroup, css);
121 }
122 EXPORT_SYMBOL_GPL(task_blkio_cgroup);
123
124 static inline void
125 blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
126 {
127 struct blkio_policy_type *blkiop;
128
129 list_for_each_entry(blkiop, &blkio_list, list) {
130 /* If this policy does not own the blkg, do not send updates */
131 if (blkiop->plid != blkg->plid)
132 continue;
133 if (blkiop->ops.blkio_update_group_weight_fn)
134 blkiop->ops.blkio_update_group_weight_fn(blkg->q,
135 blkg, weight);
136 }
137 }
138
139 static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
140 int fileid)
141 {
142 struct blkio_policy_type *blkiop;
143
144 list_for_each_entry(blkiop, &blkio_list, list) {
145
146 /* If this policy does not own the blkg, do not send updates */
147 if (blkiop->plid != blkg->plid)
148 continue;
149
150 if (fileid == BLKIO_THROTL_read_bps_device
151 && blkiop->ops.blkio_update_group_read_bps_fn)
152 blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
153 blkg, bps);
154
155 if (fileid == BLKIO_THROTL_write_bps_device
156 && blkiop->ops.blkio_update_group_write_bps_fn)
157 blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
158 blkg, bps);
159 }
160 }
161
162 static inline void blkio_update_group_iops(struct blkio_group *blkg,
163 unsigned int iops, int fileid)
164 {
165 struct blkio_policy_type *blkiop;
166
167 list_for_each_entry(blkiop, &blkio_list, list) {
168
169 /* If this policy does not own the blkg, do not send updates */
170 if (blkiop->plid != blkg->plid)
171 continue;
172
173 if (fileid == BLKIO_THROTL_read_iops_device
174 && blkiop->ops.blkio_update_group_read_iops_fn)
175 blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
176 blkg, iops);
177
178 if (fileid == BLKIO_THROTL_write_iops_device
179 && blkiop->ops.blkio_update_group_write_iops_fn)
180 blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
181 blkg, iops);
182 }
183 }
184
185 /*
186 * Add to the appropriate stat variable depending on the request type.
187 * This should be called with the blkg->stats_lock held.
188 */
189 static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
190 bool sync)
191 {
192 if (direction)
193 stat[BLKIO_STAT_WRITE] += add;
194 else
195 stat[BLKIO_STAT_READ] += add;
196 if (sync)
197 stat[BLKIO_STAT_SYNC] += add;
198 else
199 stat[BLKIO_STAT_ASYNC] += add;
200 }
201
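/*
 * Illustrative sketch, not part of the original source: a single
 * synchronous write bumps both the WRITE and the SYNC buckets of a stat
 * array, so READ + WRITE and SYNC + ASYNC each sum to the same total.
 * Must be called with blkg->stats_lock held, like blkio_add_stat() itself.
 */
static inline void blkio_example_account_queued_write(struct blkio_group *blkg)
{
	/* direction == true (write), sync == true */
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, true, true);
}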
202 /*
203 * Decrements the appropriate stat variable if non-zero depending on the
204 * request type. Panics on value being zero.
205 * This should be called with the blkg->stats_lock held.
206 */
207 static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
208 {
209 if (direction) {
210 BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
211 stat[BLKIO_STAT_WRITE]--;
212 } else {
213 BUG_ON(stat[BLKIO_STAT_READ] == 0);
214 stat[BLKIO_STAT_READ]--;
215 }
216 if (sync) {
217 BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
218 stat[BLKIO_STAT_SYNC]--;
219 } else {
220 BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
221 stat[BLKIO_STAT_ASYNC]--;
222 }
223 }
224
225 #ifdef CONFIG_DEBUG_BLK_CGROUP
226 /* This should be called with the blkg->stats_lock held. */
227 static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
228 struct blkio_group *curr_blkg)
229 {
230 if (blkio_blkg_waiting(&blkg->stats))
231 return;
232 if (blkg == curr_blkg)
233 return;
234 blkg->stats.start_group_wait_time = sched_clock();
235 blkio_mark_blkg_waiting(&blkg->stats);
236 }
237
238 /* This should be called with the blkg->stats_lock held. */
239 static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
240 {
241 unsigned long long now;
242
243 if (!blkio_blkg_waiting(stats))
244 return;
245
246 now = sched_clock();
247 if (time_after64(now, stats->start_group_wait_time))
248 stats->group_wait_time += now - stats->start_group_wait_time;
249 blkio_clear_blkg_waiting(stats);
250 }
251
252 /* This should be called with the blkg->stats_lock held. */
253 static void blkio_end_empty_time(struct blkio_group_stats *stats)
254 {
255 unsigned long long now;
256
257 if (!blkio_blkg_empty(stats))
258 return;
259
260 now = sched_clock();
261 if (time_after64(now, stats->start_empty_time))
262 stats->empty_time += now - stats->start_empty_time;
263 blkio_clear_blkg_empty(stats);
264 }
265
266 void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
267 {
268 unsigned long flags;
269
270 spin_lock_irqsave(&blkg->stats_lock, flags);
271 BUG_ON(blkio_blkg_idling(&blkg->stats));
272 blkg->stats.start_idle_time = sched_clock();
273 blkio_mark_blkg_idling(&blkg->stats);
274 spin_unlock_irqrestore(&blkg->stats_lock, flags);
275 }
276 EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
277
278 void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
279 {
280 unsigned long flags;
281 unsigned long long now;
282 struct blkio_group_stats *stats;
283
284 spin_lock_irqsave(&blkg->stats_lock, flags);
285 stats = &blkg->stats;
286 if (blkio_blkg_idling(stats)) {
287 now = sched_clock();
288 if (time_after64(now, stats->start_idle_time))
289 stats->idle_time += now - stats->start_idle_time;
290 blkio_clear_blkg_idling(stats);
291 }
292 spin_unlock_irqrestore(&blkg->stats_lock, flags);
293 }
294 EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
295
296 void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
297 {
298 unsigned long flags;
299 struct blkio_group_stats *stats;
300
301 spin_lock_irqsave(&blkg->stats_lock, flags);
302 stats = &blkg->stats;
303 stats->avg_queue_size_sum +=
304 stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
305 stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
306 stats->avg_queue_size_samples++;
307 blkio_update_group_wait_time(stats);
308 spin_unlock_irqrestore(&blkg->stats_lock, flags);
309 }
310 EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
311
312 void blkiocg_set_start_empty_time(struct blkio_group *blkg)
313 {
314 unsigned long flags;
315 struct blkio_group_stats *stats;
316
317 spin_lock_irqsave(&blkg->stats_lock, flags);
318 stats = &blkg->stats;
319
320 if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
321 stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
322 spin_unlock_irqrestore(&blkg->stats_lock, flags);
323 return;
324 }
325
326 /*
327 * The group is already marked empty. This can happen if a cfqq got a new
328 * request in the parent group and moved to this group while being added
329 * to the service tree. Just ignore the event and move on.
330 */
331 if (blkio_blkg_empty(stats)) {
332 spin_unlock_irqrestore(&blkg->stats_lock, flags);
333 return;
334 }
335
336 stats->start_empty_time = sched_clock();
337 blkio_mark_blkg_empty(stats);
338 spin_unlock_irqrestore(&blkg->stats_lock, flags);
339 }
340 EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
341
342 void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
343 unsigned long dequeue)
344 {
345 blkg->stats.dequeue += dequeue;
346 }
347 EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
348 #else
349 static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
350 struct blkio_group *curr_blkg) {}
351 static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
352 #endif
353
354 void blkiocg_update_io_add_stats(struct blkio_group *blkg,
355 struct blkio_group *curr_blkg, bool direction,
356 bool sync)
357 {
358 unsigned long flags;
359
360 spin_lock_irqsave(&blkg->stats_lock, flags);
361 blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
362 sync);
363 blkio_end_empty_time(&blkg->stats);
364 blkio_set_start_group_wait_time(blkg, curr_blkg);
365 spin_unlock_irqrestore(&blkg->stats_lock, flags);
366 }
367 EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
368
369 void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
370 bool direction, bool sync)
371 {
372 unsigned long flags;
373
374 spin_lock_irqsave(&blkg->stats_lock, flags);
375 blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
376 direction, sync);
377 spin_unlock_irqrestore(&blkg->stats_lock, flags);
378 }
379 EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
380
381 void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
382 unsigned long unaccounted_time)
383 {
384 unsigned long flags;
385
386 spin_lock_irqsave(&blkg->stats_lock, flags);
387 blkg->stats.time += time;
388 #ifdef CONFIG_DEBUG_BLK_CGROUP
389 blkg->stats.unaccounted_time += unaccounted_time;
390 #endif
391 spin_unlock_irqrestore(&blkg->stats_lock, flags);
392 }
393 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
394
395 /*
396 * should be called under rcu read lock or queue lock to make sure blkg pointer
397 * is valid.
398 */
399 void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
400 uint64_t bytes, bool direction, bool sync)
401 {
402 struct blkio_group_stats_cpu *stats_cpu;
403 unsigned long flags;
404
405 /*
406 * Disabling interrupts to provide mutual exclusion between two
407 * writes on same cpu. It probably is not needed for 64bit. Not
408 * optimizing that case yet.
409 */
410 local_irq_save(flags);
411
412 stats_cpu = this_cpu_ptr(blkg->stats_cpu);
413
414 u64_stats_update_begin(&stats_cpu->syncp);
415 stats_cpu->sectors += bytes >> 9;
416 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
417 1, direction, sync);
418 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
419 bytes, direction, sync);
420 u64_stats_update_end(&stats_cpu->syncp);
421 local_irq_restore(flags);
422 }
423 EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
424
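/*
 * Illustrative sketch, not part of the original source: a policy would
 * typically account a dispatched request roughly like this, with the blkg
 * kept valid by the RCU read lock or the queue lock as noted above.
 */
static inline void blkio_example_account_dispatch(struct blkio_group *blkg,
						  struct request *rq)
{
	blkiocg_update_dispatch_stats(blkg, blk_rq_bytes(rq),
				      rq_data_dir(rq), rq_is_sync(rq));
}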
425 void blkiocg_update_completion_stats(struct blkio_group *blkg,
426 uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
427 {
428 struct blkio_group_stats *stats;
429 unsigned long flags;
430 unsigned long long now = sched_clock();
431
432 spin_lock_irqsave(&blkg->stats_lock, flags);
433 stats = &blkg->stats;
434 if (time_after64(now, io_start_time))
435 blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
436 now - io_start_time, direction, sync);
437 if (time_after64(io_start_time, start_time))
438 blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
439 io_start_time - start_time, direction, sync);
440 spin_unlock_irqrestore(&blkg->stats_lock, flags);
441 }
442 EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
443
444 /* Merged stats are per cpu. */
445 void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
446 bool sync)
447 {
448 struct blkio_group_stats_cpu *stats_cpu;
449 unsigned long flags;
450
451 /*
452 * Disabling interrupts to provide mutual exclusion between two
453 * writes on same cpu. It probably is not needed for 64bit. Not
454 * optimizing that case yet.
455 */
456 local_irq_save(flags);
457
458 stats_cpu = this_cpu_ptr(blkg->stats_cpu);
459
460 u64_stats_update_begin(&stats_cpu->syncp);
461 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
462 direction, sync);
463 u64_stats_update_end(&stats_cpu->syncp);
464 local_irq_restore(flags);
465 }
466 EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
467
468 struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
469 struct request_queue *q,
470 enum blkio_policy_id plid,
471 bool for_root)
472 __releases(q->queue_lock) __acquires(q->queue_lock)
473 {
474 struct blkio_policy_type *pol = blkio_policy[plid];
475 struct blkio_group *blkg, *new_blkg;
476
477 WARN_ON_ONCE(!rcu_read_lock_held());
478 lockdep_assert_held(q->queue_lock);
479
480 /*
481 * This could be the first entry point of blkcg implementation and
482 * we shouldn't allow anything to go through for a bypassing queue.
483 * The following can be removed if blkg lookup is guaranteed to
484 * fail on a bypassing queue.
485 */
486 if (unlikely(blk_queue_bypass(q)) && !for_root)
487 return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
488
489 blkg = blkg_lookup(blkcg, q, plid);
490 if (blkg)
491 return blkg;
492
493 if (!css_tryget(&blkcg->css))
494 return ERR_PTR(-EINVAL);
495
496 /*
497 * Allocate and initialize.
498 *
499 * FIXME: The following is broken. Percpu memory allocation
500 * requires %GFP_KERNEL context and can't be performed from IO
501 * path. Allocation here should inherently be atomic and the
502 * following lock dancing can be removed once the broken percpu
503 * allocation is fixed.
504 */
505 spin_unlock_irq(q->queue_lock);
506 rcu_read_unlock();
507
508 new_blkg = pol->ops.blkio_alloc_group_fn(q, blkcg);
509 if (new_blkg) {
510 new_blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
511
512 spin_lock_init(&new_blkg->stats_lock);
513 rcu_assign_pointer(new_blkg->q, q);
514 new_blkg->blkcg_id = css_id(&blkcg->css);
515 new_blkg->plid = plid;
516 cgroup_path(blkcg->css.cgroup, new_blkg->path,
517 sizeof(new_blkg->path));
518 }
519
520 rcu_read_lock();
521 spin_lock_irq(q->queue_lock);
522 css_put(&blkcg->css);
523
524 /* did bypass get turned on in the meantime? */
525 if (unlikely(blk_queue_bypass(q)) && !for_root) {
526 blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
527 goto out;
528 }
529
530 /* did someone beat us to it? */
531 blkg = blkg_lookup(blkcg, q, plid);
532 if (unlikely(blkg))
533 goto out;
534
535 /* did alloc fail? */
536 if (unlikely(!new_blkg || !new_blkg->stats_cpu)) {
537 blkg = ERR_PTR(-ENOMEM);
538 goto out;
539 }
540
541 /* insert */
542 spin_lock(&blkcg->lock);
543 swap(blkg, new_blkg);
544 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
545 pol->ops.blkio_link_group_fn(q, blkg);
546 spin_unlock(&blkcg->lock);
547 out:
548 if (new_blkg) {
549 free_percpu(new_blkg->stats_cpu);
550 kfree(new_blkg);
551 }
552 return blkg;
553 }
554 EXPORT_SYMBOL_GPL(blkg_lookup_create);
555
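/*
 * Illustrative sketch, not part of the original source: how a policy would
 * typically obtain the group for the current task's cgroup.  The caller
 * must hold both the RCU read lock and q->queue_lock, and must cope with
 * an ERR_PTR() return while the queue is bypassing or dead.
 */
static inline struct blkio_group *
blkio_example_get_current_group(struct request_queue *q,
				enum blkio_policy_id plid)
{
	struct blkio_cgroup *blkcg = task_blkio_cgroup(current);

	return blkg_lookup_create(blkcg, q, plid, false);
}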
556 static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
557 {
558 hlist_del_init_rcu(&blkg->blkcg_node);
559 blkg->blkcg_id = 0;
560 }
561
562 /*
563 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
564 * returns 1, indicating that the blkio_group was unhashed by the time we
564 * got to it.
565 */
566 int blkiocg_del_blkio_group(struct blkio_group *blkg)
567 {
568 struct blkio_cgroup *blkcg;
569 unsigned long flags;
570 struct cgroup_subsys_state *css;
571 int ret = 1;
572
573 rcu_read_lock();
574 css = css_lookup(&blkio_subsys, blkg->blkcg_id);
575 if (css) {
576 blkcg = container_of(css, struct blkio_cgroup, css);
577 spin_lock_irqsave(&blkcg->lock, flags);
578 if (!hlist_unhashed(&blkg->blkcg_node)) {
579 __blkiocg_del_blkio_group(blkg);
580 ret = 0;
581 }
582 spin_unlock_irqrestore(&blkcg->lock, flags);
583 }
584
585 rcu_read_unlock();
586 return ret;
587 }
588 EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
589
590 /* called under rcu_read_lock(). */
591 struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
592 struct request_queue *q,
593 enum blkio_policy_id plid)
594 {
595 struct blkio_group *blkg;
596 struct hlist_node *n;
597
598 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
599 if (blkg->q == q && blkg->plid == plid)
600 return blkg;
601 return NULL;
602 }
603 EXPORT_SYMBOL_GPL(blkg_lookup);
604
605 void blkg_destroy_all(struct request_queue *q)
606 {
607 struct blkio_policy_type *pol;
608
609 while (true) {
610 bool done = true;
611
612 spin_lock(&blkio_list_lock);
613 spin_lock_irq(q->queue_lock);
614
615 /*
616 * clear_queue_fn() might return with non-empty group list
617 * if it raced cgroup removal and lost. cgroup removal is
618 * guaranteed to make forward progress and retrying after a
619 * while is enough. This ugliness is scheduled to be
620 * removed after locking update.
621 */
622 list_for_each_entry(pol, &blkio_list, list)
623 if (!pol->ops.blkio_clear_queue_fn(q))
624 done = false;
625
626 spin_unlock_irq(q->queue_lock);
627 spin_unlock(&blkio_list_lock);
628
629 if (done)
630 break;
631
632 msleep(10); /* just some random duration I like */
633 }
634 }
635
636 static void blkio_reset_stats_cpu(struct blkio_group *blkg)
637 {
638 struct blkio_group_stats_cpu *stats_cpu;
639 int i, j, k;
640 /*
641 * Note: on a 64 bit arch this should not be an issue. On a 32 bit arch
642 * this may return an inconsistent value, since a 64 bit update is not
643 * atomic there. Handling that corner case would make the code very
644 * complicated (sending IPIs to cpus, accounting for stats of offline
645 * cpus, etc.).
646 *
647 * Resetting stats is more of a debug feature anyway and this is a
648 * corner case, so the code is not complicated further unless this
649 * becomes a real issue.
650 */
651 for_each_possible_cpu(i) {
652 stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
653 stats_cpu->sectors = 0;
654 for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
655 for (k = 0; k < BLKIO_STAT_TOTAL; k++)
656 stats_cpu->stat_arr_cpu[j][k] = 0;
657 }
658 }
659
660 static int
661 blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
662 {
663 struct blkio_cgroup *blkcg;
664 struct blkio_group *blkg;
665 struct blkio_group_stats *stats;
666 struct hlist_node *n;
667 uint64_t queued[BLKIO_STAT_TOTAL];
668 int i;
669 #ifdef CONFIG_DEBUG_BLK_CGROUP
670 bool idling, waiting, empty;
671 unsigned long long now = sched_clock();
672 #endif
673
674 blkcg = cgroup_to_blkio_cgroup(cgroup);
675 spin_lock_irq(&blkcg->lock);
676 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
677 spin_lock(&blkg->stats_lock);
678 stats = &blkg->stats;
679 #ifdef CONFIG_DEBUG_BLK_CGROUP
680 idling = blkio_blkg_idling(stats);
681 waiting = blkio_blkg_waiting(stats);
682 empty = blkio_blkg_empty(stats);
683 #endif
684 for (i = 0; i < BLKIO_STAT_TOTAL; i++)
685 queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
686 memset(stats, 0, sizeof(struct blkio_group_stats));
687 for (i = 0; i < BLKIO_STAT_TOTAL; i++)
688 stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
689 #ifdef CONFIG_DEBUG_BLK_CGROUP
690 if (idling) {
691 blkio_mark_blkg_idling(stats);
692 stats->start_idle_time = now;
693 }
694 if (waiting) {
695 blkio_mark_blkg_waiting(stats);
696 stats->start_group_wait_time = now;
697 }
698 if (empty) {
699 blkio_mark_blkg_empty(stats);
700 stats->start_empty_time = now;
701 }
702 #endif
703 spin_unlock(&blkg->stats_lock);
704
705 /* Reset per-cpu stats, which don't take blkg->stats_lock */
706 blkio_reset_stats_cpu(blkg);
707 }
708
709 spin_unlock_irq(&blkcg->lock);
710 return 0;
711 }
712
713 static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
714 int chars_left, bool diskname_only)
715 {
716 snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
717 chars_left -= strlen(str);
718 if (chars_left <= 0) {
719 printk(KERN_WARNING
720 "Possibly incorrect cgroup stat display format\n");
721 return;
722 }
723 if (diskname_only)
724 return;
725 switch (type) {
726 case BLKIO_STAT_READ:
727 strlcat(str, " Read", chars_left);
728 break;
729 case BLKIO_STAT_WRITE:
730 strlcat(str, " Write", chars_left);
731 break;
732 case BLKIO_STAT_SYNC:
733 strlcat(str, " Sync", chars_left);
734 break;
735 case BLKIO_STAT_ASYNC:
736 strlcat(str, " Async", chars_left);
737 break;
738 case BLKIO_STAT_TOTAL:
739 strlcat(str, " Total", chars_left);
740 break;
741 default:
742 strlcat(str, " Invalid", chars_left);
743 }
744 }
745
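/*
 * Illustrative sketch, not part of the original source: for a hypothetical
 * device 8:16 this produces the key "8:16 Read"; with diskname_only set it
 * produces just "8:16".
 */
static inline void blkio_example_key_name(char *str)
{
	blkio_get_key_name(BLKIO_STAT_READ, MKDEV(8, 16), str,
			   MAX_KEY_LEN, false);
}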
746 static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
747 struct cgroup_map_cb *cb, dev_t dev)
748 {
749 blkio_get_key_name(0, dev, str, chars_left, true);
750 cb->fill(cb, str, val);
751 return val;
752 }
753
754
755 static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
756 enum stat_type_cpu type, enum stat_sub_type sub_type)
757 {
758 int cpu;
759 struct blkio_group_stats_cpu *stats_cpu;
760 u64 val = 0, tval;
761
762 for_each_possible_cpu(cpu) {
763 unsigned int start;
764 stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
765
766 do {
767 start = u64_stats_fetch_begin(&stats_cpu->syncp);
768 if (type == BLKIO_STAT_CPU_SECTORS)
769 tval = stats_cpu->sectors;
770 else
771 tval = stats_cpu->stat_arr_cpu[type][sub_type];
772 } while (u64_stats_fetch_retry(&stats_cpu->syncp, start));
773
774 val += tval;
775 }
776
777 return val;
778 }
779
780 static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
781 struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
782 {
783 uint64_t disk_total, val;
784 char key_str[MAX_KEY_LEN];
785 enum stat_sub_type sub_type;
786
787 if (type == BLKIO_STAT_CPU_SECTORS) {
788 val = blkio_read_stat_cpu(blkg, type, 0);
789 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
790 }
791
792 for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
793 sub_type++) {
794 blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
795 val = blkio_read_stat_cpu(blkg, type, sub_type);
796 cb->fill(cb, key_str, val);
797 }
798
799 disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
800 blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
801
802 blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
803 cb->fill(cb, key_str, disk_total);
804 return disk_total;
805 }
806
807 /* This should be called with blkg->stats_lock held */
808 static uint64_t blkio_get_stat(struct blkio_group *blkg,
809 struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
810 {
811 uint64_t disk_total;
812 char key_str[MAX_KEY_LEN];
813 enum stat_sub_type sub_type;
814
815 if (type == BLKIO_STAT_TIME)
816 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
817 blkg->stats.time, cb, dev);
818 #ifdef CONFIG_DEBUG_BLK_CGROUP
819 if (type == BLKIO_STAT_UNACCOUNTED_TIME)
820 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
821 blkg->stats.unaccounted_time, cb, dev);
822 if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
823 uint64_t sum = blkg->stats.avg_queue_size_sum;
824 uint64_t samples = blkg->stats.avg_queue_size_samples;
825 if (samples)
826 do_div(sum, samples);
827 else
828 sum = 0;
829 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
830 }
831 if (type == BLKIO_STAT_GROUP_WAIT_TIME)
832 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
833 blkg->stats.group_wait_time, cb, dev);
834 if (type == BLKIO_STAT_IDLE_TIME)
835 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
836 blkg->stats.idle_time, cb, dev);
837 if (type == BLKIO_STAT_EMPTY_TIME)
838 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
839 blkg->stats.empty_time, cb, dev);
840 if (type == BLKIO_STAT_DEQUEUE)
841 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
842 blkg->stats.dequeue, cb, dev);
843 #endif
844
845 for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
846 sub_type++) {
847 blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
848 cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
849 }
850 disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
851 blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
852 blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
853 cb->fill(cb, key_str, disk_total);
854 return disk_total;
855 }
856
857 static int blkio_policy_parse_and_set(char *buf,
858 struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
859 {
860 struct gendisk *disk = NULL;
861 char *s[4], *p, *major_s = NULL, *minor_s = NULL;
862 unsigned long major, minor;
863 int i = 0, ret = -EINVAL;
864 int part;
865 dev_t dev;
866 u64 temp;
867
868 memset(s, 0, sizeof(s));
869
870 while ((p = strsep(&buf, " ")) != NULL) {
871 if (!*p)
872 continue;
873
874 s[i++] = p;
875
876 /* Prevent inputting too many fields */
877 if (i == 3)
878 break;
879 }
880
881 if (i != 2)
882 goto out;
883
884 p = strsep(&s[0], ":");
885 if (p != NULL)
886 major_s = p;
887 else
888 goto out;
889
890 minor_s = s[0];
891 if (!minor_s)
892 goto out;
893
894 if (strict_strtoul(major_s, 10, &major))
895 goto out;
896
897 if (strict_strtoul(minor_s, 10, &minor))
898 goto out;
899
900 dev = MKDEV(major, minor);
901
902 if (strict_strtoull(s[1], 10, &temp))
903 goto out;
904
905 /* For rule removal, do not check for device presence. */
906 if (temp) {
907 disk = get_gendisk(dev, &part);
908 if (!disk || part) {
909 ret = -ENODEV;
910 goto out;
911 }
912 }
913
914 newpn->dev = dev;
915
916 switch (plid) {
917 case BLKIO_POLICY_PROP:
918 if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
919 temp > BLKIO_WEIGHT_MAX)
920 goto out;
921
922 newpn->plid = plid;
923 newpn->fileid = fileid;
924 newpn->val.weight = temp;
925 break;
926 case BLKIO_POLICY_THROTL:
927 switch(fileid) {
928 case BLKIO_THROTL_read_bps_device:
929 case BLKIO_THROTL_write_bps_device:
930 newpn->plid = plid;
931 newpn->fileid = fileid;
932 newpn->val.bps = temp;
933 break;
934 case BLKIO_THROTL_read_iops_device:
935 case BLKIO_THROTL_write_iops_device:
936 if (temp > THROTL_IOPS_MAX)
937 goto out;
938
939 newpn->plid = plid;
940 newpn->fileid = fileid;
941 newpn->val.iops = (unsigned int)temp;
942 break;
943 }
944 break;
945 default:
946 BUG();
947 }
948 ret = 0;
949 out:
950 put_disk(disk);
951 return ret;
952 }
953
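/*
 * Illustrative sketch, not part of the original source: the accepted input
 * is "<major>:<minor> <value>".  For example "8:16 1048576" caps a
 * hypothetical device 8:16 at 1 MB/s, while "8:16 0" removes the rule
 * again.  A caller would fill a policy node from such a string roughly
 * like this (the buffer must be writable since strsep() modifies it).
 */
static inline int blkio_example_parse_bps_rule(struct blkio_policy_node *pn)
{
	char buf[] = "8:16 1048576";

	return blkio_policy_parse_and_set(buf, pn, BLKIO_POLICY_THROTL,
					  BLKIO_THROTL_read_bps_device);
}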
954 unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
955 dev_t dev)
956 {
957 struct blkio_policy_node *pn;
958 unsigned long flags;
959 unsigned int weight;
960
961 spin_lock_irqsave(&blkcg->lock, flags);
962
963 pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
964 BLKIO_PROP_weight_device);
965 if (pn)
966 weight = pn->val.weight;
967 else
968 weight = blkcg->weight;
969
970 spin_unlock_irqrestore(&blkcg->lock, flags);
971
972 return weight;
973 }
974 EXPORT_SYMBOL_GPL(blkcg_get_weight);
975
976 uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
977 {
978 struct blkio_policy_node *pn;
979 unsigned long flags;
980 uint64_t bps = -1;
981
982 spin_lock_irqsave(&blkcg->lock, flags);
983 pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
984 BLKIO_THROTL_read_bps_device);
985 if (pn)
986 bps = pn->val.bps;
987 spin_unlock_irqrestore(&blkcg->lock, flags);
988
989 return bps;
990 }
991
992 uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
993 {
994 struct blkio_policy_node *pn;
995 unsigned long flags;
996 uint64_t bps = -1;
997
998 spin_lock_irqsave(&blkcg->lock, flags);
999 pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
1000 BLKIO_THROTL_write_bps_device);
1001 if (pn)
1002 bps = pn->val.bps;
1003 spin_unlock_irqrestore(&blkcg->lock, flags);
1004
1005 return bps;
1006 }
1007
1008 unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
1009 {
1010 struct blkio_policy_node *pn;
1011 unsigned long flags;
1012 unsigned int iops = -1;
1013
1014 spin_lock_irqsave(&blkcg->lock, flags);
1015 pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
1016 BLKIO_THROTL_read_iops_device);
1017 if (pn)
1018 iops = pn->val.iops;
1019 spin_unlock_irqrestore(&blkcg->lock, flags);
1020
1021 return iops;
1022 }
1023
1024 unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
1025 {
1026 struct blkio_policy_node *pn;
1027 unsigned long flags;
1028 unsigned int iops = -1;
1029
1030 spin_lock_irqsave(&blkcg->lock, flags);
1031 pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
1032 BLKIO_THROTL_write_iops_device);
1033 if (pn)
1034 iops = pn->val.iops;
1035 spin_unlock_irqrestore(&blkcg->lock, flags);
1036
1037 return iops;
1038 }
1039
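/*
 * Illustrative sketch, not part of the original source: a throttling policy
 * would query the per-device limits configured for a disk roughly like
 * this; (u64)-1 means no rule is set for that device.
 */
static inline u64 blkio_example_read_bps_limit(struct blkio_cgroup *blkcg,
					       struct gendisk *disk)
{
	return blkcg_get_read_bps(blkcg, disk_devt(disk));
}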
1040 /* Checks whether the user asked to delete a policy rule */
1041 static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
1042 {
1043 switch(pn->plid) {
1044 case BLKIO_POLICY_PROP:
1045 if (pn->val.weight == 0)
1046 return 1;
1047 break;
1048 case BLKIO_POLICY_THROTL:
1049 switch(pn->fileid) {
1050 case BLKIO_THROTL_read_bps_device:
1051 case BLKIO_THROTL_write_bps_device:
1052 if (pn->val.bps == 0)
1053 return 1;
1054 break;
1055 case BLKIO_THROTL_read_iops_device:
1056 case BLKIO_THROTL_write_iops_device:
1057 if (pn->val.iops == 0)
1058 return 1;
1059 }
1060 break;
1061 default:
1062 BUG();
1063 }
1064
1065 return 0;
1066 }
1067
1068 static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
1069 struct blkio_policy_node *newpn)
1070 {
1071 switch(oldpn->plid) {
1072 case BLKIO_POLICY_PROP:
1073 oldpn->val.weight = newpn->val.weight;
1074 break;
1075 case BLKIO_POLICY_THROTL:
1076 switch(newpn->fileid) {
1077 case BLKIO_THROTL_read_bps_device:
1078 case BLKIO_THROTL_write_bps_device:
1079 oldpn->val.bps = newpn->val.bps;
1080 break;
1081 case BLKIO_THROTL_read_iops_device:
1082 case BLKIO_THROTL_write_iops_device:
1083 oldpn->val.iops = newpn->val.iops;
1084 }
1085 break;
1086 default:
1087 BUG();
1088 }
1089 }
1090
1091 /*
1092 * Some rules/values in blkg have changed. Propagate those to respective
1093 * policies.
1094 */
1095 static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
1096 struct blkio_group *blkg, struct blkio_policy_node *pn)
1097 {
1098 unsigned int weight, iops;
1099 u64 bps;
1100
1101 switch(pn->plid) {
1102 case BLKIO_POLICY_PROP:
1103 weight = pn->val.weight ? pn->val.weight :
1104 blkcg->weight;
1105 blkio_update_group_weight(blkg, weight);
1106 break;
1107 case BLKIO_POLICY_THROTL:
1108 switch(pn->fileid) {
1109 case BLKIO_THROTL_read_bps_device:
1110 case BLKIO_THROTL_write_bps_device:
1111 bps = pn->val.bps ? pn->val.bps : (-1);
1112 blkio_update_group_bps(blkg, bps, pn->fileid);
1113 break;
1114 case BLKIO_THROTL_read_iops_device:
1115 case BLKIO_THROTL_write_iops_device:
1116 iops = pn->val.iops ? pn->val.iops : (-1);
1117 blkio_update_group_iops(blkg, iops, pn->fileid);
1118 break;
1119 }
1120 break;
1121 default:
1122 BUG();
1123 }
1124 }
1125
1126 /*
1127 * A policy node rule has been updated. Propagate this update to all the
1128 * block groups which might be affected by this update.
1129 */
1130 static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
1131 struct blkio_policy_node *pn)
1132 {
1133 struct blkio_group *blkg;
1134 struct hlist_node *n;
1135
1136 spin_lock(&blkio_list_lock);
1137 spin_lock_irq(&blkcg->lock);
1138
1139 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1140 if (pn->dev != blkg->dev || pn->plid != blkg->plid)
1141 continue;
1142 blkio_update_blkg_policy(blkcg, blkg, pn);
1143 }
1144
1145 spin_unlock_irq(&blkcg->lock);
1146 spin_unlock(&blkio_list_lock);
1147 }
1148
1149 static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
1150 const char *buffer)
1151 {
1152 int ret = 0;
1153 char *buf;
1154 struct blkio_policy_node *newpn, *pn;
1155 struct blkio_cgroup *blkcg;
1156 int keep_newpn = 0;
1157 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1158 int fileid = BLKIOFILE_ATTR(cft->private);
1159
1160 buf = kstrdup(buffer, GFP_KERNEL);
1161 if (!buf)
1162 return -ENOMEM;
1163
1164 newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
1165 if (!newpn) {
1166 ret = -ENOMEM;
1167 goto free_buf;
1168 }
1169
1170 ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
1171 if (ret)
1172 goto free_newpn;
1173
1174 blkcg = cgroup_to_blkio_cgroup(cgrp);
1175
1176 spin_lock_irq(&blkcg->lock);
1177
1178 pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
1179 if (!pn) {
1180 if (!blkio_delete_rule_command(newpn)) {
1181 blkio_policy_insert_node(blkcg, newpn);
1182 keep_newpn = 1;
1183 }
1184 spin_unlock_irq(&blkcg->lock);
1185 goto update_io_group;
1186 }
1187
1188 if (blkio_delete_rule_command(newpn)) {
1189 blkio_policy_delete_node(pn);
1190 kfree(pn);
1191 spin_unlock_irq(&blkcg->lock);
1192 goto update_io_group;
1193 }
1194 spin_unlock_irq(&blkcg->lock);
1195
1196 blkio_update_policy_rule(pn, newpn);
1197
1198 update_io_group:
1199 blkio_update_policy_node_blkg(blkcg, newpn);
1200
1201 free_newpn:
1202 if (!keep_newpn)
1203 kfree(newpn);
1204 free_buf:
1205 kfree(buf);
1206 return ret;
1207 }
1208
1209 static void
1210 blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
1211 {
1212 switch(pn->plid) {
1213 case BLKIO_POLICY_PROP:
1214 if (pn->fileid == BLKIO_PROP_weight_device)
1215 seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
1216 MINOR(pn->dev), pn->val.weight);
1217 break;
1218 case BLKIO_POLICY_THROTL:
1219 switch(pn->fileid) {
1220 case BLKIO_THROTL_read_bps_device:
1221 case BLKIO_THROTL_write_bps_device:
1222 seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
1223 MINOR(pn->dev), pn->val.bps);
1224 break;
1225 case BLKIO_THROTL_read_iops_device:
1226 case BLKIO_THROTL_write_iops_device:
1227 seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
1228 MINOR(pn->dev), pn->val.iops);
1229 break;
1230 }
1231 break;
1232 default:
1233 BUG();
1234 }
1235 }
1236
1237 /* cgroup files which read their data from policy nodes end up here */
1238 static void blkio_read_policy_node_files(struct cftype *cft,
1239 struct blkio_cgroup *blkcg, struct seq_file *m)
1240 {
1241 struct blkio_policy_node *pn;
1242
1243 if (!list_empty(&blkcg->policy_list)) {
1244 spin_lock_irq(&blkcg->lock);
1245 list_for_each_entry(pn, &blkcg->policy_list, node) {
1246 if (!pn_matches_cftype(cft, pn))
1247 continue;
1248 blkio_print_policy_node(m, pn);
1249 }
1250 spin_unlock_irq(&blkcg->lock);
1251 }
1252 }
1253
1254 static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
1255 struct seq_file *m)
1256 {
1257 struct blkio_cgroup *blkcg;
1258 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1259 int name = BLKIOFILE_ATTR(cft->private);
1260
1261 blkcg = cgroup_to_blkio_cgroup(cgrp);
1262
1263 switch(plid) {
1264 case BLKIO_POLICY_PROP:
1265 switch(name) {
1266 case BLKIO_PROP_weight_device:
1267 blkio_read_policy_node_files(cft, blkcg, m);
1268 return 0;
1269 default:
1270 BUG();
1271 }
1272 break;
1273 case BLKIO_POLICY_THROTL:
1274 switch (name) {
1275 case BLKIO_THROTL_read_bps_device:
1276 case BLKIO_THROTL_write_bps_device:
1277 case BLKIO_THROTL_read_iops_device:
1278 case BLKIO_THROTL_write_iops_device:
1279 blkio_read_policy_node_files(cft, blkcg, m);
1280 return 0;
1281 default:
1282 BUG();
1283 }
1284 break;
1285 default:
1286 BUG();
1287 }
1288
1289 return 0;
1290 }
1291
1292 static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
1293 struct cftype *cft, struct cgroup_map_cb *cb,
1294 enum stat_type type, bool show_total, bool pcpu)
1295 {
1296 struct blkio_group *blkg;
1297 struct hlist_node *n;
1298 uint64_t cgroup_total = 0;
1299
1300 rcu_read_lock();
1301 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
1302 if (blkg->dev) {
1303 if (!cftype_blkg_same_policy(cft, blkg))
1304 continue;
1305 if (pcpu)
1306 cgroup_total += blkio_get_stat_cpu(blkg, cb,
1307 blkg->dev, type);
1308 else {
1309 spin_lock_irq(&blkg->stats_lock);
1310 cgroup_total += blkio_get_stat(blkg, cb,
1311 blkg->dev, type);
1312 spin_unlock_irq(&blkg->stats_lock);
1313 }
1314 }
1315 }
1316 if (show_total)
1317 cb->fill(cb, "Total", cgroup_total);
1318 rcu_read_unlock();
1319 return 0;
1320 }
1321
1322 /* All map-type cgroup files are serviced by this function */
1323 static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
1324 struct cgroup_map_cb *cb)
1325 {
1326 struct blkio_cgroup *blkcg;
1327 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1328 int name = BLKIOFILE_ATTR(cft->private);
1329
1330 blkcg = cgroup_to_blkio_cgroup(cgrp);
1331
1332 switch(plid) {
1333 case BLKIO_POLICY_PROP:
1334 switch(name) {
1335 case BLKIO_PROP_time:
1336 return blkio_read_blkg_stats(blkcg, cft, cb,
1337 BLKIO_STAT_TIME, 0, 0);
1338 case BLKIO_PROP_sectors:
1339 return blkio_read_blkg_stats(blkcg, cft, cb,
1340 BLKIO_STAT_CPU_SECTORS, 0, 1);
1341 case BLKIO_PROP_io_service_bytes:
1342 return blkio_read_blkg_stats(blkcg, cft, cb,
1343 BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1344 case BLKIO_PROP_io_serviced:
1345 return blkio_read_blkg_stats(blkcg, cft, cb,
1346 BLKIO_STAT_CPU_SERVICED, 1, 1);
1347 case BLKIO_PROP_io_service_time:
1348 return blkio_read_blkg_stats(blkcg, cft, cb,
1349 BLKIO_STAT_SERVICE_TIME, 1, 0);
1350 case BLKIO_PROP_io_wait_time:
1351 return blkio_read_blkg_stats(blkcg, cft, cb,
1352 BLKIO_STAT_WAIT_TIME, 1, 0);
1353 case BLKIO_PROP_io_merged:
1354 return blkio_read_blkg_stats(blkcg, cft, cb,
1355 BLKIO_STAT_CPU_MERGED, 1, 1);
1356 case BLKIO_PROP_io_queued:
1357 return blkio_read_blkg_stats(blkcg, cft, cb,
1358 BLKIO_STAT_QUEUED, 1, 0);
1359 #ifdef CONFIG_DEBUG_BLK_CGROUP
1360 case BLKIO_PROP_unaccounted_time:
1361 return blkio_read_blkg_stats(blkcg, cft, cb,
1362 BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
1363 case BLKIO_PROP_dequeue:
1364 return blkio_read_blkg_stats(blkcg, cft, cb,
1365 BLKIO_STAT_DEQUEUE, 0, 0);
1366 case BLKIO_PROP_avg_queue_size:
1367 return blkio_read_blkg_stats(blkcg, cft, cb,
1368 BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
1369 case BLKIO_PROP_group_wait_time:
1370 return blkio_read_blkg_stats(blkcg, cft, cb,
1371 BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
1372 case BLKIO_PROP_idle_time:
1373 return blkio_read_blkg_stats(blkcg, cft, cb,
1374 BLKIO_STAT_IDLE_TIME, 0, 0);
1375 case BLKIO_PROP_empty_time:
1376 return blkio_read_blkg_stats(blkcg, cft, cb,
1377 BLKIO_STAT_EMPTY_TIME, 0, 0);
1378 #endif
1379 default:
1380 BUG();
1381 }
1382 break;
1383 case BLKIO_POLICY_THROTL:
1384 switch (name) {
1385 case BLKIO_THROTL_io_service_bytes:
1386 return blkio_read_blkg_stats(blkcg, cft, cb,
1387 BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1388 case BLKIO_THROTL_io_serviced:
1389 return blkio_read_blkg_stats(blkcg, cft, cb,
1390 BLKIO_STAT_CPU_SERVICED, 1, 1);
1391 default:
1392 BUG();
1393 }
1394 break;
1395 default:
1396 BUG();
1397 }
1398
1399 return 0;
1400 }
1401
1402 static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
1403 {
1404 struct blkio_group *blkg;
1405 struct hlist_node *n;
1406 struct blkio_policy_node *pn;
1407
1408 if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
1409 return -EINVAL;
1410
1411 spin_lock(&blkio_list_lock);
1412 spin_lock_irq(&blkcg->lock);
1413 blkcg->weight = (unsigned int)val;
1414
1415 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1416 pn = blkio_policy_search_node(blkcg, blkg->dev,
1417 BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
1418 if (pn)
1419 continue;
1420
1421 blkio_update_group_weight(blkg, blkcg->weight);
1422 }
1423 spin_unlock_irq(&blkcg->lock);
1424 spin_unlock(&blkio_list_lock);
1425 return 0;
1426 }
1427
1428 static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft) {
1429 struct blkio_cgroup *blkcg;
1430 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1431 int name = BLKIOFILE_ATTR(cft->private);
1432
1433 blkcg = cgroup_to_blkio_cgroup(cgrp);
1434
1435 switch(plid) {
1436 case BLKIO_POLICY_PROP:
1437 switch(name) {
1438 case BLKIO_PROP_weight:
1439 return (u64)blkcg->weight;
1440 }
1441 break;
1442 default:
1443 BUG();
1444 }
1445 return 0;
1446 }
1447
1448 static int
1449 blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1450 {
1451 struct blkio_cgroup *blkcg;
1452 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1453 int name = BLKIOFILE_ATTR(cft->private);
1454
1455 blkcg = cgroup_to_blkio_cgroup(cgrp);
1456
1457 switch(plid) {
1458 case BLKIO_POLICY_PROP:
1459 switch(name) {
1460 case BLKIO_PROP_weight:
1461 return blkio_weight_write(blkcg, val);
1462 }
1463 break;
1464 default:
1465 BUG();
1466 }
1467
1468 return 0;
1469 }
1470
1471 struct cftype blkio_files[] = {
1472 {
1473 .name = "weight_device",
1474 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1475 BLKIO_PROP_weight_device),
1476 .read_seq_string = blkiocg_file_read,
1477 .write_string = blkiocg_file_write,
1478 .max_write_len = 256,
1479 },
1480 {
1481 .name = "weight",
1482 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1483 BLKIO_PROP_weight),
1484 .read_u64 = blkiocg_file_read_u64,
1485 .write_u64 = blkiocg_file_write_u64,
1486 },
1487 {
1488 .name = "time",
1489 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1490 BLKIO_PROP_time),
1491 .read_map = blkiocg_file_read_map,
1492 },
1493 {
1494 .name = "sectors",
1495 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1496 BLKIO_PROP_sectors),
1497 .read_map = blkiocg_file_read_map,
1498 },
1499 {
1500 .name = "io_service_bytes",
1501 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1502 BLKIO_PROP_io_service_bytes),
1503 .read_map = blkiocg_file_read_map,
1504 },
1505 {
1506 .name = "io_serviced",
1507 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1508 BLKIO_PROP_io_serviced),
1509 .read_map = blkiocg_file_read_map,
1510 },
1511 {
1512 .name = "io_service_time",
1513 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1514 BLKIO_PROP_io_service_time),
1515 .read_map = blkiocg_file_read_map,
1516 },
1517 {
1518 .name = "io_wait_time",
1519 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1520 BLKIO_PROP_io_wait_time),
1521 .read_map = blkiocg_file_read_map,
1522 },
1523 {
1524 .name = "io_merged",
1525 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1526 BLKIO_PROP_io_merged),
1527 .read_map = blkiocg_file_read_map,
1528 },
1529 {
1530 .name = "io_queued",
1531 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1532 BLKIO_PROP_io_queued),
1533 .read_map = blkiocg_file_read_map,
1534 },
1535 {
1536 .name = "reset_stats",
1537 .write_u64 = blkiocg_reset_stats,
1538 },
1539 #ifdef CONFIG_BLK_DEV_THROTTLING
1540 {
1541 .name = "throttle.read_bps_device",
1542 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1543 BLKIO_THROTL_read_bps_device),
1544 .read_seq_string = blkiocg_file_read,
1545 .write_string = blkiocg_file_write,
1546 .max_write_len = 256,
1547 },
1548
1549 {
1550 .name = "throttle.write_bps_device",
1551 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1552 BLKIO_THROTL_write_bps_device),
1553 .read_seq_string = blkiocg_file_read,
1554 .write_string = blkiocg_file_write,
1555 .max_write_len = 256,
1556 },
1557
1558 {
1559 .name = "throttle.read_iops_device",
1560 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1561 BLKIO_THROTL_read_iops_device),
1562 .read_seq_string = blkiocg_file_read,
1563 .write_string = blkiocg_file_write,
1564 .max_write_len = 256,
1565 },
1566
1567 {
1568 .name = "throttle.write_iops_device",
1569 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1570 BLKIO_THROTL_write_iops_device),
1571 .read_seq_string = blkiocg_file_read,
1572 .write_string = blkiocg_file_write,
1573 .max_write_len = 256,
1574 },
1575 {
1576 .name = "throttle.io_service_bytes",
1577 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1578 BLKIO_THROTL_io_service_bytes),
1579 .read_map = blkiocg_file_read_map,
1580 },
1581 {
1582 .name = "throttle.io_serviced",
1583 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1584 BLKIO_THROTL_io_serviced),
1585 .read_map = blkiocg_file_read_map,
1586 },
1587 #endif /* CONFIG_BLK_DEV_THROTTLING */
1588
1589 #ifdef CONFIG_DEBUG_BLK_CGROUP
1590 {
1591 .name = "avg_queue_size",
1592 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1593 BLKIO_PROP_avg_queue_size),
1594 .read_map = blkiocg_file_read_map,
1595 },
1596 {
1597 .name = "group_wait_time",
1598 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1599 BLKIO_PROP_group_wait_time),
1600 .read_map = blkiocg_file_read_map,
1601 },
1602 {
1603 .name = "idle_time",
1604 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1605 BLKIO_PROP_idle_time),
1606 .read_map = blkiocg_file_read_map,
1607 },
1608 {
1609 .name = "empty_time",
1610 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1611 BLKIO_PROP_empty_time),
1612 .read_map = blkiocg_file_read_map,
1613 },
1614 {
1615 .name = "dequeue",
1616 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1617 BLKIO_PROP_dequeue),
1618 .read_map = blkiocg_file_read_map,
1619 },
1620 {
1621 .name = "unaccounted_time",
1622 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1623 BLKIO_PROP_unaccounted_time),
1624 .read_map = blkiocg_file_read_map,
1625 },
1626 #endif
1627 };
1628
1629 static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1630 {
1631 return cgroup_add_files(cgroup, subsys, blkio_files,
1632 ARRAY_SIZE(blkio_files));
1633 }
1634
1635 static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1636 {
1637 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1638 unsigned long flags;
1639 struct blkio_group *blkg;
1640 struct request_queue *q;
1641 struct blkio_policy_type *blkiop;
1642 struct blkio_policy_node *pn, *pntmp;
1643
1644 rcu_read_lock();
1645 do {
1646 spin_lock_irqsave(&blkcg->lock, flags);
1647
1648 if (hlist_empty(&blkcg->blkg_list)) {
1649 spin_unlock_irqrestore(&blkcg->lock, flags);
1650 break;
1651 }
1652
1653 blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
1654 blkcg_node);
1655 q = rcu_dereference(blkg->q);
1656 __blkiocg_del_blkio_group(blkg);
1657
1658 spin_unlock_irqrestore(&blkcg->lock, flags);
1659
1660 /*
1661 * This blkio_group is being unlinked as the associated cgroup is
1662 * going away. Let all the IO controlling policies know about
1663 * this event.
1664 */
1665 spin_lock(&blkio_list_lock);
1666 list_for_each_entry(blkiop, &blkio_list, list) {
1667 if (blkiop->plid != blkg->plid)
1668 continue;
1669 blkiop->ops.blkio_unlink_group_fn(q, blkg);
1670 }
1671 spin_unlock(&blkio_list_lock);
1672 } while (1);
1673
1674 list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
1675 blkio_policy_delete_node(pn);
1676 kfree(pn);
1677 }
1678
1679 free_css_id(&blkio_subsys, &blkcg->css);
1680 rcu_read_unlock();
1681 if (blkcg != &blkio_root_cgroup)
1682 kfree(blkcg);
1683 }
1684
1685 static struct cgroup_subsys_state *
1686 blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1687 {
1688 struct blkio_cgroup *blkcg;
1689 struct cgroup *parent = cgroup->parent;
1690
1691 if (!parent) {
1692 blkcg = &blkio_root_cgroup;
1693 goto done;
1694 }
1695
1696 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1697 if (!blkcg)
1698 return ERR_PTR(-ENOMEM);
1699
1700 blkcg->weight = BLKIO_WEIGHT_DEFAULT;
1701 done:
1702 spin_lock_init(&blkcg->lock);
1703 INIT_HLIST_HEAD(&blkcg->blkg_list);
1704
1705 INIT_LIST_HEAD(&blkcg->policy_list);
1706 return &blkcg->css;
1707 }
1708
1709 /*
1710 * We cannot support shared io contexts, as we have no means to support
1711 * two tasks with the same ioc in two different groups without major rework
1712 * of the main cic data structures. For now we allow a task to change
1713 * its cgroup only if it's the only owner of its ioc.
1714 */
1715 static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
1716 struct cgroup_taskset *tset)
1717 {
1718 struct task_struct *task;
1719 struct io_context *ioc;
1720 int ret = 0;
1721
1722 /* task_lock() is needed to avoid races with exit_io_context() */
1723 cgroup_taskset_for_each(task, cgrp, tset) {
1724 task_lock(task);
1725 ioc = task->io_context;
1726 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1727 ret = -EINVAL;
1728 task_unlock(task);
1729 if (ret)
1730 break;
1731 }
1732 return ret;
1733 }
1734
1735 static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
1736 struct cgroup_taskset *tset)
1737 {
1738 struct task_struct *task;
1739 struct io_context *ioc;
1740
1741 cgroup_taskset_for_each(task, cgrp, tset) {
1742 /* we don't lose anything even if ioc allocation fails */
1743 ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
1744 if (ioc) {
1745 ioc_cgroup_changed(ioc);
1746 put_io_context(ioc);
1747 }
1748 }
1749 }
1750
1751 void blkio_policy_register(struct blkio_policy_type *blkiop)
1752 {
1753 spin_lock(&blkio_list_lock);
1754
1755 BUG_ON(blkio_policy[blkiop->plid]);
1756 blkio_policy[blkiop->plid] = blkiop;
1757 list_add_tail(&blkiop->list, &blkio_list);
1758
1759 spin_unlock(&blkio_list_lock);
1760 }
1761 EXPORT_SYMBOL_GPL(blkio_policy_register);
1762
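/*
 * Illustrative sketch, not part of the original source: an IO control
 * policy registers a blkio_policy_type at init time and the core then
 * calls back through its ops.  The stubs and names below are hypothetical;
 * a real policy allocates its own per-queue group type embedding
 * struct blkio_group and returns a pointer to that embedded member.
 */
static struct blkio_group *example_alloc_group_fn(struct request_queue *q,
						  struct blkio_cgroup *blkcg)
{
	/* allocate the policy's own group here; NULL just keeps this a stub */
	return NULL;
}

static void example_link_group_fn(struct request_queue *q,
				  struct blkio_group *blkg)
{
	/* a real policy adds blkg to its per-queue list here */
}

static struct blkio_policy_type example_policy_type = {
	.ops = {
		.blkio_alloc_group_fn	= example_alloc_group_fn,
		.blkio_link_group_fn	= example_link_group_fn,
	},
	.plid = BLKIO_POLICY_PROP,
};

static void __init blkio_example_register_policy(void)
{
	blkio_policy_register(&example_policy_type);
}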
1763 void blkio_policy_unregister(struct blkio_policy_type *blkiop)
1764 {
1765 spin_lock(&blkio_list_lock);
1766
1767 BUG_ON(blkio_policy[blkiop->plid] != blkiop);
1768 blkio_policy[blkiop->plid] = NULL;
1769 list_del_init(&blkiop->list);
1770
1771 spin_unlock(&blkio_list_lock);
1772 }
1773 EXPORT_SYMBOL_GPL(blkio_policy_unregister);