/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

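/*
 * Example (illustrative, using only names defined in this file): the
 * "weight_device" cftype further down is declared with
 *
 *	.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
 *			BLKIO_PROP_weight_device),
 *
 * so a handler shared by several files can recover both halves:
 *
 *	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
 *	int attr = BLKIOFILE_ATTR(cft->private);
 *
 * giving plid == BLKIO_POLICY_PROP and attr == BLKIO_PROP_weight_device.
 */
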
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
			 enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
								 blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								   blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								    blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								    blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								     blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

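/*
 * Example (illustrative): accounting one synchronous write via
 * blkio_add_stat(stat, 1, true, true) increments both
 * stat[BLKIO_STAT_WRITE] and stat[BLKIO_STAT_SYNC], so the Read/Write
 * pair and the Sync/Async pair each sum to the same total.
 */
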
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
		       sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
				 direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				   unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
		       1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
		       bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
			       now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
			       io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
				    bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
		       direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * This function allocates the per cpu stats for blkio_group. Should be called
 * from sleepable context as alloc_per_cpu() requires that.
 */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
	/* Allocate memory for per cpu stats */
	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (!blkg->stats_cpu)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);

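/*
 * Sketch of a typical call site (illustrative; the real callers are the
 * individual policies, and "pg" here is a stand-in for a policy's own
 * group structure embedding a struct blkio_group):
 *
 *	pg = kzalloc_node(sizeof(*pg), GFP_KERNEL, node);
 *	if (!pg)
 *		return NULL;
 *	if (blkio_alloc_blkg_stats(&pg->blkg)) {
 *		kfree(pg);
 *		return NULL;
 *	}
 */
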
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct request_queue *q, dev_t dev,
		enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->q, q);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
					 struct request_queue *q,
					 enum blkio_policy_id plid)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q && blkg->plid == plid)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

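/*
 * Illustrative lookup pattern (not code from this file): a policy
 * resolving the group for the current task, under RCU as required by
 * the comment above blkiocg_lookup_group():
 *
 *	rcu_read_lock();
 *	blkcg = task_blkio_cgroup(current);
 *	blkg = blkiocg_lookup_group(blkcg, q, BLKIO_POLICY_THROTL);
 *	rcu_read_unlock();
 */
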
void blkg_destroy_all(struct request_queue *q)
{
	struct blkio_policy_type *pol;

	while (true) {
		bool done = true;

		spin_lock(&blkio_list_lock);
		spin_lock_irq(q->queue_lock);

		/*
		 * clear_queue_fn() might return with non-empty group list
		 * if it raced cgroup removal and lost. cgroup removal is
		 * guaranteed to make forward progress and retrying after a
		 * while is enough. This ugliness is scheduled to be
		 * removed after locking update.
		 */
		list_for_each_entry(pol, &blkio_list, list)
			if (!pol->ops.blkio_clear_queue_fn(q))
				done = false;

		spin_unlock_irq(q->queue_lock);
		spin_unlock(&blkio_list_lock);

		if (done)
			break;

		msleep(10);	/* just some random duration I like */
	}
}

static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;
	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
			       int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}


static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	struct gendisk *disk = NULL;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent too many things from being input */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	/* For rule removal, do not check for device presence. */
	if (temp) {
		disk = get_gendisk(dev, &part);
		if (!disk || part) {
			ret = -ENODEV;
			goto out;
		}
	}

	newpn->dev = dev;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = temp;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)temp;
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out:
	put_disk(disk);
	return ret;
}

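/*
 * Example of the syntax parsed above, as written from userspace
 * (illustrative device numbers):
 *
 *	# echo "8:16 1048576" > blkio.throttle.read_bps_device
 *	# echo "8:16 0"       > blkio.throttle.read_bps_device
 *
 * i.e. "<major>:<minor> <value>"; a value of 0 removes the rule, which
 * is why device presence is not checked in that case.
 */
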
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int weight;

	spin_lock_irqsave(&blkcg->lock, flags);

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		weight = pn->val.weight;
	else
		weight = blkcg->weight;

	spin_unlock_irqrestore(&blkcg->lock, flags);

	return weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}

/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
				     struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		kfree(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			if (pcpu)
				cgroup_total += blkio_get_stat_cpu(blkg, cb,
						blkg->dev, type);
			else {
				spin_lock_irq(&blkg->stats_lock);
				cgroup_total += blkio_get_stat(blkg, cb,
						blkg->dev, type);
				spin_unlock_irq(&blkg->stats_lock);
			}
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

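/*
 * Semantics sketch (follows from the loop above, illustrative numbers):
 * writing blkio.weight updates the cgroup-wide default, but any device
 * that has a weight_device rule keeps its per-device value. After
 *
 *	# echo 500 > blkio.weight
 *	# echo "8:16 300" > blkio.weight_device
 *
 * device 8:16 is served at weight 300 while all other devices use 500.
 */
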
static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	struct request_queue *q;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				   blkcg_node);
		q = rcu_dereference(blkg->q);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(q, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
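
/*
 * Registration sketch (illustrative; real registrations live in the
 * policy implementations, and all "foo" names below are placeholders):
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_unlink_group_fn = foo_unlink_blkio_group,
 *			.blkio_clear_queue_fn = foo_clear_queue,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *	};
 *
 * A policy calls blkio_policy_register(&blkio_policy_foo) at init and
 * blkio_policy_unregister(&blkio_policy_foo) on exit, which adds it to
 * or removes it from the blkio_list walked throughout this file.
 */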