/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

struct blkio_cgroup blkio_root_cgroup = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->ops.blkio_exit_group_fn)
			pol->ops.blkio_exit_group_fn(blkg);

		kfree(pd);
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/* allocate and initialize */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
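
/*
 * Illustrative sketch (not part of the original file): a policy's I/O path
 * would typically resolve the blkg for a bio the same way blkg_conf_prep()
 * below does -- under rcu_read_lock() and the queue lock.  The function
 * name example_bio_to_blkg() is hypothetical.
 */
#if 0
static struct blkio_group *example_bio_to_blkg(struct request_queue *q,
					       struct bio *bio)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;

	rcu_read_lock();
	blkcg = bio_blkio_cgroup(bio);
	spin_lock_irq(q->queue_lock);
	blkg = blkg_lookup_create(blkcg, q, false);	/* may return ERR_PTR */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	return IS_ERR(blkg) ? NULL : blkg;
}
#endif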

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, the group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down.  This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
 * destroyed; otherwise, the root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner.  But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid.  For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access only to
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list)
			if (pol->ops.blkio_reset_group_stats_fn)
				pol->ops.blkio_reset_group_stats_fn(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: whether to print the sum of the @prfill return values
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with a "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
		       u64 (*prfill)(struct seq_file *, void *, int),
		       int pol, int data, bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->pd[pol])
			total += prfill(sf, blkg->pd[pol]->pdata, data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pdata.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, void *pdata, u64 v)
{
	const char *dname = blkg_dev_name(pdata_to_blkg(pdata));

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pdata.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, void *pdata,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pdata_to_blkg(pdata));
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @off: offset to the blkg_stat in @pdata
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, void *pdata, int off)
{
	return __blkg_prfill_u64(sf, pdata, blkg_stat_read(pdata + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pdata: policy private data of interest
 * @off: offset to the blkg_rwstat in @pdata
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, void *pdata, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read(pdata + off);

	return __blkg_prfill_rwstat(sf, pdata, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
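
/*
 * Illustrative sketch (not part of the original file): a policy would
 * combine blkcg_print_blkgs() with a prfill callback to implement a
 * cftype->read_seq_string handler.  "struct example_pdata", its
 * "serviced" blkg_stat member and EXAMPLE_POLICY_ID are hypothetical.
 */
#if 0
static int example_print_serviced(struct cgroup *cgrp, struct cftype *cft,
				  struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, EXAMPLE_POLICY_ID,
			  offsetof(struct example_pdata, serviced), false);
	return 0;
}
#endif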

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read locked and must be paired
 * with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
		   struct blkg_conf_ctx *ctx)
	__acquires(rcu)
{
	struct gendisk *disk;
	struct blkio_group *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		put_disk(disk);
		/*
		 * If the queue was bypassing, we should retry.  Do so after
		 * a short msleep().  It isn't strictly necessary but the
		 * queue can be bypassing for some time and it's always nice
		 * to avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after a per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(rcu)
{
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
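
/*
 * Illustrative sketch (not part of the original file): a policy's
 * cftype->write_string handler would bracket its config update with the
 * two helpers above.  example_set_weight() and the value semantics are
 * hypothetical.
 */
#if 0
static int example_write_weight(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, buf, &ctx);	/* returns with RCU locked */
	if (ret)
		return ret;

	example_set_weight(ctx.blkg, ctx.v);	/* apply the parsed value */

	blkg_conf_finish(&ctx);			/* drops RCU, puts disk */
	return 0;
}
#endif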

struct cftype blkio_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();

	if (blkiop->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	if (blkiop->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes);

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
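
/*
 * Illustrative sketch (not part of the original file): a minimal policy
 * registration.  Only the fields this file actually uses are shown; the
 * policy name, ops, pdata layout and EXAMPLE_POLICY_ID are hypothetical.
 * Note that blkg_alloc() calls blkio_init_group_fn unconditionally, so a
 * registered policy must provide it; the exit and reset callbacks are
 * NULL-checked and therefore optional.
 */
#if 0
static void example_init_group(struct blkio_group *blkg) { }

static struct blkio_policy_type example_policy = {
	.ops = {
		.blkio_init_group_fn = example_init_group,
		/* .blkio_exit_group_fn and
		 * .blkio_reset_group_stats_fn are optional */
	},
	.plid = EXAMPLE_POLICY_ID,		/* enum blkio_policy_id slot */
	.pdata_size = sizeof(struct example_pdata),
	.cftypes = NULL,			/* or a cftype array */
};

static int __init example_init(void)
{
	blkio_policy_register(&example_policy);
	return 0;
}

static void __exit example_exit(void)
{
	blkio_policy_unregister(&example_policy);
}
#endif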