/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(pd);
	}

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* invoke per-policy init */
		if (blkcg_policy_enabled(blkg->q, pol))
			pol->pd_init_fn(blkg);
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that we
	 * may not be holding queue_lock and thus are not sure whether
	 * @blkg from blkg_tree has already been removed or not, so we
	 * can't update hint to the lookup result.  Leave it to the caller.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q)
		return blkg;

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);

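/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller pins the blkg with the RCU read lock for the duration of the
 * access.  blkcg_from_current() below is a hypothetical helper; real
 * callers derive the blkcg from a task or bio.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg_from_current(), q);
 *	if (blkg)
 *		...access only RCU-safe, group-local fields here...
 *	rcu_read_unlock();
 */
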
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
					     struct request_queue *q,
					     struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* lookup and update hint on success, see __blkg_lookup() for details */
	blkg = __blkg_lookup(blkcg, q);
	if (blkg) {
		rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto out_free;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css)) {
		blkg = ERR_PTR(-EINVAL);
		goto out_free;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			blkg = ERR_PTR(-ENOMEM);
			goto out_put;
		}
	}
	blkg = new_blkg;

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);
	}
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	blkg = ERR_PTR(ret);
out_put:
	css_put(&blkcg->css);
out_free:
	blkg_free(new_blkg);
	return blkg;
}

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
	return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

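/*
 * Example (illustrative sketch, not part of the original file):
 * blkg_lookup_create() returns ERR_PTR() rather than NULL on failure, so
 * callers must test with IS_ERR().  A caller already holding the RCU read
 * lock and queue_lock might look like:
 *
 *	blkg = blkg_lookup_create(blkcg, q);
 *	if (IS_ERR(blkg)) {
 *		if (PTR_ERR(blkg) == -EBUSY)
 *			...queue is bypassing, fall back or retry...
 *		else
 *			...hard failure, e.g. -ENOMEM or -EINVAL...
 *	}
 */
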
static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	/*
	 * root blkg is destroyed.  Just clear the pointer since
	 * root_rl does not take reference on root blkg.
	 */
	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in an RCU manner.  But having an RCU lock does
	 * not mean that one can access all the fields of blkg and assume
	 * these are valid.  For example, don't try to follow throtl_data
	 * and request queue links.
	 *
	 * Having a reference to blkg under an RCU read lock allows access
	 * only to values local to the group, like group stats and group
	 * rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}

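/*
 * Example (illustrative sketch, not part of the original file):
 * blk_queue_for_each_rl() is the iteration macro built on top of
 * __blk_queue_next_rl().  Assuming its usual definition, walking every
 * request_list of a queue looks like:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q) {
 *		...rl visits q->root_rl first, then each non-root blkg's rl...
 *	}
 */
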
static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node)
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

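/*
 * Example (illustrative sketch, not part of the original file): a policy's
 * read_seq_string callback typically just wraps blkcg_print_blkgs() with
 * one of the prfill helpers below.  The policy, pd type and field names
 * here are hypothetical:
 *
 *	static int my_pol_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *				     struct seq_file *sf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
 *				  &my_blkcg_policy,
 *				  offsetof(struct my_pd, my_stat), true);
 *		return 0;
 *	}
 */
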
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

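/*
 * Example (illustrative sketch, not part of the original file): @off is the
 * byte offset of the counter inside the policy data, which is why the
 * callbacks above compute (void *)pd + off.  A policy embeds its counters
 * in its pd and passes offsetof(); the struct below is hypothetical:
 *
 *	struct my_pd {
 *		struct blkg_policy_data pd;	// must be the first field
 *		struct blkg_rwstat serviced;
 *	};
 *
 *	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &my_blkcg_policy,
 *			  offsetof(struct my_pd, serviced), true);
 */
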
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

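/*
 * Example (illustrative sketch, not part of the original file): a policy's
 * write handler brackets the config update with the prep/finish pair so
 * the blkg stays pinned while the new value is applied.  The handler name
 * and pd type are hypothetical:
 *
 *	static int my_pol_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *				    const char *buf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &my_blkcg_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *		// RCU read lock and queue_lock are held here
 *		...apply ctx.v to ctx.blkg's policy data...
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */
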
struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkcg_files,
	.module = THIS_MODULE,

	/*
	 * blkio subsystem is utterly broken in terms of hierarchy support.
	 * It treats all cgroups equally regardless of where they're
	 * located in the hierarchy - all cgroups are treated as if they're
	 * right below the root.  Fix it and remove the following.
	 */
	.broken_hierarchy = true,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;
	bool preloaded;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* preallocations for root blkg */
	blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	blk_queue_bypass_start(q);

	/* make sure the root blkg exists and count the existing blkgs */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

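/*
 * Example (illustrative sketch, not part of the original file): a driver
 * such as an elevator or throttling implementation activates its policy
 * when it attaches to a queue and deactivates it on teardown.  The policy
 * object is hypothetical:
 *
 *	ret = blkcg_activate_policy(q, &my_blkcg_policy);
 *	if (ret)
 *		return ret;
 *	...
 *	blkcg_deactivate_policy(q, &my_blkcg_policy);
 */
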
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

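/*
 * Example (illustrative sketch, not part of the original file): a policy
 * module pairs registration and unregistration in its init/exit hooks.
 * All names below are hypothetical; pd_size must cover the embedded
 * blkg_policy_data, as checked by blkcg_policy_register():
 *
 *	static struct blkcg_policy my_blkcg_policy = {
 *		.pd_size	= sizeof(struct my_pd),
 *		.cftypes	= my_files,
 *		.pd_init_fn	= my_pd_init,
 *		.pd_exit_fn	= my_pd_exit,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return blkcg_policy_register(&my_blkcg_policy);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		blkcg_policy_unregister(&my_blkcg_policy);
 *	}
 */
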
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);