/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
{
        return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd = blkg->pd[i];

                if (!pd)
                        continue;

                if (pol && pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);

                kfree(pd);
        }

        kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
                                   gfp_t gfp_mask)
{
        struct blkcg_gq *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
        if (!blkg)
                return NULL;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
        blkg->refcnt = 1;

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!blkcg_policy_enabled(q, pol))
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
                if (!pd) {
                        blkg_free(blkg);
                        return NULL;
                }

                blkg->pd[i] = pd;
                pd->blkg = blkg;

                /* invoke per-policy init */
                if (blkcg_policy_enabled(blkg->q, pol))
                        pol->pd_init_fn(blkg);
        }

        return blkg;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                      struct request_queue *q)
{
        struct blkcg_gq *blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        /*
         * Hint didn't match.  Look up from the radix tree.  Note that we
         * may not be holding queue_lock and thus are not sure whether
         * @blkg from blkg_tree has already been removed or not, so we
         * can't update hint to the lookup result.  Leave it to the caller.
         */
        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
        if (blkg && blkg->q == q)
                return blkg;

        return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
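
/*
 * Illustrative usage sketch (not part of this file): callers are expected
 * to hold the RCU read lock across the lookup and any use of the returned
 * blkg, e.g.
 *
 *      rcu_read_lock();
 *      blkg = blkg_lookup(blkcg, q);
 *      if (blkg)
 *              ...;            /- use per-group data such as blkg->pd[plid] -/
 *      rcu_read_unlock();
 *
 * A %NULL return means either that no blkg exists for the pair yet or that
 * @q is bypassing; callers that need a blkg created should instead use
 * blkg_lookup_create() under the queue lock.
 */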
158 | ||
159 | /* | |
160 | * If @new_blkg is %NULL, this function tries to allocate a new one as | |
161 | * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return. | |
162 | */ | |
163 | static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, | |
164 | struct request_queue *q, | |
165 | struct blkcg_gq *new_blkg) | |
166 | { | |
167 | struct blkcg_gq *blkg; | |
168 | int ret; | |
169 | ||
170 | WARN_ON_ONCE(!rcu_read_lock_held()); | |
171 | lockdep_assert_held(q->queue_lock); | |
172 | ||
173 | /* lookup and update hint on success, see __blkg_lookup() for details */ | |
174 | blkg = __blkg_lookup(blkcg, q); | |
175 | if (blkg) { | |
176 | rcu_assign_pointer(blkcg->blkg_hint, blkg); | |
177 | goto out_free; | |
178 | } | |
179 | ||
180 | /* blkg holds a reference to blkcg */ | |
181 | if (!css_tryget(&blkcg->css)) { | |
182 | blkg = ERR_PTR(-EINVAL); | |
183 | goto out_free; | |
184 | } | |
185 | ||
186 | /* allocate */ | |
187 | if (!new_blkg) { | |
188 | new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC); | |
189 | if (unlikely(!new_blkg)) { | |
190 | blkg = ERR_PTR(-ENOMEM); | |
191 | goto out_put; | |
192 | } | |
193 | } | |
194 | blkg = new_blkg; | |
195 | ||
196 | /* insert */ | |
197 | spin_lock(&blkcg->lock); | |
198 | ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); | |
199 | if (likely(!ret)) { | |
200 | hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list); | |
201 | list_add(&blkg->q_node, &q->blkg_list); | |
202 | } | |
203 | spin_unlock(&blkcg->lock); | |
204 | ||
205 | if (!ret) | |
206 | return blkg; | |
207 | ||
208 | blkg = ERR_PTR(ret); | |
209 | out_put: | |
210 | css_put(&blkcg->css); | |
211 | out_free: | |
212 | blkg_free(new_blkg); | |
213 | return blkg; | |
214 | } | |
215 | ||
216 | struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, | |
217 | struct request_queue *q) | |
218 | { | |
219 | /* | |
220 | * This could be the first entry point of blkcg implementation and | |
221 | * we shouldn't allow anything to go through for a bypassing queue. | |
222 | */ | |
223 | if (unlikely(blk_queue_bypass(q))) | |
224 | return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY); | |
225 | return __blkg_lookup_create(blkcg, q, NULL); | |
226 | } | |
227 | EXPORT_SYMBOL_GPL(blkg_lookup_create); | |
228 | ||
229 | static void blkg_destroy(struct blkcg_gq *blkg) | |
230 | { | |
231 | struct blkcg *blkcg = blkg->blkcg; | |
232 | ||
233 | lockdep_assert_held(blkg->q->queue_lock); | |
234 | lockdep_assert_held(&blkcg->lock); | |
235 | ||
236 | /* Something wrong if we are trying to remove same group twice */ | |
237 | WARN_ON_ONCE(list_empty(&blkg->q_node)); | |
238 | WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node)); | |
239 | ||
240 | radix_tree_delete(&blkcg->blkg_tree, blkg->q->id); | |
241 | list_del_init(&blkg->q_node); | |
242 | hlist_del_init_rcu(&blkg->blkcg_node); | |
243 | ||
244 | /* | |
245 | * Both setting lookup hint to and clearing it from @blkg are done | |
246 | * under queue_lock. If it's not pointing to @blkg now, it never | |
247 | * will. Hint assignment itself can race safely. | |
248 | */ | |
249 | if (rcu_dereference_raw(blkcg->blkg_hint) == blkg) | |
250 | rcu_assign_pointer(blkcg->blkg_hint, NULL); | |
251 | ||
252 | /* | |
253 | * Put the reference taken at the time of creation so that when all | |
254 | * queues are gone, group can be destroyed. | |
255 | */ | |
256 | blkg_put(blkg); | |
257 | } | |
258 | ||
259 | /** | |
260 | * blkg_destroy_all - destroy all blkgs associated with a request_queue | |
261 | * @q: request_queue of interest | |
262 | * | |
263 | * Destroy all blkgs associated with @q. | |
264 | */ | |
265 | static void blkg_destroy_all(struct request_queue *q) | |
266 | { | |
267 | struct blkcg_gq *blkg, *n; | |
268 | ||
269 | lockdep_assert_held(q->queue_lock); | |
270 | ||
271 | list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { | |
272 | struct blkcg *blkcg = blkg->blkcg; | |
273 | ||
274 | spin_lock(&blkcg->lock); | |
275 | blkg_destroy(blkg); | |
276 | spin_unlock(&blkcg->lock); | |
277 | } | |
278 | } | |
279 | ||
280 | static void blkg_rcu_free(struct rcu_head *rcu_head) | |
281 | { | |
282 | blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head)); | |
283 | } | |
284 | ||
285 | void __blkg_release(struct blkcg_gq *blkg) | |
286 | { | |
287 | /* release the extra blkcg reference this blkg has been holding */ | |
288 | css_put(&blkg->blkcg->css); | |
289 | ||
        /*
         * A group is freed in rcu manner.  But having an rcu lock does not
         * mean that one can access all the fields of blkg and assume these
         * are valid.  For example, don't try to follow throtl_data and
         * request queue links.
         *
         * Having a reference to blkg under an rcu allows access to only
         * values local to groups like group stats and group rate limits.
         */
        call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
                             u64 val)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
        struct blkcg_gq *blkg;
        struct hlist_node *n;
        int i;

        mutex_lock(&blkcg_pol_mutex);
        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates.  This is a debug feature which shouldn't exist
         * anyway.  If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkcg_policy_enabled(blkg->q, pol) &&
                            pol->pd_reset_stats_fn)
                                pol->pd_reset_stats_fn(blkg);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        mutex_unlock(&blkcg_pol_mutex);
        return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info.dev)
                return dev_name(blkg->q->backing_dev_info.dev);
        return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total)
{
        struct blkcg_gq *blkg;
        struct hlist_node *n;
        u64 total = 0;

        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
        spin_unlock_irq(&blkcg->lock);

        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
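
/*
 * Illustrative sketch (not part of this file): a policy typically wraps
 * blkcg_print_blkgs() in its cftype->read_seq_string callback.  Assuming a
 * hypothetical policy "foo" whose per-blkg policy data embeds a blkg_stat
 * at the offset stored in cft->private, the print function could look like:
 *
 *      static int foo_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *                                struct seq_file *sf)
 *      {
 *              struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *              blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
 *                                &blkcg_policy_foo, cft->private, false);
 *              return 0;
 *      }
 *
 * blkcg_policy_foo and the use of cft->private as the stat offset are
 * assumptions made only to show the intended calling pattern.
 */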
379 | ||
380 | /** | |
381 | * __blkg_prfill_u64 - prfill helper for a single u64 value | |
382 | * @sf: seq_file to print to | |
383 | * @pd: policy private data of interest | |
384 | * @v: value to print | |
385 | * | |
386 | * Print @v to @sf for the device assocaited with @pd. | |
387 | */ | |
388 | u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v) | |
389 | { | |
390 | const char *dname = blkg_dev_name(pd->blkg); | |
391 | ||
392 | if (!dname) | |
393 | return 0; | |
394 | ||
395 | seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v); | |
396 | return v; | |
397 | } | |
398 | EXPORT_SYMBOL_GPL(__blkg_prfill_u64); | |
399 | ||
400 | /** | |
401 | * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat | |
402 | * @sf: seq_file to print to | |
403 | * @pd: policy private data of interest | |
404 | * @rwstat: rwstat to print | |
405 | * | |
406 | * Print @rwstat to @sf for the device assocaited with @pd. | |
407 | */ | |
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat)
{
        static const char *rwstr[] = {
                [BLKG_RWSTAT_READ]      = "Read",
                [BLKG_RWSTAT_WRITE]     = "Write",
                [BLKG_RWSTAT_SYNC]      = "Sync",
                [BLKG_RWSTAT_ASYNC]     = "Async",
        };
        const char *dname = blkg_dev_name(pd->blkg);
        u64 v;
        int i;

        if (!dname)
                return 0;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
                           (unsigned long long)rwstat->cnt[i]);

        v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
        return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
        return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
463 | ||
464 | /** | |
465 | * blkg_conf_prep - parse and prepare for per-blkg config update | |
466 | * @blkcg: target block cgroup | |
467 | * @pol: target policy | |
468 | * @input: input string | |
469 | * @ctx: blkg_conf_ctx to be filled | |
470 | * | |
471 | * Parse per-blkg config update from @input and initialize @ctx with the | |
472 | * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new | |
473 | * value. This function returns with RCU read lock and queue lock held and | |
474 | * must be paired with blkg_conf_finish(). | |
475 | */ | |
476 | int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, | |
477 | const char *input, struct blkg_conf_ctx *ctx) | |
478 | __acquires(rcu) __acquires(disk->queue->queue_lock) | |
479 | { | |
480 | struct gendisk *disk; | |
481 | struct blkcg_gq *blkg; | |
482 | unsigned int major, minor; | |
483 | unsigned long long v; | |
484 | int part, ret; | |
485 | ||
486 | if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3) | |
487 | return -EINVAL; | |
488 | ||
489 | disk = get_gendisk(MKDEV(major, minor), &part); | |
490 | if (!disk || part) | |
491 | return -EINVAL; | |
492 | ||
493 | rcu_read_lock(); | |
494 | spin_lock_irq(disk->queue->queue_lock); | |
495 | ||
496 | if (blkcg_policy_enabled(disk->queue, pol)) | |
497 | blkg = blkg_lookup_create(blkcg, disk->queue); | |
498 | else | |
499 | blkg = ERR_PTR(-EINVAL); | |
500 | ||
501 | if (IS_ERR(blkg)) { | |
502 | ret = PTR_ERR(blkg); | |
503 | rcu_read_unlock(); | |
504 | spin_unlock_irq(disk->queue->queue_lock); | |
505 | put_disk(disk); | |
506 | /* | |
507 | * If queue was bypassing, we should retry. Do so after a | |
508 | * short msleep(). It isn't strictly necessary but queue | |
509 | * can be bypassing for some time and it's always nice to | |
510 | * avoid busy looping. | |
511 | */ | |
512 | if (ret == -EBUSY) { | |
513 | msleep(10); | |
514 | ret = restart_syscall(); | |
515 | } | |
516 | return ret; | |
517 | } | |
518 | ||
519 | ctx->disk = disk; | |
520 | ctx->blkg = blkg; | |
521 | ctx->v = v; | |
522 | return 0; | |
523 | } | |
524 | EXPORT_SYMBOL_GPL(blkg_conf_prep); | |
525 | ||
/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
        spin_unlock_irq(ctx->disk->queue->queue_lock);
        rcu_read_unlock();
        put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
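
/*
 * Illustrative sketch (not part of this file): a policy's write handler
 * pairs the two helpers above around the actual config update.  Assuming a
 * hypothetical policy "foo" that stores the parsed value in its per-blkg
 * policy data, the handler could look like:
 *
 *      static int foo_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *                               const char *buf)
 *      {
 *              struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *              struct blkg_conf_ctx ctx;
 *              int ret;
 *
 *              ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *              if (ret)
 *                      return ret;
 *
 *              (RCU read lock and the queue lock are held here; apply
 *               ctx.v to ctx.blkg's policy data for @blkcg_policy_foo.)
 *
 *              blkg_conf_finish(&ctx);
 *              return 0;
 *      }
 *
 * blkcg_policy_foo and foo_set_limit are assumed names used only to show
 * the prep/finish pairing.
 */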
541 | ||
542 | struct cftype blkcg_files[] = { | |
543 | { | |
544 | .name = "reset_stats", | |
545 | .write_u64 = blkcg_reset_stats, | |
546 | }, | |
547 | { } /* terminate */ | |
548 | }; | |
549 | ||
550 | /** | |
551 | * blkcg_pre_destroy - cgroup pre_destroy callback | |
552 | * @cgroup: cgroup of interest | |
553 | * | |
554 | * This function is called when @cgroup is about to go away and responsible | |
555 | * for shooting down all blkgs associated with @cgroup. blkgs should be | |
556 | * removed while holding both q and blkcg locks. As blkcg lock is nested | |
557 | * inside q lock, this function performs reverse double lock dancing. | |
558 | * | |
559 | * This is the blkcg counterpart of ioc_release_fn(). | |
560 | */ | |
561 | static int blkcg_pre_destroy(struct cgroup *cgroup) | |
562 | { | |
563 | struct blkcg *blkcg = cgroup_to_blkcg(cgroup); | |
564 | ||
565 | spin_lock_irq(&blkcg->lock); | |
566 | ||
567 | while (!hlist_empty(&blkcg->blkg_list)) { | |
568 | struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, | |
569 | struct blkcg_gq, blkcg_node); | |
570 | struct request_queue *q = blkg->q; | |
571 | ||
572 | if (spin_trylock(q->queue_lock)) { | |
573 | blkg_destroy(blkg); | |
574 | spin_unlock(q->queue_lock); | |
575 | } else { | |
576 | spin_unlock_irq(&blkcg->lock); | |
577 | cpu_relax(); | |
578 | spin_lock_irq(&blkcg->lock); | |
579 | } | |
580 | } | |
581 | ||
582 | spin_unlock_irq(&blkcg->lock); | |
583 | return 0; | |
584 | } | |
585 | ||
586 | static void blkcg_destroy(struct cgroup *cgroup) | |
587 | { | |
588 | struct blkcg *blkcg = cgroup_to_blkcg(cgroup); | |
589 | ||
590 | if (blkcg != &blkcg_root) | |
591 | kfree(blkcg); | |
592 | } | |
593 | ||
594 | static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup) | |
595 | { | |
596 | static atomic64_t id_seq = ATOMIC64_INIT(0); | |
597 | struct blkcg *blkcg; | |
598 | struct cgroup *parent = cgroup->parent; | |
599 | ||
600 | if (!parent) { | |
601 | blkcg = &blkcg_root; | |
602 | goto done; | |
603 | } | |
604 | ||
605 | blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); | |
606 | if (!blkcg) | |
607 | return ERR_PTR(-ENOMEM); | |
608 | ||
609 | blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT; | |
610 | blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */ | |
611 | done: | |
612 | spin_lock_init(&blkcg->lock); | |
613 | INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC); | |
614 | INIT_HLIST_HEAD(&blkcg->blkg_list); | |
615 | ||
616 | return &blkcg->css; | |
617 | } | |
618 | ||
619 | /** | |
620 | * blkcg_init_queue - initialize blkcg part of request queue | |
621 | * @q: request_queue to initialize | |
622 | * | |
623 | * Called from blk_alloc_queue_node(). Responsible for initializing blkcg | |
624 | * part of new request_queue @q. | |
625 | * | |
626 | * RETURNS: | |
627 | * 0 on success, -errno on failure. | |
628 | */ | |
629 | int blkcg_init_queue(struct request_queue *q) | |
630 | { | |
631 | might_sleep(); | |
632 | ||
633 | return blk_throtl_init(q); | |
634 | } | |
635 | ||
636 | /** | |
637 | * blkcg_drain_queue - drain blkcg part of request_queue | |
638 | * @q: request_queue to drain | |
639 | * | |
640 | * Called from blk_drain_queue(). Responsible for draining blkcg part. | |
641 | */ | |
642 | void blkcg_drain_queue(struct request_queue *q) | |
643 | { | |
644 | lockdep_assert_held(q->queue_lock); | |
645 | ||
646 | blk_throtl_drain(q); | |
647 | } | |
648 | ||
649 | /** | |
650 | * blkcg_exit_queue - exit and release blkcg part of request_queue | |
651 | * @q: request_queue being released | |
652 | * | |
653 | * Called from blk_release_queue(). Responsible for exiting blkcg part. | |
654 | */ | |
655 | void blkcg_exit_queue(struct request_queue *q) | |
656 | { | |
657 | spin_lock_irq(q->queue_lock); | |
658 | blkg_destroy_all(q); | |
659 | spin_unlock_irq(q->queue_lock); | |
660 | ||
661 | blk_throtl_exit(q); | |
662 | } | |
663 | ||
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, cgrp, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}

struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkcg_create,
        .can_attach = blkcg_can_attach,
        .pre_destroy = blkcg_pre_destroy,
        .destroy = blkcg_destroy,
        .subsys_id = blkio_subsys_id,
        .base_cftypes = blkcg_files,
        .module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
{
        LIST_HEAD(pds);
        struct blkcg_gq *blkg;
        struct blkg_policy_data *pd, *n;
        int cnt = 0, ret;
        bool preloaded;

        if (blkcg_policy_enabled(q, pol))
                return 0;

        /* preallocations for root blkg */
        blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
        if (!blkg)
                return -ENOMEM;

        preloaded = !radix_tree_preload(GFP_KERNEL);

        blk_queue_bypass_start(q);

        /* make sure the root blkg exists and count the existing blkgs */
        spin_lock_irq(q->queue_lock);

        rcu_read_lock();
        blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
        rcu_read_unlock();

        if (preloaded)
                radix_tree_preload_end();

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                goto out_unlock;
        }
        q->root_blkg = blkg;

        list_for_each_entry(blkg, &q->blkg_list, q_node)
                cnt++;

        spin_unlock_irq(q->queue_lock);

        /* allocate policy_data for all existing blkgs */
        while (cnt--) {
                pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
                if (!pd) {
                        ret = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&pd->alloc_node, &pds);
        }

        /*
         * Install the allocated pds.  With @q bypassing, no new blkg
         * should have been created while the queue lock was dropped.
         */
        spin_lock_irq(q->queue_lock);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                if (WARN_ON(list_empty(&pds))) {
                        /* umm... this shouldn't happen, just abort */
                        ret = -ENOMEM;
                        goto out_unlock;
                }
                pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
                list_del_init(&pd->alloc_node);

                /* grab blkcg lock too while installing @pd on @blkg */
                spin_lock(&blkg->blkcg->lock);

                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
                pol->pd_init_fn(blkg);

                spin_unlock(&blkg->blkcg->lock);
        }

        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;
out_unlock:
        spin_unlock_irq(q->queue_lock);
out_free:
        blk_queue_bypass_end(q);
        list_for_each_entry_safe(pd, n, &pds, alloc_node)
                kfree(pd);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
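
/*
 * Illustrative sketch (not part of this file): an elevator or driver that
 * implements a blkcg policy typically activates the policy while setting up
 * its per-queue state and deactivates it on teardown, e.g.
 *
 *      ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *      if (ret)
 *              goto out_free_private_data;
 *      ...
 *      blkcg_deactivate_policy(q, &blkcg_policy_foo);
 *
 * blkcg_policy_foo and out_free_private_data are assumed names; the real
 * callers are the policies registered via blkcg_policy_register() below.
 */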
805 | ||
806 | /** | |
807 | * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue | |
808 | * @q: request_queue of interest | |
809 | * @pol: blkcg policy to deactivate | |
810 | * | |
811 | * Deactivate @pol on @q. Follows the same synchronization rules as | |
812 | * blkcg_activate_policy(). | |
813 | */ | |
814 | void blkcg_deactivate_policy(struct request_queue *q, | |
815 | const struct blkcg_policy *pol) | |
816 | { | |
817 | struct blkcg_gq *blkg; | |
818 | ||
819 | if (!blkcg_policy_enabled(q, pol)) | |
820 | return; | |
821 | ||
822 | blk_queue_bypass_start(q); | |
823 | spin_lock_irq(q->queue_lock); | |
824 | ||
825 | __clear_bit(pol->plid, q->blkcg_pols); | |
826 | ||
827 | /* if no policy is left, no need for blkgs - shoot them down */ | |
828 | if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS)) | |
829 | blkg_destroy_all(q); | |
830 | ||
831 | list_for_each_entry(blkg, &q->blkg_list, q_node) { | |
832 | /* grab blkcg lock too while removing @pd from @blkg */ | |
833 | spin_lock(&blkg->blkcg->lock); | |
834 | ||
835 | if (pol->pd_exit_fn) | |
836 | pol->pd_exit_fn(blkg); | |
837 | ||
838 | kfree(blkg->pd[pol->plid]); | |
839 | blkg->pd[pol->plid] = NULL; | |
840 | ||
841 | spin_unlock(&blkg->blkcg->lock); | |
842 | } | |
843 | ||
844 | spin_unlock_irq(q->queue_lock); | |
845 | blk_queue_bypass_end(q); | |
846 | } | |
847 | EXPORT_SYMBOL_GPL(blkcg_deactivate_policy); | |
848 | ||
849 | /** | |
850 | * blkcg_policy_register - register a blkcg policy | |
851 | * @pol: blkcg policy to register | |
852 | * | |
853 | * Register @pol with blkcg core. Might sleep and @pol may be modified on | |
854 | * successful registration. Returns 0 on success and -errno on failure. | |
855 | */ | |
856 | int blkcg_policy_register(struct blkcg_policy *pol) | |
857 | { | |
858 | int i, ret; | |
859 | ||
860 | if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data))) | |
861 | return -EINVAL; | |
862 | ||
863 | mutex_lock(&blkcg_pol_mutex); | |
864 | ||
865 | /* find an empty slot */ | |
866 | ret = -ENOSPC; | |
867 | for (i = 0; i < BLKCG_MAX_POLS; i++) | |
868 | if (!blkcg_policy[i]) | |
869 | break; | |
870 | if (i >= BLKCG_MAX_POLS) | |
871 | goto out_unlock; | |
872 | ||
873 | /* register and update blkgs */ | |
874 | pol->plid = i; | |
875 | blkcg_policy[i] = pol; | |
876 | ||
877 | /* everything is in place, add intf files for the new policy */ | |
878 | if (pol->cftypes) | |
879 | WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes)); | |
880 | ret = 0; | |
881 | out_unlock: | |
882 | mutex_unlock(&blkcg_pol_mutex); | |
883 | return ret; | |
884 | } | |
885 | EXPORT_SYMBOL_GPL(blkcg_policy_register); | |
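
/*
 * Illustrative sketch (not part of this file): a policy describes itself
 * with a struct blkcg_policy and registers it from its module init.
 * Assuming a hypothetical policy "foo" whose per-blkg data is struct
 * foo_group (which must embed struct blkg_policy_data, as checked above):
 *
 *      static struct blkcg_policy blkcg_policy_foo = {
 *              .pd_size        = sizeof(struct foo_group),
 *              .cftypes        = foo_files,
 *              .pd_init_fn     = foo_pd_init,
 *              .pd_exit_fn     = foo_pd_exit,
 *      };
 *
 *      static int __init foo_init(void)
 *      {
 *              return blkcg_policy_register(&blkcg_policy_foo);
 *      }
 *
 *      static void __exit foo_exit(void)
 *      {
 *              blkcg_policy_unregister(&blkcg_policy_foo);
 *      }
 *
 * All of the "foo" names are assumptions used only to show how the
 * register/unregister pair below is meant to be used.
 */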
886 | ||
887 | /** | |
888 | * blkcg_policy_unregister - unregister a blkcg policy | |
889 | * @pol: blkcg policy to unregister | |
890 | * | |
891 | * Undo blkcg_policy_register(@pol). Might sleep. | |
892 | */ | |
893 | void blkcg_policy_unregister(struct blkcg_policy *pol) | |
894 | { | |
895 | mutex_lock(&blkcg_pol_mutex); | |
896 | ||
897 | if (WARN_ON(blkcg_policy[pol->plid] != pol)) | |
898 | goto out_unlock; | |
899 | ||
900 | /* kill the intf files first */ | |
901 | if (pol->cftypes) | |
902 | cgroup_rm_cftypes(&blkio_subsys, pol->cftypes); | |
903 | ||
904 | /* unregister and update blkgs */ | |
905 | blkcg_policy[pol->plid] = NULL; | |
906 | out_unlock: | |
907 | mutex_unlock(&blkcg_pol_mutex); | |
908 | } | |
909 | EXPORT_SYMBOL_GPL(blkcg_policy_unregister); |