/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
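/*
 * For example, BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 * BLKIO_THROTL_read_bps_device) packs the owning policy id into the high
 * 16 bits and the file attribute into the low 16 bits; the two accessor
 * macros above undo that packing.
 */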

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

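/* Map a cgroup to its blkio_cgroup via the blkio subsystem state. */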
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

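/* Map a task to the blkio_cgroup it is currently attached to. */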
struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

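/*
 * Notify the policy that owns @blkg of a weight change; policies that do
 * not implement the callback are skipped.
 */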
static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   int plid, unsigned int iops,
					   int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable depending on the request type.
 * BUGs if the value is already zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&pd->stats));
	pd->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&pd->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * The group is already marked empty. This can happen if a cfqq got a
	 * new request in the parent group and moved to this group while being
	 * added to the service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	pd->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&pd->stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	pd->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	pd->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * Should be called under the RCU read lock or the queue lock to make sure
 * the blkg pointer is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disable interrupts to provide mutual exclusion between two
	 * writes on the same CPU. It probably is not needed on 64-bit;
	 * that case is not optimized yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &pd->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disable interrupts to provide mutual exclusion between two
	 * writes on the same CPU. It probably is not needed on 64-bit;
	 * that case is not optimized yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
				direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 *
 * FIXME: Should be called with the queue locked but currently isn't due to
 * percpu stat breakage.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->q, q);
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* broken, read comment in the callsite */
		pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
		if (!pd->stats_cpu) {
			blkg_free(blkg);
			return NULL;
		}
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}

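/*
 * Look up the blkg for @blkcg on @q, allocating and inserting one if it
 * does not exist yet.  Expects the RCU read lock and q->queue_lock to be
 * held; both are dropped and reacquired around the allocation (see the
 * FIXME below).  Returns an ERR_PTR() on failure.
 */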
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg, *new_blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 *
	 * FIXME: The following is broken. Percpu memory allocation
	 * requires %GFP_KERNEL context and can't be performed from IO
	 * path. Allocation here should inherently be atomic and the
	 * following lock dancing can be removed once the broken percpu
	 * allocation is fixed.
	 */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	new_blkg = blkg_alloc(blkcg, q);

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	/* did bypass get turned on in between? */
	if (unlikely(blk_queue_bypass(q)) && !for_root) {
		blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
		goto out;
	}

	/* did someone beat us to it? */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(blkg))
		goto out;

	/* did alloc fail? */
	if (unlikely(!new_blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	swap(blkg, new_blkg);

	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	q->nr_blkgs++;

	spin_unlock(&blkcg->lock);
out:
	blkg_free(new_blkg);
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
 * returns 1, indicating that the blkio_group was unhashed by the time we
 * got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg = blkg->blkcg;
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

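/*
 * Unlink @blkg from its request_queue and drop the reference taken at
 * creation time.  Must be called with the queue lock held.
 */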
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;

	lockdep_assert_held(q->queue_lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	list_del_init(&blkg->q_node);

	WARN_ON_ONCE(q->nr_blkgs <= 0);
	q->nr_blkgs--;

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down.  This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

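/*
 * Destroy all blkgs on @q.  When @destroy_root is false, the root
 * cgroup's blkg is left in place.
 */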
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	while (true) {
		bool done = true;

		spin_lock_irq(q->queue_lock);

		list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
			/* skip root? */
			if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
				continue;

			/*
			 * If cgroup removal path got to blk_group first
			 * and removed it from cgroup list, then it will
			 * take care of destroying cfqg also.
			 */
			if (!blkiocg_del_blkio_group(blkg))
				blkg_destroy(blkg);
			else
				done = false;
		}

		spin_unlock_irq(q->queue_lock);

		/*
		 * Group list may not be empty if we raced cgroup removal
		 * and lost.  cgroup removal is guaranteed to make forward
		 * progress and retrying after a while is enough.  This
		 * ugliness is scheduled to be removed after locking
		 * update.
		 */
		if (done)
			break;

		msleep(10);	/* just some random duration I like */
	}
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in RCU manner. But having an RCU lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an RCU read lock allows access
	 * only to values local to groups, like group stats and group rate
	 * limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

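/* Zero the per-cpu stats of @blkg for policy @plid on all possible CPUs. */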
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;
	/*
	 * Note: On a 64-bit arch this should not be an issue. On a 32-bit
	 * arch this can return an inconsistent value, as a 64-bit update on
	 * 32-bit is non-atomic. Handling that corner case would make the
	 * code very complicated (sending IPIs to CPUs, taking care of stats
	 * of offline CPUs, etc.).
	 *
	 * Resetting stats is more of a debug feature anyway and this sounds
	 * like a corner case, so the code is not being complicated until
	 * this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(pd->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];

			spin_lock(&blkg->stats_lock);
			stats = &pd->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
			idling = blkio_blkg_idling(stats);
			waiting = blkio_blkg_waiting(stats);
			empty = blkio_blkg_empty(stats);
#endif
			for (i = 0; i < BLKIO_STAT_TOTAL; i++)
				queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
			memset(stats, 0, sizeof(struct blkio_group_stats));
			for (i = 0; i < BLKIO_STAT_TOTAL; i++)
				stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
			if (idling) {
				blkio_mark_blkg_idling(stats);
				stats->start_idle_time = now;
			}
			if (waiting) {
				blkio_mark_blkg_waiting(stats);
				stats->start_group_wait_time = now;
			}
			if (empty) {
				blkio_mark_blkg_empty(stats);
				stats->start_empty_time = now;
			}
#endif
			spin_unlock(&blkg->stats_lock);

			/* Reset Per cpu stats which don't take blkg->stats_lock */
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

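/*
 * Build the "<devname> <Read|Write|Sync|Async|Total>" key used when
 * emitting per-device stats.  With @diskname_only, the suffix is omitted.
 */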
static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
			       char *str, int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%s", dname);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format\n");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, const char *dname)
{
	blkio_get_key_name(0, dname, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

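/*
 * Sum one per-cpu counter over all possible CPUs.  The u64_stats
 * begin/retry pair keeps the 64-bit reads consistent on 32-bit SMP.
 */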
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
				   struct cgroup_map_cb *cb, const char *dname,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, plid, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
				       dname);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
		blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
			       struct cgroup_map_cb *cb, const char *dname,
			       enum stat_type type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.time, cb, dname);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.unaccounted_time, cb, dname);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = pd->stats.avg_queue_size_sum;
		uint64_t samples = pd->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
				       sum, cb, dname);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.group_wait_time, cb, dname);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.idle_time, cb, dname);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.empty_time, cb, dname);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					pd->stats.dequeue, cb, dname);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
	     sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		cb->fill(cb, key_str, pd->stats.stat_arr[type][sub_type]);
	}
	disk_total = pd->stats.stat_arr[type][BLKIO_STAT_READ] +
			pd->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

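/*
 * Parse a "MAJ:MIN VAL" rule from @buf, look up (or create) the blkg for
 * that device's queue, and apply the value as a weight or throttle limit
 * depending on @plid and @fileid.
 */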
static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	struct blkg_policy_data *pd;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	pd = blkg->pd[plid];

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		pd->conf.weight = temp;
		blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			pd->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			pd->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If queue was bypassing, we should retry.  Do so after a short
	 * msleep().  It isn't strictly necessary but queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
				   struct seq_file *m)
{
	int plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);
	struct blkg_policy_data *pd = blkg->pd[plid];
	const char *dname = blkg_dev_name(blkg);
	int rw = WRITE;

	if (!dname)
		return;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if (pd->conf.weight)
			seq_printf(m, "%s\t%u\n",
				   dname, pd->conf.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_bps_device:
			if (pd->conf.bps[rw])
				seq_printf(m, "%s\t%llu\n",
					   dname, pd->conf.bps[rw]);
			break;
		case BLKIO_THROTL_read_iops_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_iops_device:
			if (pd->conf.iops[rw])
				seq_printf(m, "%s\t%u\n",
					   dname, pd->conf.iops[rw]);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
			    struct seq_file *m)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		blkio_print_group_conf(cft, blkg, m);
	spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
				 struct cftype *cft, struct cgroup_map_cb *cb,
				 enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = blkg_dev_name(blkg);
		int plid = BLKIOFILE_POLICY(cft->private);

		if (!dname)
			continue;
		if (pcpu) {
			cgroup_total += blkio_get_stat_cpu(blkg, plid,
							   cb, dname, type);
		} else {
			spin_lock_irq(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, plid,
						       cb, dname, type);
			spin_unlock_irq(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

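/*
 * Update the cgroup-wide default weight and propagate it to every group
 * that does not have a per-device weight override.
 */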
static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[plid];

		if (!pd->conf.weight)
			blkio_update_group_weight(blkg, plid, blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, plid, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

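/*
 * Control files exposed in each blkio cgroup directory.  The .private
 * field packs the owning policy and the attribute via BLKIOFILE_PRIVATE().
 */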
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
			       struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	struct request_queue *q;

	rcu_read_lock();

	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				   blkcg_node);
		q = rcu_dereference(blkg->q);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		spin_lock_irqsave(q->queue_lock, flags);
		blkg_destroy(blkg);
		spin_unlock_irqrestore(q->queue_lock, flags);
		spin_unlock(&blkio_list_lock);
	} while (1);

	rcu_read_unlock();

	return 0;
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}

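/*
 * Put every known request_queue into bypass mode and shoot down all
 * non-root blkgs.  Paired with blkcg_bypass_end(); all_q_mutex is held
 * across the pair so the queue list cannot change in between.
 */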
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

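/*
 * Register a policy.  Queues are bypassed while the policy array and list
 * are updated, and the root blkg's policy data is rebuilt for each queue.
 */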
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);