/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);

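/*
 * The update helpers below propagate a configuration change (weight, bps or
 * iops) to the policy that owns @blkg: the policy matching @plid is looked
 * up on blkio_list and notified through its registered callback, if any.
 */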
static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   int plid, unsigned int iops,
					   int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with queue_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the queue_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the queue_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the queue_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);
	BUG_ON(blkio_blkg_idling(stats));

	stats->start_idle_time = sched_clock();
	blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkio_blkg_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time)) {
			u64_stats_update_begin(&stats->syncp);
			stats->idle_time += now - stats->start_idle_time;
			u64_stats_update_end(&stats->syncp);
		}
		blkio_clear_blkg_idling(stats);
	}
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
	    stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE])
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	lockdep_assert_held(blkg->q->queue_lock);

	pd->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_QUEUED], 1, direction, sync);
	blkio_end_empty_time(stats);
	u64_stats_update_end(&stats->syncp);

	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	blkio_check_and_dec_stat(stats->stat_arr[BLKIO_STAT_QUEUED], direction,
				 sync);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	stats->time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	stats->unaccounted_time += unaccounted_time;
#endif
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (pd->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	unsigned long long now = sched_clock();

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	u64_stats_update_begin(&stats->syncp);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_MERGED], 1, direction, sync);
	u64_stats_update_end(&stats->syncp);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
						msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
						alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}

		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	if (!empty)
		goto alloc_stats;
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down. This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q. If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access only to
	 * values local to groups, like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;

	if (pd->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		sc->sectors = 0;
		memset(sc->stat_arr_cpu, 0, sizeof(sc->stat_arr_cpu));
	}
}

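/*
 * Write handler for the cgroup "reset_stats" file.  Walks every blkg of
 * @cgroup and clears the shared and per-cpu counters of each registered
 * policy; queued stats are deliberately left alone.
 */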
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;
	int i;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates. This is a debug feature which shouldn't exist
	 * anyway. If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];
			struct blkio_group_stats *stats = &pd->stats;

			/* queued stats shouldn't be cleared */
			for (i = 0; i < ARRAY_SIZE(stats->stat_arr); i++)
				if (i != BLKIO_STAT_QUEUED)
					memset(stats->stat_arr[i], 0,
					       sizeof(stats->stat_arr[i]));
			stats->time = 0;
#ifdef CONFIG_DEBUG_BLK_CGROUP
			memset((void *)stats + BLKG_STATS_DEBUG_CLEAR_START, 0,
			       BLKG_STATS_DEBUG_CLEAR_SIZE);
#endif
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

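/*
 * Build the key used when filling stats into a cgroup map: the device name
 * optionally followed by " Read"/" Write"/" Sync"/" Async"/" Total"
 * depending on @type, truncated to @chars_left bytes.
 */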
static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
			       char *str, int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%s", dname);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	if (pd->stats_cpu == NULL)
		return val;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
				   struct cgroup_map_cb *cb, const char *dname,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, plid, type, 0);
		blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
		cb->fill(cb, key_str, val);
		return val;
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
		blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
			       struct cgroup_map_cb *cb, const char *dname,
			       enum stat_type type)
{
	struct blkio_group_stats *stats = &blkg->pd[plid]->stats;
	uint64_t v = 0, disk_total = 0;
	char key_str[MAX_KEY_LEN];
	unsigned int sync_start;
	int st;

	if (type >= BLKIO_STAT_ARR_NR) {
		do {
			sync_start = u64_stats_fetch_begin(&stats->syncp);
			switch (type) {
			case BLKIO_STAT_TIME:
				v = stats->time;
				break;
#ifdef CONFIG_DEBUG_BLK_CGROUP
			case BLKIO_STAT_UNACCOUNTED_TIME:
				v = stats->unaccounted_time;
				break;
			case BLKIO_STAT_AVG_QUEUE_SIZE: {
				uint64_t samples = stats->avg_queue_size_samples;

				if (samples) {
					v = stats->avg_queue_size_sum;
					do_div(v, samples);
				}
				break;
			}
			case BLKIO_STAT_IDLE_TIME:
				v = stats->idle_time;
				break;
			case BLKIO_STAT_EMPTY_TIME:
				v = stats->empty_time;
				break;
			case BLKIO_STAT_DEQUEUE:
				v = stats->dequeue;
				break;
			case BLKIO_STAT_GROUP_WAIT_TIME:
				v = stats->group_wait_time;
				break;
#endif
			default:
				WARN_ON_ONCE(1);
			}
		} while (u64_stats_fetch_retry(&stats->syncp, sync_start));

		blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
		cb->fill(cb, key_str, v);
		return v;
	}

	for (st = BLKIO_STAT_READ; st < BLKIO_STAT_TOTAL; st++) {
		do {
			sync_start = u64_stats_fetch_begin(&stats->syncp);
			v = stats->stat_arr[type][st];
		} while (u64_stats_fetch_retry(&stats->syncp, sync_start));

		blkio_get_key_name(st, dname, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, v);
		if (st == BLKIO_STAT_READ || st == BLKIO_STAT_WRITE)
			disk_total += v;
	}

	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

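/*
 * Parse a per-device configuration string of the form "MAJ:MIN VAL" and
 * apply it to the matching blkg for @plid/@fileid, creating the blkg if
 * needed.  Returns 0 on success or a negative errno.
 */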
static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	struct blkg_policy_data *pd;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent too many fields from being input */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	pd = blkg->pd[plid];

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		pd->conf.weight = temp;
		blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch(fileid) {
		case BLKIO_THROTL_read_bps_device:
			pd->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			pd->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If queue was bypassing, we should retry. Do so after a short
	 * msleep(). It isn't strictly necessary but queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

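/*
 * Print the per-device configuration line for @blkg into @m.  Note that the
 * THROTL read_* cases intentionally fall through to the matching write_*
 * cases after setting rw = READ.
 */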
static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
				   struct seq_file *m)
{
	int plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);
	struct blkg_policy_data *pd = blkg->pd[plid];
	const char *dname = blkg_dev_name(blkg);
	int rw = WRITE;

	if (!dname)
		return;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if (pd->conf.weight)
			seq_printf(m, "%s\t%u\n",
				   dname, pd->conf.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			rw = READ;
		case BLKIO_THROTL_write_bps_device:
			if (pd->conf.bps[rw])
				seq_printf(m, "%s\t%llu\n",
					   dname, pd->conf.bps[rw]);
			break;
		case BLKIO_THROTL_read_iops_device:
			rw = READ;
		case BLKIO_THROTL_write_iops_device:
			if (pd->conf.iops[rw])
				seq_printf(m, "%s\t%u\n",
					   dname, pd->conf.iops[rw]);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
			    struct seq_file *m)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		blkio_print_group_conf(cft, blkg, m);
	spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch(name){
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
				 struct cftype *cft, struct cgroup_map_cb *cb,
				 enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = blkg_dev_name(blkg);
		int plid = BLKIOFILE_POLICY(cft->private);

		if (!dname)
			continue;
		if (pcpu)
			cgroup_total += blkio_get_stat_cpu(blkg, plid,
							   cb, dname, type);
		else
			cgroup_total += blkio_get_stat(blkg, plid,
						       cb, dname, type);
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						     BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_MERGED, 1, 0);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch(name){
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

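/*
 * Update the cgroup-wide default weight and propagate it to every blkg that
 * has no per-device weight override.
 */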
static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[plid];

		if (!pd->conf.weight)
			blkio_update_group_weight(blkg, plid, blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, plid, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},

	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @subsys: cgroup subsys
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is responsible
 * for shooting down all blkgs associated with @cgroup. blkgs should be
 * removed while holding both q and blkcg locks. As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
			       struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}

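/*
 * blkcg_bypass_start/end bracket policy (un)registration: every queue is put
 * into bypass mode and its non-root blkgs are destroyed, and all_q_mutex is
 * held across the pair so the queue list stays stable.
 */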
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);