/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
                                                  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
                              struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
                           struct cgroup_taskset *);
static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)       (((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)           (((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)             ((val) & 0xffff)

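/*
 * Worked example of this encoding (purely arithmetic, no particular enum
 * values implied): packing policy id 1 with attribute 2 gives
 * BLKIOFILE_PRIVATE(1, 2) == (1 << 16) | 2 == 0x10002, from which
 * BLKIOFILE_POLICY() recovers 1 and BLKIOFILE_ATTR() recovers 2. The
 * scheme works as long as both halves fit in 16 bits.
 */
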
struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .attach = blkiocg_attach,
        .pre_destroy = blkiocg_pre_destroy,
        .destroy = blkiocg_destroy,
        .populate = blkiocg_populate,
        .subsys_id = blkio_subsys_id,
        .module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
        return container_of(task_subsys_state(tsk, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
        struct blkio_policy_type *blkiop;

        list_for_each_entry(blkiop, &blkio_list, list) {
                /* If this policy does not own the blkg, do not send updates */
                if (blkiop->plid != blkg->plid)
                        continue;
                if (blkiop->ops.blkio_update_group_weight_fn)
                        blkiop->ops.blkio_update_group_weight_fn(blkg->q,
                                                                 blkg, weight);
        }
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
                                          int fileid)
{
        struct blkio_policy_type *blkiop;

        list_for_each_entry(blkiop, &blkio_list, list) {

                /* If this policy does not own the blkg, do not send updates */
                if (blkiop->plid != blkg->plid)
                        continue;

                if (fileid == BLKIO_THROTL_read_bps_device
                    && blkiop->ops.blkio_update_group_read_bps_fn)
                        blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
                                                                   blkg, bps);

                if (fileid == BLKIO_THROTL_write_bps_device
                    && blkiop->ops.blkio_update_group_write_bps_fn)
                        blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
                                                                    blkg, bps);
        }
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
                                           unsigned int iops, int fileid)
{
        struct blkio_policy_type *blkiop;

        list_for_each_entry(blkiop, &blkio_list, list) {

                /* If this policy does not own the blkg, do not send updates */
                if (blkiop->plid != blkg->plid)
                        continue;

                if (fileid == BLKIO_THROTL_read_iops_device
                    && blkiop->ops.blkio_update_group_read_iops_fn)
                        blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
                                                                    blkg, iops);

                if (fileid == BLKIO_THROTL_write_iops_device
                    && blkiop->ops.blkio_update_group_write_iops_fn)
                        blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
                                                                     blkg, iops);
        }
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
                           bool sync)
{
        if (direction)
                stat[BLKIO_STAT_WRITE] += add;
        else
                stat[BLKIO_STAT_READ] += add;
        if (sync)
                stat[BLKIO_STAT_SYNC] += add;
        else
                stat[BLKIO_STAT_ASYNC] += add;
}

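/*
 * Illustrative example: a synchronous write (direction == true, sync ==
 * true) bumps both stat[BLKIO_STAT_WRITE] and stat[BLKIO_STAT_SYNC], so
 * every event is counted once on the read/write axis and once on the
 * sync/async axis; the "Total" rows shown to userspace are later derived
 * as READ + WRITE.
 */
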
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
        if (direction) {
                BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
                stat[BLKIO_STAT_WRITE]--;
        } else {
                BUG_ON(stat[BLKIO_STAT_READ] == 0);
                stat[BLKIO_STAT_READ]--;
        }
        if (sync) {
                BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
                stat[BLKIO_STAT_SYNC]--;
        } else {
                BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
                stat[BLKIO_STAT_ASYNC]--;
        }
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
                                            struct blkio_group *curr_blkg)
{
        if (blkio_blkg_waiting(&blkg->stats))
                return;
        if (blkg == curr_blkg)
                return;
        blkg->stats.start_group_wait_time = sched_clock();
        blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
        unsigned long long now;

        if (!blkio_blkg_waiting(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_group_wait_time))
                stats->group_wait_time += now - stats->start_group_wait_time;
        blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
        unsigned long long now;

        if (!blkio_blkg_empty(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_empty_time))
                stats->empty_time += now - stats->start_empty_time;
        blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        BUG_ON(blkio_blkg_idling(&blkg->stats));
        blkg->stats.start_idle_time = sched_clock();
        blkio_mark_blkg_idling(&blkg->stats);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
        unsigned long flags;
        unsigned long long now;
        struct blkio_group_stats *stats;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        if (blkio_blkg_idling(stats)) {
                now = sched_clock();
                if (time_after64(now, stats->start_idle_time))
                        stats->idle_time += now - stats->start_idle_time;
                blkio_clear_blkg_idling(stats);
        }
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

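/*
 * The waiting/empty/idle accounting above all follows one pattern: mark
 * the state and record a start timestamp, then on the closing event add
 * (now - start) to the accumulated counter and clear the flag. The
 * time_after64() checks exist presumably because sched_clock() is not
 * guaranteed monotonic across cpus; a backwards-looking interval is
 * simply dropped rather than added as a huge unsigned value.
 */
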
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
        unsigned long flags;
        struct blkio_group_stats *stats;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        stats->avg_queue_size_sum +=
                        stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
                        stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
        stats->avg_queue_size_samples++;
        blkio_update_group_wait_time(stats);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
        unsigned long flags;
        struct blkio_group_stats *stats;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;

        if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
            stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
                spin_unlock_irqrestore(&blkg->stats_lock, flags);
                return;
        }

        /*
         * group is already marked empty. This can happen if cfqq got new
         * request in parent group and moved to this group while being added
         * to service tree. Just ignore the event and move on.
         */
        if (blkio_blkg_empty(stats)) {
                spin_unlock_irqrestore(&blkg->stats_lock, flags);
                return;
        }

        stats->start_empty_time = sched_clock();
        blkio_mark_blkg_empty(stats);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
                                  unsigned long dequeue)
{
        blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
                                        struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
                                 struct blkio_group *curr_blkg, bool direction,
                                 bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
                       sync);
        blkio_end_empty_time(&blkg->stats);
        blkio_set_start_group_wait_time(blkg, curr_blkg);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
                                    bool direction, bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
                                 direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
                                   unsigned long unaccounted_time)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg->stats.unaccounted_time += unaccounted_time;
#endif
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
                                   uint64_t bytes, bool direction, bool sync)
{
        struct blkio_group_stats_cpu *stats_cpu;
        unsigned long flags;

        /*
         * Disabling interrupts to provide mutual exclusion between two
         * writes on same cpu. It probably is not needed for 64bit. Not
         * optimizing that case yet.
         */
        local_irq_save(flags);

        stats_cpu = this_cpu_ptr(blkg->stats_cpu);

        u64_stats_update_begin(&stats_cpu->syncp);
        stats_cpu->sectors += bytes >> 9;
        blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
                       1, direction, sync);
        blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
                       bytes, direction, sync);
        u64_stats_update_end(&stats_cpu->syncp);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

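/*
 * Writer side of the u64_stats_sync protocol, sketched: on 32bit the
 * syncp acts as a seqcount, so u64_stats_update_begin/end bracket the
 * 64bit additions and a reader that observes a torn value retries (see
 * blkio_read_stat_cpu() below); on 64bit both calls compile to nothing
 * and the irq-off section alone serializes writers on the local cpu.
 * The ">> 9" converts bytes to 512-byte sectors.
 */
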
void blkiocg_update_completion_stats(struct blkio_group *blkg,
        uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
        struct blkio_group_stats *stats;
        unsigned long flags;
        unsigned long long now = sched_clock();

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        if (time_after64(now, io_start_time))
                blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
                               now - io_start_time, direction, sync);
        if (time_after64(io_start_time, start_time))
                blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
                               io_start_time - start_time, direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

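/*
 * Request timeline, for reference: start_time is when the request was
 * queued and io_start_time when it was dispatched to the device, so
 * io_start_time - start_time is accounted as wait time and
 * now - io_start_time as service time.
 */
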
/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
                                    bool sync)
{
        struct blkio_group_stats_cpu *stats_cpu;
        unsigned long flags;

        /*
         * Disabling interrupts to provide mutual exclusion between two
         * writes on same cpu. It probably is not needed for 64bit. Not
         * optimizing that case yet.
         */
        local_irq_save(flags);

        stats_cpu = this_cpu_ptr(blkg->stats_cpu);

        u64_stats_update_begin(&stats_cpu->syncp);
        blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
                       direction, sync);
        u64_stats_update_end(&stats_cpu->syncp);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
                                       struct request_queue *q,
                                       enum blkio_policy_id plid,
                                       bool for_root)
        __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct blkio_policy_type *pol = blkio_policy[plid];
        struct blkio_group *blkg, *new_blkg;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         * The following can be removed if blkg lookup is guaranteed to
         * fail on a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)) && !for_root)
                return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

        blkg = blkg_lookup(blkcg, q, plid);
        if (blkg)
                return blkg;

        /* blkg holds a reference to blkcg */
        if (!css_tryget(&blkcg->css))
                return ERR_PTR(-EINVAL);

        /*
         * Allocate and initialize.
         *
         * FIXME: The following is broken. Percpu memory allocation
         * requires %GFP_KERNEL context and can't be performed from IO
         * path. Allocation here should inherently be atomic and the
         * following lock dancing can be removed once the broken percpu
         * allocation is fixed.
         */
        spin_unlock_irq(q->queue_lock);
        rcu_read_unlock();

        new_blkg = pol->ops.blkio_alloc_group_fn(q, blkcg);
        if (new_blkg) {
                new_blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);

                spin_lock_init(&new_blkg->stats_lock);
                rcu_assign_pointer(new_blkg->q, q);
                new_blkg->blkcg = blkcg;
                new_blkg->plid = plid;
                cgroup_path(blkcg->css.cgroup, new_blkg->path,
                            sizeof(new_blkg->path));
        } else {
                css_put(&blkcg->css);
        }

        rcu_read_lock();
        spin_lock_irq(q->queue_lock);

        /* did bypass get turned on inbetween? */
        if (unlikely(blk_queue_bypass(q)) && !for_root) {
                blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
                goto out;
        }

        /* did someone beat us to it? */
        blkg = blkg_lookup(blkcg, q, plid);
        if (unlikely(blkg))
                goto out;

        /* did alloc fail? */
        if (unlikely(!new_blkg || !new_blkg->stats_cpu)) {
                blkg = ERR_PTR(-ENOMEM);
                goto out;
        }

        /* insert */
        spin_lock(&blkcg->lock);
        swap(blkg, new_blkg);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        pol->ops.blkio_link_group_fn(q, blkg);
        spin_unlock(&blkcg->lock);
out:
        if (new_blkg) {
                free_percpu(new_blkg->stats_cpu);
                kfree(new_blkg);
                css_put(&blkcg->css);
        }
        return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

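/*
 * The flow above, in short: look up first; on a miss, drop queue_lock and
 * the RCU read lock so the percpu allocation can sleep, then reacquire
 * both and re-check everything that may have changed in the window
 * (bypass mode, a racing insertion, allocation failure) before inserting.
 * After the swap(), new_blkg points either at the unused preallocation or
 * at NULL, so the single cleanup path at out: frees exactly whatever was
 * not inserted.
 */
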
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        hlist_del_init_rcu(&blkg->blkcg_node);
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        struct blkio_cgroup *blkcg = blkg->blkcg;
        unsigned long flags;
        int ret = 1;

        spin_lock_irqsave(&blkcg->lock, flags);
        if (!hlist_unhashed(&blkg->blkcg_node)) {
                __blkiocg_del_blkio_group(blkg);
                ret = 0;
        }
        spin_unlock_irqrestore(&blkcg->lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
                                struct request_queue *q,
                                enum blkio_policy_id plid)
{
        struct blkio_group *blkg;
        struct hlist_node *n;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (blkg->q == q && blkg->plid == plid)
                        return blkg;
        return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

void blkg_destroy_all(struct request_queue *q)
{
        struct blkio_policy_type *pol;

        while (true) {
                bool done = true;

                spin_lock(&blkio_list_lock);
                spin_lock_irq(q->queue_lock);

                /*
                 * clear_queue_fn() might return with non-empty group list
                 * if it raced cgroup removal and lost. cgroup removal is
                 * guaranteed to make forward progress and retrying after a
                 * while is enough. This ugliness is scheduled to be
                 * removed after locking update.
                 */
                list_for_each_entry(pol, &blkio_list, list)
                        if (!pol->ops.blkio_clear_queue_fn(q))
                                done = false;

                spin_unlock_irq(q->queue_lock);
                spin_unlock(&blkio_list_lock);

                if (done)
                        break;

                msleep(10);     /* just some random duration I like */
        }
}

static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
        struct blkio_group_stats_cpu *stats_cpu;
        int i, j, k;
        /*
         * Note: On 64 bit arch this should not be an issue. This has the
         * possibility of returning some inconsistent value on 32bit arch
         * as 64bit update on 32bit is non atomic. Taking care of this
         * corner case makes code very complicated, like sending IPIs to
         * cpus, taking care of stats of offline cpus etc.
         *
         * reset stats is anyway more of a debug feature and this sounds a
         * corner case. So I am not complicating the code yet until and
         * unless this becomes a real issue.
         */
        for_each_possible_cpu(i) {
                stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
                stats_cpu->sectors = 0;
                for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
                        for (k = 0; k < BLKIO_STAT_TOTAL; k++)
                                stats_cpu->stat_arr_cpu[j][k] = 0;
        }
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct blkio_group_stats *stats;
        struct hlist_node *n;
        uint64_t queued[BLKIO_STAT_TOTAL];
        int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
        bool idling, waiting, empty;
        unsigned long long now = sched_clock();
#endif

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                spin_lock(&blkg->stats_lock);
                stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
                idling = blkio_blkg_idling(stats);
                waiting = blkio_blkg_waiting(stats);
                empty = blkio_blkg_empty(stats);
#endif
                for (i = 0; i < BLKIO_STAT_TOTAL; i++)
                        queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
                memset(stats, 0, sizeof(struct blkio_group_stats));
                for (i = 0; i < BLKIO_STAT_TOTAL; i++)
                        stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
                if (idling) {
                        blkio_mark_blkg_idling(stats);
                        stats->start_idle_time = now;
                }
                if (waiting) {
                        blkio_mark_blkg_waiting(stats);
                        stats->start_group_wait_time = now;
                }
                if (empty) {
                        blkio_mark_blkg_empty(stats);
                        stats->start_empty_time = now;
                }
#endif
                spin_unlock(&blkg->stats_lock);

                /* Reset Per cpu stats which don't take blkg->stats_lock */
                blkio_reset_stats_cpu(blkg);
        }

        spin_unlock_irq(&blkcg->lock);
        return 0;
}

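/*
 * Note how the reset preserves state that must stay consistent with
 * requests still in flight: the QUEUED counters are saved and restored
 * around the memset() (they are decremented on dequeue and would
 * otherwise trip the BUG_ON in blkio_check_and_dec_stat()), and the
 * idling/waiting/empty flags are re-marked with a fresh timestamp so an
 * in-progress interval restarts at the reset instead of being lost or
 * double counted.
 */
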
static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
                               char *str, int chars_left, bool diskname_only)
{
        snprintf(str, chars_left, "%s", dname);
        chars_left -= strlen(str);
        if (chars_left <= 0) {
                printk(KERN_WARNING
                        "Possibly incorrect cgroup stat display format");
                return;
        }
        if (diskname_only)
                return;
        switch (type) {
        case BLKIO_STAT_READ:
                strlcat(str, " Read", chars_left);
                break;
        case BLKIO_STAT_WRITE:
                strlcat(str, " Write", chars_left);
                break;
        case BLKIO_STAT_SYNC:
                strlcat(str, " Sync", chars_left);
                break;
        case BLKIO_STAT_ASYNC:
                strlcat(str, " Async", chars_left);
                break;
        case BLKIO_STAT_TOTAL:
                strlcat(str, " Total", chars_left);
                break;
        default:
                strlcat(str, " Invalid", chars_left);
        }
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
                                struct cgroup_map_cb *cb, const char *dname)
{
        blkio_get_key_name(0, dname, str, chars_left, true);
        cb->fill(cb, str, val);
        return val;
}


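/*
 * Resulting key format, illustratively: for a device whose bdi is named
 * "8:16", the per-type rows read "8:16 Read", "8:16 Write", "8:16 Sync",
 * "8:16 Async" and "8:16 Total", while diskname_only stats such as
 * "time" and "sectors" emit the bare device name as the key.
 */
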
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
                        enum stat_type_cpu type, enum stat_sub_type sub_type)
{
        int cpu;
        struct blkio_group_stats_cpu *stats_cpu;
        u64 val = 0, tval;

        for_each_possible_cpu(cpu) {
                unsigned int start;
                stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

                do {
                        start = u64_stats_fetch_begin(&stats_cpu->syncp);
                        if (type == BLKIO_STAT_CPU_SECTORS)
                                tval = stats_cpu->sectors;
                        else
                                tval = stats_cpu->stat_arr_cpu[type][sub_type];
                } while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

                val += tval;
        }

        return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
                                   struct cgroup_map_cb *cb, const char *dname,
                                   enum stat_type_cpu type)
{
        uint64_t disk_total, val;
        char key_str[MAX_KEY_LEN];
        enum stat_sub_type sub_type;

        if (type == BLKIO_STAT_CPU_SECTORS) {
                val = blkio_read_stat_cpu(blkg, type, 0);
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
                                       dname);
        }

        for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
                        sub_type++) {
                blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
                                   false);
                val = blkio_read_stat_cpu(blkg, type, sub_type);
                cb->fill(cb, key_str, val);
        }

        disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
                        blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

        blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
                           false);
        cb->fill(cb, key_str, disk_total);
        return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
                               struct cgroup_map_cb *cb, const char *dname,
                               enum stat_type type)
{
        uint64_t disk_total;
        char key_str[MAX_KEY_LEN];
        enum stat_sub_type sub_type;

        if (type == BLKIO_STAT_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                       blkg->stats.time, cb, dname);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        if (type == BLKIO_STAT_UNACCOUNTED_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                       blkg->stats.unaccounted_time, cb, dname);
        if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
                uint64_t sum = blkg->stats.avg_queue_size_sum;
                uint64_t samples = blkg->stats.avg_queue_size_samples;
                if (samples)
                        do_div(sum, samples);
                else
                        sum = 0;
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                       sum, cb, dname);
        }
        if (type == BLKIO_STAT_GROUP_WAIT_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                       blkg->stats.group_wait_time, cb, dname);
        if (type == BLKIO_STAT_IDLE_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                       blkg->stats.idle_time, cb, dname);
        if (type == BLKIO_STAT_EMPTY_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                       blkg->stats.empty_time, cb, dname);
        if (type == BLKIO_STAT_DEQUEUE)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                       blkg->stats.dequeue, cb, dname);
#endif

        for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
                        sub_type++) {
                blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
                                   false);
                cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
        }
        disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
                        blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
        blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
                           false);
        cb->fill(cb, key_str, disk_total);
        return disk_total;
}

static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
                                      int fileid, struct blkio_cgroup *blkcg)
{
        struct gendisk *disk = NULL;
        struct blkio_group *blkg = NULL;
        char *s[4], *p, *major_s = NULL, *minor_s = NULL;
        unsigned long major, minor;
        int i = 0, ret = -EINVAL;
        int part;
        dev_t dev;
        u64 temp;

        memset(s, 0, sizeof(s));

        while ((p = strsep(&buf, " ")) != NULL) {
                if (!*p)
                        continue;

                s[i++] = p;

                /* Prevent from inputing too many things */
                if (i == 3)
                        break;
        }

        if (i != 2)
                goto out;

        p = strsep(&s[0], ":");
        if (p != NULL)
                major_s = p;
        else
                goto out;

        minor_s = s[0];
        if (!minor_s)
                goto out;

        if (strict_strtoul(major_s, 10, &major))
                goto out;

        if (strict_strtoul(minor_s, 10, &minor))
                goto out;

        dev = MKDEV(major, minor);

        if (strict_strtoull(s[1], 10, &temp))
                goto out;

        disk = get_gendisk(dev, &part);
        if (!disk || part)
                goto out;

        rcu_read_lock();

        spin_lock_irq(disk->queue->queue_lock);
        blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
        spin_unlock_irq(disk->queue->queue_lock);

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                goto out_unlock;
        }

        switch (plid) {
        case BLKIO_POLICY_PROP:
                if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
                     temp > BLKIO_WEIGHT_MAX)
                        goto out_unlock;

                blkg->conf.weight = temp;
                blkio_update_group_weight(blkg, temp ?: blkcg->weight);
                break;
        case BLKIO_POLICY_THROTL:
                switch (fileid) {
                case BLKIO_THROTL_read_bps_device:
                        blkg->conf.bps[READ] = temp;
                        blkio_update_group_bps(blkg, temp ?: -1, fileid);
                        break;
                case BLKIO_THROTL_write_bps_device:
                        blkg->conf.bps[WRITE] = temp;
                        blkio_update_group_bps(blkg, temp ?: -1, fileid);
                        break;
                case BLKIO_THROTL_read_iops_device:
                        if (temp > THROTL_IOPS_MAX)
                                goto out_unlock;
                        blkg->conf.iops[READ] = temp;
                        blkio_update_group_iops(blkg, temp ?: -1, fileid);
                        break;
                case BLKIO_THROTL_write_iops_device:
                        if (temp > THROTL_IOPS_MAX)
                                goto out_unlock;
                        blkg->conf.iops[WRITE] = temp;
                        blkio_update_group_iops(blkg, temp ?: -1, fileid);
                        break;
                }
                break;
        default:
                BUG();
        }
        ret = 0;
out_unlock:
        rcu_read_unlock();
out:
        put_disk(disk);

        /*
         * If queue was bypassing, we should retry. Do so after a short
         * msleep(). It isn't strictly necessary but queue can be
         * bypassing for some time and it's always nice to avoid busy
         * looping.
         */
        if (ret == -EBUSY) {
                msleep(10);
                return restart_syscall();
        }
        return ret;
}

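/*
 * Expected input format, by way of example: "MAJOR:MINOR VALUE", so from
 * inside a cgroup directory something like
 *
 *      echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * limits reads on device 8:16 to 1 MB/s, and a VALUE of 0 clears the
 * setting (the temp ?: -1 / temp ?: blkcg->weight expressions above map
 * zero back to "no per-device override").
 */
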
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
                              const char *buffer)
{
        int ret = 0;
        char *buf;
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int fileid = BLKIOFILE_ATTR(cft->private);

        buf = kstrdup(buffer, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
        kfree(buf);
        return ret;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info.dev)
                return dev_name(blkg->q->backing_dev_info.dev);
        return NULL;
}

static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
                                   struct seq_file *m)
{
        const char *dname = blkg_dev_name(blkg);
        int fileid = BLKIOFILE_ATTR(cft->private);
        int rw = WRITE;

        if (!dname)
                return;

        switch (blkg->plid) {
        case BLKIO_POLICY_PROP:
                if (blkg->conf.weight)
                        seq_printf(m, "%s\t%u\n",
                                   dname, blkg->conf.weight);
                break;
        case BLKIO_POLICY_THROTL:
                switch (fileid) {
                case BLKIO_THROTL_read_bps_device:
                        rw = READ;
                case BLKIO_THROTL_write_bps_device:
                        if (blkg->conf.bps[rw])
                                seq_printf(m, "%s\t%llu\n",
                                           dname, blkg->conf.bps[rw]);
                        break;
                case BLKIO_THROTL_read_iops_device:
                        rw = READ;
                case BLKIO_THROTL_write_iops_device:
                        if (blkg->conf.iops[rw])
                                seq_printf(m, "%s\t%u\n",
                                           dname, blkg->conf.iops[rw]);
                        break;
                }
                break;
        default:
                BUG();
        }
}

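/*
 * The case labels above fall through on purpose: rw defaults to WRITE,
 * and the read_* cases merely flip it to READ before sharing the print
 * logic with their write_* counterparts.
 */
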
/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
                            struct seq_file *m)
{
        struct blkio_group *blkg;
        struct hlist_node *n;

        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (BLKIOFILE_POLICY(cft->private) == blkg->plid)
                        blkio_print_group_conf(cft, blkg, m);
        spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
                             struct seq_file *m)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight_device:
                        blkio_read_conf(cft, blkcg, m);
                        return 0;
                default:
                        BUG();
                }
                break;
        case BLKIO_POLICY_THROTL:
                switch (name) {
                case BLKIO_THROTL_read_bps_device:
                case BLKIO_THROTL_write_bps_device:
                case BLKIO_THROTL_read_iops_device:
                case BLKIO_THROTL_write_iops_device:
                        blkio_read_conf(cft, blkcg, m);
                        return 0;
                default:
                        BUG();
                }
                break;
        default:
                BUG();
        }

        return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
                                 struct cftype *cft, struct cgroup_map_cb *cb,
                                 enum stat_type type, bool show_total, bool pcpu)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        uint64_t cgroup_total = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
                const char *dname = blkg_dev_name(blkg);

                if (!dname || BLKIOFILE_POLICY(cft->private) != blkg->plid)
                        continue;
                if (pcpu)
                        cgroup_total += blkio_get_stat_cpu(blkg, cb, dname,
                                                           type);
                else {
                        spin_lock_irq(&blkg->stats_lock);
                        cgroup_total += blkio_get_stat(blkg, cb, dname, type);
                        spin_unlock_irq(&blkg->stats_lock);
                }
        }
        if (show_total)
                cb->fill(cb, "Total", cgroup_total);
        rcu_read_unlock();
        return 0;
}

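/*
 * Two stat families meet here: per-cpu stats (serviced, service_bytes,
 * merged, sectors) are summed locklessly via the u64_stats seqcount,
 * while the remaining stats still live under blkg->stats_lock. The
 * show_total/pcpu flags passed in from blkiocg_file_read_map() below
 * select the row layout and the access method for each file.
 */
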
/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
                                 struct cgroup_map_cb *cb)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_TIME, 0, 0);
                case BLKIO_PROP_sectors:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SECTORS, 0, 1);
                case BLKIO_PROP_io_service_bytes:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
                case BLKIO_PROP_io_serviced:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SERVICED, 1, 1);
                case BLKIO_PROP_io_service_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_SERVICE_TIME, 1, 0);
                case BLKIO_PROP_io_wait_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_WAIT_TIME, 1, 0);
                case BLKIO_PROP_io_merged:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_MERGED, 1, 1);
                case BLKIO_PROP_io_queued:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
                case BLKIO_PROP_unaccounted_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
                case BLKIO_PROP_dequeue:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_DEQUEUE, 0, 0);
                case BLKIO_PROP_avg_queue_size:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
                case BLKIO_PROP_group_wait_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
                case BLKIO_PROP_idle_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_IDLE_TIME, 0, 0);
                case BLKIO_PROP_empty_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
                default:
                        BUG();
                }
                break;
        case BLKIO_POLICY_THROTL:
                switch (name) {
                case BLKIO_THROTL_io_service_bytes:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
                case BLKIO_THROTL_io_serviced:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SERVICED, 1, 1);
                default:
                        BUG();
                }
                break;
        default:
                BUG();
        }

        return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
        struct blkio_group *blkg;
        struct hlist_node *n;

        if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
                return -EINVAL;

        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);
        blkcg->weight = (unsigned int)val;

        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (blkg->plid == plid && !blkg->conf.weight)
                        blkio_update_group_weight(blkg, blkcg->weight);

        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
        return 0;
}

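/*
 * Propagation rule worth spelling out: the cgroup-wide weight only
 * reaches groups whose conf.weight is zero, i.e. a per-device value
 * written through blkio.weight_device always wins over blkio.weight.
 */
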
static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight:
                        return (u64)blkcg->weight;
                }
                break;
        default:
                BUG();
        }
        return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight:
                        return blkio_weight_write(blkcg, plid, val);
                }
                break;
        default:
                BUG();
        }

        return 0;
}

struct cftype blkio_files[] = {
        {
                .name = "weight_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_weight_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "weight",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_weight),
                .read_u64 = blkiocg_file_read_u64,
                .write_u64 = blkiocg_file_write_u64,
        },
        {
                .name = "time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "sectors",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_sectors),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_service_bytes",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_service_bytes),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_serviced",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_serviced),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_service_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_service_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_wait_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_wait_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_merged",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_merged),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_queued",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_io_queued),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "reset_stats",
                .write_u64 = blkiocg_reset_stats,
        },
#ifdef CONFIG_BLK_DEV_THROTTLING
        {
                .name = "throttle.read_bps_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_read_bps_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },

        {
                .name = "throttle.write_bps_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_write_bps_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },

        {
                .name = "throttle.read_iops_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_read_iops_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },

        {
                .name = "throttle.write_iops_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_write_iops_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "throttle.io_service_bytes",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_io_service_bytes),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "throttle.io_serviced",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                BLKIO_THROTL_io_serviced),
                .read_map = blkiocg_file_read_map,
        },
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "avg_queue_size",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_avg_queue_size),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "group_wait_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_group_wait_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "idle_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_idle_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "empty_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_empty_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "dequeue",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_dequeue),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "unaccounted_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                BLKIO_PROP_unaccounted_time),
                .read_map = blkiocg_file_read_map,
        },
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        return cgroup_add_files(cgroup, subsys, blkio_files,
                                ARRAY_SIZE(blkio_files));
}

static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
                               struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        unsigned long flags;
        struct blkio_group *blkg;
        struct request_queue *q;
        struct blkio_policy_type *blkiop;

        rcu_read_lock();

        do {
                spin_lock_irqsave(&blkcg->lock, flags);

                if (hlist_empty(&blkcg->blkg_list)) {
                        spin_unlock_irqrestore(&blkcg->lock, flags);
                        break;
                }

                blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
                                   blkcg_node);
                q = rcu_dereference(blkg->q);
                __blkiocg_del_blkio_group(blkg);

                spin_unlock_irqrestore(&blkcg->lock, flags);

                /*
                 * This blkio_group is being unlinked as associated cgroup is
                 * going away. Let all the IO controlling policies know about
                 * this event.
                 */
                spin_lock(&blkio_list_lock);
                list_for_each_entry(blkiop, &blkio_list, list) {
                        if (blkiop->plid != blkg->plid)
                                continue;
                        blkiop->ops.blkio_unlink_group_fn(q, blkg);
                }
                spin_unlock(&blkio_list_lock);
        } while (1);

        rcu_read_unlock();

        return 0;
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

        if (blkcg != &blkio_root_cgroup)
                kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg;
        struct cgroup *parent = cgroup->parent;

        if (!parent) {
                blkcg = &blkio_root_cgroup;
                goto done;
        }

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no mean to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                              struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, cgrp, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}

static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                           struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;

        cgroup_taskset_for_each(task, cgrp, tset) {
                /* we don't lose anything even if ioc allocation fails */
                ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
                if (ioc) {
                        ioc_cgroup_changed(ioc);
                        put_io_context(ioc);
                }
        }
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);

        BUG_ON(blkio_policy[blkiop->plid]);
        blkio_policy[blkiop->plid] = blkiop;
        list_add_tail(&blkiop->list, &blkio_list);

        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);

        BUG_ON(blkio_policy[blkiop->plid] != blkiop);
        blkio_policy[blkiop->plid] = NULL;
        list_del_init(&blkiop->list);

        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
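/*
 * Registration sketch, as far as can be read from this file: a policy
 * fills in a blkio_policy_type (list node, ops table, plid) with the
 * alloc/link/unlink/clear callbacks and the weight/bps/iops update hooks
 * invoked above, then calls blkio_policy_register() at init, making it
 * visible to every walk over blkio_list. In this era the registrants
 * would be the CFQ proportional-weight policy and blk-throttle.
 */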