/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup_taskset *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

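/*
 * Illustration of the encoding (see blkio_files[] near the end of this
 * file): a cftype declared with
 * .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, BLKIO_THROTL_read_bps_device)
 * keeps the policy id in the upper 16 bits and the attribute id in the
 * lower 16 bits, so the read/write handlers can recover both with
 * BLKIOFILE_POLICY() and BLKIOFILE_ATTR().
 */
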
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
			struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
			struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
		enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
				int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
			unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								blkg, iops);
	}
}

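/*
 * The three helpers above fan a configuration change (weight, bps or iops
 * limit) out to whichever registered policy owns the affected blkio_group;
 * policies whose plid does not match blkg->plid are skipped.
 */
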
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
				bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
						struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

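/*
 * Writer side of the per-cpu stats: the u64_stats_update_begin/end() pair
 * above is matched by the u64_stats_fetch_begin/retry() loop in
 * blkio_read_stat_cpu() further below, so 64-bit counters can be read
 * consistently on 32-bit machines without taking a lock.
 */
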
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
				direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg, *new_blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q, plid);
	if (blkg)
		return blkg;

	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 *
	 * FIXME: The following is broken.  Percpu memory allocation
	 * requires %GFP_KERNEL context and can't be performed from IO
	 * path.  Allocation here should inherently be atomic and the
	 * following lock dancing can be removed once the broken percpu
	 * allocation is fixed.
	 */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	new_blkg = pol->ops.blkio_alloc_group_fn(q, blkcg);
	if (new_blkg) {
		new_blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);

		spin_lock_init(&new_blkg->stats_lock);
		rcu_assign_pointer(new_blkg->q, q);
		new_blkg->blkcg_id = css_id(&blkcg->css);
		new_blkg->plid = plid;
		cgroup_path(blkcg->css.cgroup, new_blkg->path,
			    sizeof(new_blkg->path));
	}

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	css_put(&blkcg->css);

	/* did bypass get turned on inbetween? */
	if (unlikely(blk_queue_bypass(q)) && !for_root) {
		blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
		goto out;
	}

	/* did someone beat us to it? */
	blkg = blkg_lookup(blkcg, q, plid);
	if (unlikely(blkg))
		goto out;

	/* did alloc fail? */
	if (unlikely(!new_blkg || !new_blkg->stats_cpu)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	swap(blkg, new_blkg);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	pol->ops.blkio_link_group_fn(q, blkg);
	spin_unlock(&blkcg->lock);
out:
	if (new_blkg) {
		free_percpu(new_blkg->stats_cpu);
		kfree(new_blkg);
	}
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

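/*
 * Callers of blkg_lookup_create() get either a valid blkg or an ERR_PTR():
 * -EBUSY when the queue is temporarily bypassing (retry later), -EINVAL when
 * the queue is dead or the cgroup is going away, -ENOMEM when allocation
 * failed.  On any exit path the pre-allocated group that lost the race (if
 * any) is freed before returning.
 */
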
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blk_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q,
				enum blkio_policy_id plid)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q && blkg->plid == plid)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

void blkg_destroy_all(struct request_queue *q)
{
	struct blkio_policy_type *pol;

	while (true) {
		bool done = true;

		spin_lock(&blkio_list_lock);
		spin_lock_irq(q->queue_lock);

		/*
		 * clear_queue_fn() might return with non-empty group list
		 * if it raced cgroup removal and lost.  cgroup removal is
		 * guaranteed to make forward progress and retrying after a
		 * while is enough.  This ugliness is scheduled to be
		 * removed after locking update.
		 */
		list_for_each_entry(pol, &blkio_list, list)
			if (!pol->ops.blkio_clear_queue_fn(q))
				done = false;

		spin_unlock_irq(q->queue_lock);
		spin_unlock(&blkio_list_lock);

		if (done)
			break;

		msleep(10);	/* just some random duration I like */
	}
}

static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;
	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for(j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}


static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while(u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

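/*
 * Both blkio_get_stat_cpu() above and blkio_get_stat() below emit one
 * key/value pair per device, with the key built by blkio_get_key_name() as
 * "MAJOR:MINOR" optionally followed by " Read", " Write", " Sync", " Async"
 * or " Total".
 */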
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

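/*
 * Parses one configuration line of the form "<major>:<minor> <value>"
 * (e.g. "8:16 1048576" -- an illustrative value, not from the original
 * source) for the per-device weight and throttling files.  A value of 0 is
 * treated by the callers as "remove this rule".
 */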
static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid,
	struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputing too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	/* For rule removal, do not check for device presence. */
	disk = get_gendisk(dev, &part);

	if ((!disk || part) && temp) {
		ret = -ENODEV;
		goto out;
	}

	rcu_read_lock();

	if (disk && !part) {
		spin_lock_irq(disk->queue->queue_lock);
		blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
		spin_unlock_irq(disk->queue->queue_lock);

		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			if (ret == -EBUSY)
				goto out_unlock;
			blkg = NULL;
		}
	}

	newpn->dev = dev;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		if (blkg)
			blkg->conf.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch(fileid) {
		case BLKIO_THROTL_read_bps_device:
			if (blkg)
				blkg->conf.bps[READ] = temp;
			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = temp;
			break;
		case BLKIO_THROTL_write_bps_device:
			if (blkg)
				blkg->conf.bps[WRITE] = temp;
			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = temp;
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;

			if (blkg)
				blkg->conf.iops[READ] = temp;
			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)temp;
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;

			if (blkg)
				blkg->conf.iops[WRITE] = temp;
			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)temp;
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If queue was bypassing, we should retry.  Do so after a short
	 * msleep().  It isn't strictly necessary but queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int weight;

	spin_lock_irqsave(&blkcg->lock, flags);

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		weight = pn->val.weight;
	else
		weight = blkcg->weight;

	spin_unlock_irqrestore(&blkcg->lock, flags);

	return weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}

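/*
 * Each getter above returns the per-device rule for @dev if one exists and
 * otherwise a default: the cgroup-wide weight for blkcg_get_weight(), or -1
 * ("no limit configured") for the bps/iops helpers.
 */
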
/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}

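/*
 * A rule whose value is zero therefore means "delete the existing rule";
 * blkiocg_file_write() below uses this to decide between inserting,
 * updating and removing a policy node.
 */
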
static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
					struct blkio_policy_node *newpn)
{
	switch(oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch(newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	struct blkio_group_conf *conf = &blkg->conf;

	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		blkio_update_group_weight(blkg, conf->weight ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
			blkio_update_group_bps(blkg, conf->bps[READ] ?: -1,
						pn->fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			blkio_update_group_bps(blkg, conf->bps[WRITE] ?: -1,
						pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			blkio_update_group_iops(blkg, conf->iops[READ] ?: -1,
						pn->fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			blkio_update_group_iops(blkg, conf->iops[WRITE] ?: -1,
						pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}

static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid, blkcg);
	if (ret)
		goto free_newpn;

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		kfree(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

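/*
 * Write path summary: the user buffer is parsed into a temporary policy
 * node, the cgroup's rule list is updated (insert, update or delete), and
 * the change is then pushed to every matching blkio_group via
 * blkio_update_policy_node_blkg().
 */
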
static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch(pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch(pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch(name){
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			if (pcpu)
				cgroup_total += blkio_get_stat_cpu(blkg, cb,
						blkg->dev, type);
			else {
				spin_lock_irq(&blkg->stats_lock);
				cgroup_total += blkio_get_stat(blkg, cb,
						blkg->dev, type);
				spin_unlock_irq(&blkg->stats_lock);
			}
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch(plid) {
	case BLKIO_POLICY_PROP:
		switch(name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch(name){
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

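/*
 * Note the precedence in blkio_weight_write(): groups that have a per-device
 * weight_device rule keep it and are skipped, so only groups without such a
 * rule follow the cgroup-wide "weight" value written here.
 */
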
1480 | static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) { | |
1481 | struct blkio_cgroup *blkcg; | |
1482 | enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private); | |
1483 | int name = BLKIOFILE_ATTR(cft->private); | |
1484 | ||
1485 | blkcg = cgroup_to_blkio_cgroup(cgrp); | |
1486 | ||
1487 | switch(plid) { | |
1488 | case BLKIO_POLICY_PROP: | |
1489 | switch(name) { | |
1490 | case BLKIO_PROP_weight: | |
1491 | return (u64)blkcg->weight; | |
1492 | } | |
1493 | break; | |
1494 | default: | |
1495 | BUG(); | |
1496 | } | |
1497 | return 0; | |
1498 | } | |
1499 | ||
1500 | static int | |
1501 | blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) | |
1502 | { | |
1503 | struct blkio_cgroup *blkcg; | |
1504 | enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private); | |
1505 | int name = BLKIOFILE_ATTR(cft->private); | |
1506 | ||
1507 | blkcg = cgroup_to_blkio_cgroup(cgrp); | |
1508 | ||
1509 | switch (plid) {
1510 | case BLKIO_POLICY_PROP: | |
1511 | switch (name) {
1512 | case BLKIO_PROP_weight: | |
1513 | return blkio_weight_write(blkcg, val); | |
1514 | } | |
1515 | break; | |
1516 | default: | |
1517 | BUG(); | |
1518 | } | |
34d0f179 | 1519 | |
34d0f179 GJ |
1520 | return 0; |
1521 | } | |
1522 | ||
31e4c28d | 1523 | struct cftype blkio_files[] = { |
34d0f179 GJ |
1524 | { |
1525 | .name = "weight_device", | |
062a644d VG |
1526 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1527 | BLKIO_PROP_weight_device), | |
1528 | .read_seq_string = blkiocg_file_read, | |
1529 | .write_string = blkiocg_file_write, | |
34d0f179 GJ |
1530 | .max_write_len = 256, |
1531 | }, | |
31e4c28d VG |
1532 | { |
1533 | .name = "weight", | |
062a644d VG |
1534 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1535 | BLKIO_PROP_weight), | |
1536 | .read_u64 = blkiocg_file_read_u64, | |
1537 | .write_u64 = blkiocg_file_write_u64, | |
31e4c28d | 1538 | }, |
22084190 VG |
1539 | { |
1540 | .name = "time", | |
062a644d VG |
1541 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1542 | BLKIO_PROP_time), | |
1543 | .read_map = blkiocg_file_read_map, | |
22084190 VG |
1544 | }, |
1545 | { | |
1546 | .name = "sectors", | |
062a644d VG |
1547 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1548 | BLKIO_PROP_sectors), | |
1549 | .read_map = blkiocg_file_read_map, | |
303a3acb DS |
1550 | }, |
1551 | { | |
1552 | .name = "io_service_bytes", | |
062a644d VG |
1553 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1554 | BLKIO_PROP_io_service_bytes), | |
1555 | .read_map = blkiocg_file_read_map, | |
303a3acb DS |
1556 | }, |
1557 | { | |
1558 | .name = "io_serviced", | |
062a644d VG |
1559 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1560 | BLKIO_PROP_io_serviced), | |
1561 | .read_map = blkiocg_file_read_map, | |
303a3acb DS |
1562 | }, |
1563 | { | |
1564 | .name = "io_service_time", | |
062a644d VG |
1565 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1566 | BLKIO_PROP_io_service_time), | |
1567 | .read_map = blkiocg_file_read_map, | |
303a3acb DS |
1568 | }, |
1569 | { | |
1570 | .name = "io_wait_time", | |
062a644d VG |
1571 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1572 | BLKIO_PROP_io_wait_time), | |
1573 | .read_map = blkiocg_file_read_map, | |
84c124da | 1574 | }, |
812d4026 DS |
1575 | { |
1576 | .name = "io_merged", | |
062a644d VG |
1577 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1578 | BLKIO_PROP_io_merged), | |
1579 | .read_map = blkiocg_file_read_map, | |
812d4026 | 1580 | }, |
cdc1184c DS |
1581 | { |
1582 | .name = "io_queued", | |
062a644d VG |
1583 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1584 | BLKIO_PROP_io_queued), | |
1585 | .read_map = blkiocg_file_read_map, | |
cdc1184c | 1586 | }, |
84c124da DS |
1587 | { |
1588 | .name = "reset_stats", | |
1589 | .write_u64 = blkiocg_reset_stats, | |
22084190 | 1590 | }, |
13f98250 VG |
1591 | #ifdef CONFIG_BLK_DEV_THROTTLING |
1592 | { | |
1593 | .name = "throttle.read_bps_device", | |
1594 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, | |
1595 | BLKIO_THROTL_read_bps_device), | |
1596 | .read_seq_string = blkiocg_file_read, | |
1597 | .write_string = blkiocg_file_write, | |
1598 | .max_write_len = 256, | |
1599 | }, | |
1600 | ||
1601 | { | |
1602 | .name = "throttle.write_bps_device", | |
1603 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, | |
1604 | BLKIO_THROTL_write_bps_device), | |
1605 | .read_seq_string = blkiocg_file_read, | |
1606 | .write_string = blkiocg_file_write, | |
1607 | .max_write_len = 256, | |
1608 | }, | |
1609 | ||
1610 | { | |
1611 | .name = "throttle.read_iops_device", | |
1612 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, | |
1613 | BLKIO_THROTL_read_iops_device), | |
1614 | .read_seq_string = blkiocg_file_read, | |
1615 | .write_string = blkiocg_file_write, | |
1616 | .max_write_len = 256, | |
1617 | }, | |
1618 | ||
1619 | { | |
1620 | .name = "throttle.write_iops_device", | |
1621 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, | |
1622 | BLKIO_THROTL_write_iops_device), | |
1623 | .read_seq_string = blkiocg_file_read, | |
1624 | .write_string = blkiocg_file_write, | |
1625 | .max_write_len = 256, | |
1626 | }, | |
1627 | { | |
1628 | .name = "throttle.io_service_bytes", | |
1629 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, | |
1630 | BLKIO_THROTL_io_service_bytes), | |
1631 | .read_map = blkiocg_file_read_map, | |
1632 | }, | |
1633 | { | |
1634 | .name = "throttle.io_serviced", | |
1635 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL, | |
1636 | BLKIO_THROTL_io_serviced), | |
1637 | .read_map = blkiocg_file_read_map, | |
1638 | }, | |
1639 | #endif /* CONFIG_BLK_DEV_THROTTLING */ | |
1640 | ||
22084190 | 1641 | #ifdef CONFIG_DEBUG_BLK_CGROUP |
cdc1184c DS |
1642 | { |
1643 | .name = "avg_queue_size", | |
062a644d VG |
1644 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1645 | BLKIO_PROP_avg_queue_size), | |
1646 | .read_map = blkiocg_file_read_map, | |
cdc1184c | 1647 | }, |
812df48d DS |
1648 | { |
1649 | .name = "group_wait_time", | |
062a644d VG |
1650 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1651 | BLKIO_PROP_group_wait_time), | |
1652 | .read_map = blkiocg_file_read_map, | |
812df48d DS |
1653 | }, |
1654 | { | |
1655 | .name = "idle_time", | |
062a644d VG |
1656 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1657 | BLKIO_PROP_idle_time), | |
1658 | .read_map = blkiocg_file_read_map, | |
812df48d DS |
1659 | }, |
1660 | { | |
1661 | .name = "empty_time", | |
062a644d VG |
1662 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1663 | BLKIO_PROP_empty_time), | |
1664 | .read_map = blkiocg_file_read_map, | |
812df48d | 1665 | }, |
cdc1184c | 1666 | { |
22084190 | 1667 | .name = "dequeue", |
062a644d VG |
1668 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1669 | BLKIO_PROP_dequeue), | |
1670 | .read_map = blkiocg_file_read_map, | |
cdc1184c | 1671 | }, |
9026e521 JT |
1672 | { |
1673 | .name = "unaccounted_time", | |
1674 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, | |
1675 | BLKIO_PROP_unaccounted_time), | |
1676 | .read_map = blkiocg_file_read_map, | |
1677 | }, | |
22084190 | 1678 | #endif |
31e4c28d VG |
1679 | }; |
1680 | ||
1681 | static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup) | |
1682 | { | |
1683 | return cgroup_add_files(cgroup, subsys, blkio_files, | |
1684 | ARRAY_SIZE(blkio_files)); | |
1685 | } | |
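blkiocg_populate() installs the blkio_files[] array above in every blkio cgroup directory, where the cgroup core prefixes each entry with the subsystem name (blkio.weight, blkio.throttle.read_bps_device, and so on). The throttle device files accept a "major:minor value" string via blkiocg_file_write(); a hedged sketch of installing a read-bandwidth limit, with the mount point, group name and device numbers being assumptions:

        /* Hypothetical example -- cap reads from device 8:16 at 1 MiB/s. */
        #include <stdio.h>

        int main(void)
        {
                /* Path, group name and device numbers are assumptions. */
                FILE *f = fopen("/sys/fs/cgroup/blkio/mygroup/"
                                "blkio.throttle.read_bps_device", "w");

                if (!f) {
                        perror("fopen");
                        return 1;
                }
                /* The throttle device files take "<major>:<minor> <value>". */
                fprintf(f, "8:16 1048576\n");
                return fclose(f) == 0 ? 0 : 1;
        }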
1686 | ||
1687 | static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup) | |
1688 | { | |
1689 | struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); | |
b1c35769 VG |
1690 | unsigned long flags; |
1691 | struct blkio_group *blkg; | |
ca32aefc | 1692 | struct request_queue *q; |
3e252066 | 1693 | struct blkio_policy_type *blkiop; |
34d0f179 | 1694 | struct blkio_policy_node *pn, *pntmp; |
b1c35769 VG |
1695 | |
1696 | rcu_read_lock(); | |
0f3942a3 JA |
1697 | do { |
1698 | spin_lock_irqsave(&blkcg->lock, flags); | |
b1c35769 | 1699 | |
0f3942a3 JA |
1700 | if (hlist_empty(&blkcg->blkg_list)) { |
1701 | spin_unlock_irqrestore(&blkcg->lock, flags); | |
1702 | break; | |
1703 | } | |
b1c35769 | 1704 | |
0f3942a3 JA |
1705 | blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group, |
1706 | blkcg_node); | |
ca32aefc | 1707 | q = rcu_dereference(blkg->q); |
0f3942a3 | 1708 | __blkiocg_del_blkio_group(blkg); |
31e4c28d | 1709 | |
0f3942a3 | 1710 | spin_unlock_irqrestore(&blkcg->lock, flags); |
b1c35769 | 1711 | |
0f3942a3 JA |
1712 | /* |
1713 | * This blkio_group is being unlinked as the associated cgroup is | |
1714 | * going away. Let all the IO controlling policies know about | |
61014e96 | 1715 | * this event. |
0f3942a3 JA |
1716 | */ |
1717 | spin_lock(&blkio_list_lock); | |
61014e96 VG |
1718 | list_for_each_entry(blkiop, &blkio_list, list) { |
1719 | if (blkiop->plid != blkg->plid) | |
1720 | continue; | |
ca32aefc | 1721 | blkiop->ops.blkio_unlink_group_fn(q, blkg); |
61014e96 | 1722 | } |
0f3942a3 JA |
1723 | spin_unlock(&blkio_list_lock); |
1724 | } while (1); | |
34d0f179 | 1725 | |
34d0f179 GJ |
1726 | list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) { |
1727 | blkio_policy_delete_node(pn); | |
1728 | kfree(pn); | |
1729 | } | |
0f3942a3 | 1730 | |
31e4c28d | 1731 | free_css_id(&blkio_subsys, &blkcg->css); |
b1c35769 | 1732 | rcu_read_unlock(); |
67523c48 BB |
1733 | if (blkcg != &blkio_root_cgroup) |
1734 | kfree(blkcg); | |
31e4c28d VG |
1735 | } |
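The teardown loop above deliberately detaches only one blkio_group per acquisition of blkcg->lock and invokes the policy unlink hook only after the lock has been dropped, so policy callbacks never run under the cgroup's lock. A userspace analogue of that idiom, with purely hypothetical names, is sketched below.

        /*
         * Illustrative sketch of the "pop one under the lock, notify with the
         * lock dropped" pattern used in blkiocg_destroy(); nothing here is
         * kernel API.
         */
        #include <pthread.h>

        struct grp {
                struct grp *next;
        };

        static struct grp *groups;                  /* protected by groups_lock */
        static pthread_mutex_t groups_lock = PTHREAD_MUTEX_INITIALIZER;

        static void notify_unlink(struct grp *g)    /* stand-in for the policy hook */
        {
                (void)g;
        }

        static void drain_groups(void)
        {
                for (;;) {
                        struct grp *g;

                        pthread_mutex_lock(&groups_lock);
                        g = groups;
                        if (!g) {
                                pthread_mutex_unlock(&groups_lock);
                                break;
                        }
                        groups = g->next;           /* detach exactly one entry */
                        pthread_mutex_unlock(&groups_lock);

                        notify_unlink(g);           /* callback runs without the lock */
                }
        }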
1736 | ||
1737 | static struct cgroup_subsys_state * | |
1738 | blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup) | |
1739 | { | |
0341509f LZ |
1740 | struct blkio_cgroup *blkcg; |
1741 | struct cgroup *parent = cgroup->parent; | |
31e4c28d | 1742 | |
0341509f | 1743 | if (!parent) { |
31e4c28d VG |
1744 | blkcg = &blkio_root_cgroup; |
1745 | goto done; | |
1746 | } | |
1747 | ||
31e4c28d VG |
1748 | blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); |
1749 | if (!blkcg) | |
1750 | return ERR_PTR(-ENOMEM); | |
1751 | ||
1752 | blkcg->weight = BLKIO_WEIGHT_DEFAULT; | |
1753 | done: | |
1754 | spin_lock_init(&blkcg->lock); | |
1755 | INIT_HLIST_HEAD(&blkcg->blkg_list); | |
1756 | ||
34d0f179 | 1757 | INIT_LIST_HEAD(&blkcg->policy_list); |
31e4c28d VG |
1758 | return &blkcg->css; |
1759 | } | |
1760 | ||
1761 | /* | |
1762 | * We cannot support shared io contexts, as we have no means to support | |
1763 | * two tasks with the same ioc in two different groups without major rework | |
1764 | * of the main cic data structures. For now we allow a task to change | |
1765 | * its cgroup only if it's the only owner of its ioc. | |
1766 | */ | |
bb9d97b6 TH |
1767 | static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, |
1768 | struct cgroup_taskset *tset) | |
31e4c28d | 1769 | { |
bb9d97b6 | 1770 | struct task_struct *task; |
31e4c28d VG |
1771 | struct io_context *ioc; |
1772 | int ret = 0; | |
1773 | ||
1774 | /* task_lock() is needed to avoid races with exit_io_context() */ | |
bb9d97b6 TH |
1775 | cgroup_taskset_for_each(task, cgrp, tset) { |
1776 | task_lock(task); | |
1777 | ioc = task->io_context; | |
1778 | if (ioc && atomic_read(&ioc->nr_tasks) > 1) | |
1779 | ret = -EINVAL; | |
1780 | task_unlock(task); | |
1781 | if (ret) | |
1782 | break; | |
1783 | } | |
31e4c28d VG |
1784 | return ret; |
1785 | } | |
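The restriction documented above can be observed from userspace: tasks created with CLONE_IO share one io_context, so moving only one of them into a different blkio cgroup makes blkiocg_can_attach() return -EINVAL. A rough sketch follows; the cgroup path is an assumption, and because the io_context is allocated lazily the parent must already own one (for example after issuing block I/O) for the sharing to take effect.

        /* Hypothetical demonstration of the shared-ioc rejection. */
        #define _GNU_SOURCE
        #include <sched.h>
        #include <signal.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <sys/types.h>
        #include <unistd.h>

        static int child_fn(void *arg)
        {
                (void)arg;
                pause();                /* stay alive so the ioc stays shared */
                return 0;
        }

        int main(void)
        {
                char *stack = malloc(64 * 1024);
                FILE *f;
                pid_t pid;

                if (!stack)
                        return 1;

                /* CLONE_IO shares the parent's io_context, so ioc->nr_tasks > 1. */
                pid = clone(child_fn, stack + 64 * 1024, CLONE_IO | SIGCHLD, NULL);
                if (pid < 0) {
                        perror("clone");
                        return 1;
                }

                /*
                 * Moving just the child into another blkio cgroup is then
                 * expected to fail with EINVAL; path and group name are
                 * assumptions about a v1 blkio mount.
                 */
                f = fopen("/sys/fs/cgroup/blkio/othergroup/tasks", "w");
                if (f) {
                        if (fprintf(f, "%d\n", pid) < 0 || fclose(f) == EOF)
                                perror("echo pid > tasks");
                }

                kill(pid, SIGTERM);
                return 0;
        }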
1786 | ||
bb9d97b6 TH |
1787 | static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, |
1788 | struct cgroup_taskset *tset) | |
31e4c28d | 1789 | { |
bb9d97b6 | 1790 | struct task_struct *task; |
31e4c28d VG |
1791 | struct io_context *ioc; |
1792 | ||
bb9d97b6 | 1793 | cgroup_taskset_for_each(task, cgrp, tset) { |
b3c9dd18 LT |
1794 | /* we don't lose anything even if ioc allocation fails */ |
1795 | ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE); | |
1796 | if (ioc) { | |
1797 | ioc_cgroup_changed(ioc); | |
11a3122f | 1798 | put_io_context(ioc); |
b3c9dd18 | 1799 | } |
bb9d97b6 | 1800 | } |
31e4c28d VG |
1801 | } |
1802 | ||
3e252066 VG |
1803 | void blkio_policy_register(struct blkio_policy_type *blkiop) |
1804 | { | |
1805 | spin_lock(&blkio_list_lock); | |
035d10b2 TH |
1806 | |
1807 | BUG_ON(blkio_policy[blkiop->plid]); | |
1808 | blkio_policy[blkiop->plid] = blkiop; | |
3e252066 | 1809 | list_add_tail(&blkiop->list, &blkio_list); |
035d10b2 | 1810 | |
3e252066 VG |
1811 | spin_unlock(&blkio_list_lock); |
1812 | } | |
1813 | EXPORT_SYMBOL_GPL(blkio_policy_register); | |
1814 | ||
1815 | void blkio_policy_unregister(struct blkio_policy_type *blkiop) | |
1816 | { | |
1817 | spin_lock(&blkio_list_lock); | |
035d10b2 TH |
1818 | |
1819 | BUG_ON(blkio_policy[blkiop->plid] != blkiop); | |
1820 | blkio_policy[blkiop->plid] = NULL; | |
3e252066 | 1821 | list_del_init(&blkiop->list); |
035d10b2 | 1822 | |
3e252066 VG |
1823 | spin_unlock(&blkio_list_lock); |
1824 | } | |
1825 | EXPORT_SYMBOL_GPL(blkio_policy_unregister); |
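A blkio policy such as the block throttling code plugs into this framework by filling a struct blkio_policy_type and registering it at init time. Only the unlink hook and the plid/list fields are visible in this file, so the sketch below wires up just that callback; any other ops fields, and the choice of policy id, are assumptions made for illustration.

        /* Hypothetical policy registration sketch -- not the real blk-throttle code. */
        #include <linux/module.h>
        #include "blk-cgroup.h"

        static void example_unlink_group(struct request_queue *q,
                                         struct blkio_group *blkg)
        {
                /* Drop this policy's per-group state for @blkg on queue @q. */
        }

        static struct blkio_policy_type example_policy = {
                .ops = {
                        .blkio_unlink_group_fn  = example_unlink_group,
                },
                /*
                 * A real policy needs its own policy id; BLKIO_POLICY_THROTL is
                 * used here only because it appears in this file -- registering
                 * it a second time would trip the BUG_ON() in
                 * blkio_policy_register().
                 */
                .plid = BLKIO_POLICY_THROTL,
        };

        static int __init example_init(void)
        {
                blkio_policy_register(&example_policy);
                return 0;
        }

        static void __exit example_exit(void)
        {
                blkio_policy_unregister(&example_policy);
        }

        module_init(example_init);
        module_exit(example_exit);
        MODULE_LICENSE("GPL");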