/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

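/* Return the blkio_cgroup embedded in @cgroup's blkio subsystem state. */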
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
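
/*
 * Return the blkio_cgroup a bio is associated with.  If the bio carries
 * no css association, fall back to the issuing task's cgroup.
 */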
struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the queue_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the queue_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);
	BUG_ON(blkio_blkg_idling(stats));

	stats->start_idle_time = sched_clock();
	blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkio_blkg_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		blkio_clear_blkg_idling(stats);
	}
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_sum(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	blkio_update_group_wait_time(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkg_rwstat_sum(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&pd->stats.dequeue, dequeue);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif

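/*
 * Account one request queued to @blkg.  Must be called with the
 * queue_lock held.  @curr_blkg is the group currently being serviced;
 * it is used to decide whether @blkg starts waiting for service.
 */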
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, 1);
	blkio_end_empty_time(stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
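
/*
 * Reverse of blkiocg_update_io_add_stats().  Must be called with the
 * queue_lock held.
 */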
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, -1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
#endif
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * Should be called under rcu read lock or queue lock to make sure blkg
 * pointer is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (pd->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
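
/*
 * Attribute a request completion to @blkg: service_time accumulates the
 * device time (now - io_start_time) while wait_time accumulates the
 * queueing delay (io_start_time - start_time).  Must be called with the
 * queue_lock held.
 */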
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	unsigned long long now = sched_clock();
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Must be called with the queue_lock held; merge stats are not per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->merged, rw, 1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
					   msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
					alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}

		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	if (!empty)
		goto alloc_stats;
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}
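
/**
 * blkg_lookup_create - look up the blkg for the @blkcg - @q pair,
 *	creating it if it doesn't exist yet
 * @blkcg: block cgroup of interest
 * @q: request_queue of interest
 * @for_root: whether the lookup is on behalf of the root blkg
 *
 * Returns the existing or newly allocated blkg on success, or an
 * ERR_PTR() on failure.  Must be called with the queue_lock held under
 * rcu_read_lock(); non-root lookups fail on a bypassing queue.
 */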
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate and initialize.
	 */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);
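
/**
 * blkg_destroy - unlink and release a blkg
 * @blkg: blkg to destroy
 *
 * Remove @blkg from its queue and cgroup lists and put the reference
 * taken at creation time.  Both the queue_lock and the blkcg lock must
 * be held.
 */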
static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down.  This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in rcu manner.  But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid.  For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;

	if (pd->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
		blkg_stat_reset(&sc->sectors);
	}
}
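
/*
 * Write handler for the "reset_stats" cgroup file (see blkio_files[]
 * below).  Clears the resettable stats of every blkg in @cgroup for
 * every registered policy.
 */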
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];
			struct blkio_group_stats *stats = &pd->stats;

			/* queued stats shouldn't be cleared */
			blkg_rwstat_reset(&stats->merged);
			blkg_rwstat_reset(&stats->service_time);
			blkg_rwstat_reset(&stats->wait_time);
			blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
			blkg_stat_reset(&stats->unaccounted_time);
			blkg_stat_reset(&stats->avg_queue_size_sum);
			blkg_stat_reset(&stats->avg_queue_size_samples);
			blkg_stat_reset(&stats->dequeue);
			blkg_stat_reset(&stats->group_wait_time);
			blkg_stat_reset(&stats->idle_time);
			blkg_stat_reset(&stats->empty_time);
#endif
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
		       u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int),
		       int pol, int data, bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->pd[pol])
			total += prfill(sf, blkg->pd[pol], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd,
				 blkg_stat_read((void *)&pd->stats + off));
}

static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)&pd->stats + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/* print blkg_stat specified by BLKCG_STAT_PRIV() */
int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
		     struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_stat);

/* print blkg_rwstat specified by BLKCG_STAT_PRIV() */
int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
		       struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_rwstat);
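
/*
 * The following prfill helpers sum a per cpu stat across all possible
 * CPUs before printing; the per cpu counters themselves are written
 * lockless in blkiocg_update_dispatch_stats().
 */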
static u64 blkg_prfill_cpu_stat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	u64 v = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		v += blkg_stat_read((void *)sc + off);
	}

	return __blkg_prfill_u64(sf, pd, v);
}

static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/* print per-cpu blkg_stat specified by BLKCG_STAT_PRIV() */
int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
			 struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_stat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), false);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_cpu_stat);

/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
			   struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
			  BLKCG_STAT_POL(cft->private),
			  BLKCG_STAT_OFF(cft->private), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkcg_print_cpu_rwstat);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read locked and must be paired
 * with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
		   struct blkg_conf_ctx *ctx)
	__acquires(rcu)
{
	struct gendisk *disk;
	struct blkio_group *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(rcu)
{
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
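
/*
 * A policy's cftype write handler pairs the two helpers above, roughly
 * (sketch):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	... apply ctx.v to ctx.blkg's policy data ...
 *	blkg_conf_finish(&ctx);
 */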

struct cftype blkio_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and responsible
 * for shooting down all blkgs associated with @cgroup.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}
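
/*
 * Put every queue into bypass mode and shoot down its non-root blkgs so
 * that policy data can be switched safely.  blkcg_bypass_start() and
 * blkcg_bypass_end() bracket policy (un)registration and keep
 * all_q_mutex held in between.
 */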
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

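/*
 * Register @blkiop and hook its per-group policy data into all existing
 * queues.  Called by a policy implementation (e.g. cfq or blk-throttle)
 * at init time.
 */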
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();

	if (blkiop->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	if (blkiop->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes);

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);