/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include "blk-cgroup.h"
#include "cfq-iosched.h"

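/* Root cgroup: statically allocated, with twice the default weight */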
struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

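/*
 * Take a reference on the cgroup's css so the blkio_cgroup cannot go away
 * while a policy is still using it. Fails (returns false) if the cgroup
 * is already being destroyed.
 */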
bool blkiocg_css_tryget(struct blkio_cgroup *blkcg)
{
	return css_tryget(&blkcg->css);
}
EXPORT_SYMBOL_GPL(blkiocg_css_tryget);

void blkiocg_css_put(struct blkio_cgroup *blkcg)
{
	css_put(&blkcg->css);
}
EXPORT_SYMBOL_GPL(blkiocg_css_put);

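/* Map a cgroup to the blkio_cgroup embedding its blkio subsystem state */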
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

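/* Accumulate observed service time and sectors dispatched into group totals */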
void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
			unsigned long time, unsigned long sectors)
{
	blkg->time += time;
	blkg->sectors += sectors;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_stats);

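/*
 * Hash a blkio_group into its cgroup's list under blkcg->lock. @key is an
 * opaque per-policy identifier; it is what blkiocg_lookup_group() matches
 * on and what gets handed back to the policy on unlink.
 */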
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

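/* Unhash a group from its cgroup's list. Caller must hold blkcg->lock. */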
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * Returns 0 if blkio_group was still on the cgroup list. Otherwise returns
 * 1, indicating that the blkio_group was already unhashed by the time we
 * got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

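/* Generates a read_u64 handler returning a scalar blkio_cgroup field */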
#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				       struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

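/*
 * Update the cgroup weight and propagate the new value to every
 * blkio_group currently hanging off this cgroup.
 */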
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		cfq_update_blkio_group_weight(blkg, blkcg->weight);
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

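/*
 * Generates a seq_file handler that walks the cgroup's groups under RCU
 * and emits one "major:minor <value>" line per known device.
 */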
#define SHOW_FUNCTION_PER_GROUP(__VAR)					\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
			struct cftype *cftype, struct seq_file *m)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev)						\
			seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),	\
				MINOR(blkg->dev), blkg->__VAR);		\
	}								\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time);
SHOW_FUNCTION_PER_GROUP(sectors);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue);
#endif
#undef SHOW_FUNCTION_PER_GROUP

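/* Debug statistic: number of times a group was dequeued from service tree */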
#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
#endif

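/* Control files created in every blkio cgroup directory */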
struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_seq_string = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_seq_string = blkiocg_sectors_read,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "dequeue",
		.read_seq_string = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

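/*
 * Cgroup teardown. Unhash the remaining groups one by one, dropping
 * blkcg->lock before notifying the owning policy so the policy is free
 * to take its own locks while unlinking the group.
 */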
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the IO controlling policies know about this
	 * event.
	 *
	 * Currently this is a static call to one IO controlling policy.
	 * Once we have more policies in place, we will need some dynamic
	 * registration of callback functions.
	 */
	cfq_unlink_blkio_group(key, blkg);
	goto remove_entry;
done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	kfree(blkcg);
}

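/*
 * Allocate and set up a new blkio_cgroup. The root cgroup maps onto the
 * statically allocated blkio_root_cgroup; children are allocated here and
 * restricted to a two-level hierarchy.
 */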
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchy deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

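/*
 * The move succeeded; flag the io_context so the io scheduler notices
 * the cgroup change and can re-associate the task's queues with the new
 * group on its next IO.
 */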
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.use_id = 1,
};