/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include "blk-cgroup.h"

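/*
 * All registered I/O control policies (currently only the CFQ group
 * scheduler) sit on blkio_list, protected by blkio_list_lock.
 */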
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
                                                  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
                              struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
                           struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .attach = blkiocg_attach,
        .destroy = blkiocg_destroy,
        .populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
        /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
        .subsys_id = blkio_subsys_id,
#endif
        .use_id = 1,
        .module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

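/*
 * Map a generic cgroup to its blkio-specific state: struct blkio_cgroup
 * embeds the subsystem state, so container_of() recovers it.
 */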
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

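/*
 * Accumulate service statistics for a group.  Called by the I/O policy
 * as slices complete; the caller is expected to provide any needed
 * serialization.
 */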
void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
                                      unsigned long time, unsigned long sectors)
{
        blkg->time += time;
        blkg->sectors += sectors;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_stats);

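/*
 * Link a policy's per-device group into the cgroup's RCU-protected
 * list.  @key is an opaque per-device token chosen by the policy (CFQ
 * passes its cfq_data pointer) and is what lookups match on.
 */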
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
                             struct blkio_group *blkg, void *key, dev_t dev)
{
        unsigned long flags;

        spin_lock_irqsave(&blkcg->lock, flags);
        rcu_assign_pointer(blkg->key, key);
        blkg->blkcg_id = css_id(&blkcg->css);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* Need to take css reference ? */
        cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
        blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

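/* Must be called with blkcg->lock held. */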
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        hlist_del_init_rcu(&blkg->blkcg_node);
        blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list; otherwise
 * returns 1, indicating that the blkio_group was already unhashed by
 * the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        struct blkio_cgroup *blkcg;
        unsigned long flags;
        struct cgroup_subsys_state *css;
        int ret = 1;

        rcu_read_lock();
        css = css_lookup(&blkio_subsys, blkg->blkcg_id);
        if (!css)
                goto out;

        blkcg = container_of(css, struct blkio_cgroup, css);
        spin_lock_irqsave(&blkcg->lock, flags);
        if (!hlist_unhashed(&blkg->blkcg_node)) {
                __blkiocg_del_blkio_group(blkg);
                ret = 0;
        }
        spin_unlock_irqrestore(&blkcg->lock, flags);
out:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        void *__key;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
                __key = blkg->key;
                if (__key == key)
                        return blkg;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

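/*
 * SHOW_FUNCTION() generates a read_u64 handler returning the named
 * per-cgroup field; only "weight" uses it today.
 */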
#define SHOW_FUNCTION(__VAR)                                            \
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,                \
                                  struct cftype *cftype)                \
{                                                                       \
        struct blkio_cgroup *blkcg;                                     \
                                                                        \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        return (u64)blkcg->__VAR;                                       \
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

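/*
 * Write handler for blkio.weight: validate the new value, store it in
 * the cgroup, then push it down to every group on every registered
 * policy so scheduling picks it up.
 */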
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct hlist_node *n;
        struct blkio_policy_type *blkiop;

        if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
                return -EINVAL;

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);
        blkcg->weight = (unsigned int)val;
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                list_for_each_entry(blkiop, &blkio_list, list)
                        blkiop->ops.blkio_update_group_weight_fn(blkg,
                                                                 blkcg->weight);
        }
        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
        return 0;
}

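/*
 * SHOW_FUNCTION_PER_GROUP() generates a seq_file read handler that
 * prints one "major:minor value" line per device group in the cgroup.
 */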
#define SHOW_FUNCTION_PER_GROUP(__VAR)                                  \
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,                \
                        struct cftype *cftype, struct seq_file *m)      \
{                                                                       \
        struct blkio_cgroup *blkcg;                                     \
        struct blkio_group *blkg;                                       \
        struct hlist_node *n;                                           \
                                                                        \
        if (!cgroup_lock_live_group(cgroup))                            \
                return -ENODEV;                                         \
                                                                        \
        blkcg = cgroup_to_blkio_cgroup(cgroup);                         \
        rcu_read_lock();                                                \
        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
                if (blkg->dev)                                          \
                        seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),  \
                                   MINOR(blkg->dev), blkg->__VAR);      \
        }                                                               \
        rcu_read_unlock();                                              \
        cgroup_unlock();                                                \
        return 0;                                                       \
}

SHOW_FUNCTION_PER_GROUP(time);
SHOW_FUNCTION_PER_GROUP(sectors);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue);
#endif
#undef SHOW_FUNCTION_PER_GROUP

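/*
 * Debug-only statistic: counts how often the policy dequeued this
 * group from its service tree.
 */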
#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
                                              unsigned long dequeue)
{
        blkg->dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
#endif

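/*
 * Control files exposed in each blkio cgroup directory; the "blkio."
 * prefix is added by the cgroup core from the subsystem name.
 */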
struct cftype blkio_files[] = {
        {
                .name = "weight",
                .read_u64 = blkiocg_weight_read,
                .write_u64 = blkiocg_weight_write,
        },
        {
                .name = "time",
                .read_seq_string = blkiocg_time_read,
        },
        {
                .name = "sectors",
                .read_seq_string = blkiocg_sectors_read,
        },
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "dequeue",
                .read_seq_string = blkiocg_dequeue_read,
        },
#endif
};

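/* Instantiate the control files above when a cgroup directory is created. */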
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        return cgroup_add_files(cgroup, subsys, blkio_files,
                                ARRAY_SIZE(blkio_files));
}

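/*
 * Cgroup teardown: repeatedly unlink the first remaining group under
 * blkcg->lock, then notify the policies outside that lock (their
 * unlink callbacks take the per-device queue lock, so holding
 * blkcg->lock across the call would risk inverting lock ordering).
 */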
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        unsigned long flags;
        struct blkio_group *blkg;
        void *key;
        struct blkio_policy_type *blkiop;

        rcu_read_lock();
remove_entry:
        spin_lock_irqsave(&blkcg->lock, flags);

        if (hlist_empty(&blkcg->blkg_list)) {
                spin_unlock_irqrestore(&blkcg->lock, flags);
                goto done;
        }

        blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
                           blkcg_node);
        key = rcu_dereference(blkg->key);
        __blkiocg_del_blkio_group(blkg);

        spin_unlock_irqrestore(&blkcg->lock, flags);

        /*
         * This blkio_group is being unlinked because the associated cgroup
         * is going away.  Let all the I/O controlling policies know about
         * this event.
         *
         * Currently this is a static call to one I/O controlling policy.
         * Once we have more policies in place, we will need some dynamic
         * registration of callback functions.
         */
        spin_lock(&blkio_list_lock);
        list_for_each_entry(blkiop, &blkio_list, list)
                blkiop->ops.blkio_unlink_group_fn(key, blkg);
        spin_unlock(&blkio_list_lock);
        goto remove_entry;
done:
        free_css_id(&blkio_subsys, &blkcg->css);
        rcu_read_unlock();
        if (blkcg != &blkio_root_cgroup)
                kfree(blkcg);
}

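/*
 * Allocate the per-cgroup state.  The root cgroup uses the statically
 * allocated blkio_root_cgroup (with double the default weight).
 */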
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg, *parent_blkcg;

        if (!cgroup->parent) {
                blkcg = &blkio_root_cgroup;
                goto done;
        }

        /* Currently we do not support a hierarchy deeper than two levels (0,1) */
        parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
        if (css_depth(&parent_blkcg->css) > 0)
                return ERR_PTR(-EINVAL);

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major
 * rework of the main cic data structures.  For now we allow a task to
 * change its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
                              struct cgroup *cgroup, struct task_struct *tsk,
                              bool threadgroup)
{
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                ret = -EINVAL;
        task_unlock(tsk);

        return ret;
}

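/*
 * Flag the io_context so that the I/O scheduler drops its cached queue
 * associations and re-looks them up in the task's new group on the
 * next I/O submission.
 */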
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
                           struct cgroup *prev, struct task_struct *tsk,
                           bool threadgroup)
{
        struct io_context *ioc;

        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc)
                ioc->cgroup_changed = 1;
        task_unlock(tsk);
}

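/*
 * Registration interface for I/O policies (e.g. the CFQ group
 * scheduler); registered policies receive weight updates and group
 * unlink notifications.
 */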
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);
        list_add_tail(&blkiop->list, &blkio_list);
        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);
        list_del_init(&blkiop->list);
        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

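/*
 * The blkio subsystem can be built as a module, so it is loaded and
 * unloaded through the cgroup core's modular-subsystem hooks rather
 * than being registered at compile time.
 */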
static int __init init_cgroup_blkio(void)
{
        return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
        cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");