/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include "blk-cgroup.h"
#include "cfq-iosched.h"

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };

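/* Return the blkio_cgroup embedded in @cgroup's blkio subsystem state. */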
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

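/* Accumulate the disk time used and the sectors dispatched by this group. */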
void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
			unsigned long time, unsigned long sectors)
{
	blkg->time += time;
	blkg->sectors += sectors;
}

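/*
 * Add @blkg to @blkcg's list of groups under blkcg->lock. @key is the
 * opaque key later used by blkiocg_lookup_group() to find this group.
 */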
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}

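/* Unhash @blkg from its cgroup's group list; caller must hold blkcg->lock. */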
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list and has now been
 * removed. Otherwise returns 1, indicating that the blkio_group was already
 * unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}

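/* Generate a read_u64 handler that reports the named per-cgroup field. */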
#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				struct cftype *cftype)			\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

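/* Validate the new weight and propagate it to every blkio_group in the cgroup. */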
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		cfq_update_blkio_group_weight(blkg, blkcg->weight);
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

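/*
 * Generate a seq_file read handler that prints one "major:minor <value>"
 * line per blkio_group in the cgroup.
 */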
#define SHOW_FUNCTION_PER_GROUP(__VAR)					\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
			struct cftype *cftype, struct seq_file *m)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev)						\
			seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),	\
				MINOR(blkg->dev), blkg->__VAR);		\
	}								\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time);
SHOW_FUNCTION_PER_GROUP(sectors);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue);
#endif
#undef SHOW_FUNCTION_PER_GROUP

#ifdef CONFIG_DEBUG_BLK_CGROUP
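/* Count how many times this group has been dequeued (debug statistic). */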
void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->dequeue += dequeue;
}
#endif

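/* Control files exposed under the blkio cgroup directory. */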
struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_seq_string = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_seq_string = blkiocg_sectors_read,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "dequeue",
		.read_seq_string = blkiocg_dequeue_read,
	},
#endif
};

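/* Called by the cgroup core to create the blkio control files for a new cgroup. */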
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

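/*
 * Unlink every blkio_group from the cgroup being destroyed and let the IO
 * controlling policy (currently CFQ) release its reference to each group.
 */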
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked because the associated cgroup
	 * is going away. Let all the IO controlling policies know about
	 * this event.
	 *
	 * Currently this is a static call to one IO controlling policy.
	 * Once we have more policies in place, we will need some dynamic
	 * registration of callback functions.
	 */
	cfq_unlink_blkio_group(key, blkg);
	goto remove_entry;
done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	kfree(blkcg);
}

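/*
 * Allocate and initialise a new blkio cgroup; the root cgroup uses the
 * statically allocated blkio_root_cgroup.
 */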
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support a hierarchy deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

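/*
 * Flag the task's io_context as having changed cgroups so the IO scheduler
 * can move it to the new group on subsequent IO.
 */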
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

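/* The blkio cgroup subsystem, registered with the cgroup core. */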
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.use_id = 1,
};