#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>

#include <trace/events/cgroup.h>
/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/*
 * Protects cgroup_root->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);
bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
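
/*
 * Illustrative sketch (not upstream code): a module can use the export
 * above to mirror an existing task's v1 cgroup memberships onto a freshly
 * created kernel thread.  The kthread_run() context and worker_fn below
 * are assumptions for the example.
 *
 *	struct task_struct *worker;
 *
 *	worker = kthread_run(worker_fn, NULL, "my-worker");
 *	if (!IS_ERR(worker))
 *		cgroup_attach_task_all(current, worker);
 *
 * Every mounted v1 hierarchy is walked; the default (v2) hierarchy is
 * skipped.
 */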

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	if (!cgroup_may_migrate_to(to))
		return -EBUSY;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);
		task = css_task_iter_next(&it);
		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				trace_cgroup_transfer_tasks(to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks").  We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted.  doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	kvfree(p);
}
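
/*
 * Worked example (assumes the common 4 KiB PAGE_SIZE and 4-byte pid_t):
 * PIDLIST_TOO_LARGE(c) is true once c * 4 > 8192, i.e. for more than
 * 2048 pids.  Smaller lists come from kmalloc(), larger ones fall back
 * to vmalloc(); kvfree() in pidlist_free() handles either origin.
 */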

/*
 * Used to destroy all pidlists lingering waiting for destroy timer.  None
 * should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1.  trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
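
/*
 * Worked example (illustrative): pidlist_uniq() expects a sorted array
 * and compacts runs of equal pids in place:
 *
 *	pid_t pids[] = { 3, 3, 5, 9, 9, 9, 12 };
 *	int n = pidlist_uniq(pids, 7);
 *
 * Afterwards n == 4 and the first four slots hold { 3, 5, 9, 12 };
 * slots beyond the returned length keep stale values and are ignored.
 */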

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs differently sorted list,
 * making it impossible to use, for example, single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating a new one if necessary.  The caller must already hold
 * cgrp->pidlist_mutex.  Returns NULL if we're out of memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += link->cset->nr_tasks;
	spin_unlock_irq(&css_set_lock);
	return count;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/*
 * seq_file methods for the tasks/procs files.  The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start).  Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
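
/*
 * Worked example (illustrative): with l->list == { 4, 8, 15, 23 }, a
 * start() at *pos == 15 finds the exact match at index 2 and resumes
 * there, while *pos == 9 (e.g. pid 9 exited and the pidlist was rebuilt
 * in between) falls between entries and the search lands on the first
 * larger pid, 15.  Either way *pos is rewritten to the pid actually
 * found so the position stays meaningful across pidlist reloads.
 */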

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array.  If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	cred = current_cred();
	tcred = get_task_cred(task);
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}
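
/*
 * Userspace view (illustrative): writing a pid to cgroup.procs moves the
 * whole thread group, while writing a tid to tasks moves just that thread.
 * The paths below assume a v1 hierarchy mounted at /sys/fs/cgroup/cpu
 * with an existing "mygroup" directory:
 *
 *	# echo $$ > /sys/fs/cgroup/cpu/mygroup/cgroup.procs	# all threads
 *	# echo $$ > /sys/fs/cgroup/cpu/mygroup/tasks		# one thread
 */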

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
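
/*
 * Example /proc/cgroups output produced by the seq_printf() format above
 * (values are illustrative, not from a real system):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset		2		1		1
 *	cpu		3		64		1
 *	memory		4		104		1
 */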

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf = NULL, *agentbuf = NULL;
	char *argv[3], *envp[3];
	int ret;

	mutex_lock(&cgroup_mutex);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out;

	spin_lock_irq(&css_set_lock);
	ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	spin_unlock_irq(&css_set_lock);
	if (ret < 0 || ret >= PATH_MAX)
		goto out;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	mutex_unlock(&cgroup_mutex);
	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
	goto out_free;
out:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
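
/*
 * Example release agent (illustrative): the helper receives the released
 * cgroup's path, relative to the hierarchy root, as its only argument.
 * A minimal agent that reaps empty cgroups, assuming the hierarchy is
 * mounted at /sys/fs/cgroup/mygrp, could be:
 *
 *	#!/bin/sh
 *	rmdir "/sys/fs/cgroup/mygrp$1"
 *
 * installed via:
 *
 *	# echo /usr/local/sbin/cgroup-reaper > /sys/fs/cgroup/mygrp/release_agent
 */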

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		trace_cgroup_rename(cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	u16 mask = U16_MAX;
	struct cgroup_subsys *ss;
	int nr_opts = 0;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		nr_opts++;

		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i))
				continue;
			if (cgroup1_ssid_disabled(i))
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			opts->subsys_mask |= (1 << i);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified select all the subsystems,
	 * otherwise if 'none', 'name=' and a subsystem name options were
	 * not specified, let's default to 'all'
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
				opts->subsys_mask |= (1 << i);

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}
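
/*
 * Example mount option strings accepted by the parser above (mount points
 * and the hierarchy name are illustrative):
 *
 *	# mount -t cgroup -o cpu,cpuacct cgroup /sys/fs/cgroup/cpu,cpuacct
 *	# mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd
 *	# mount -t cgroup -o noprefix,cpuset cgroup /dev/cpuset
 *
 * "all" (or naming no subsystem at all) selects every enabled v1
 * controller; "none,name=" creates an unbound, named hierarchy.
 */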

static int cgroup1_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((opts.flags ^ root->flags) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
		       opts.flags, opts.name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename = cgroup1_rename,
	.show_options = cgroup1_show_options,
	.remount_fs = cgroup1_remount,
	.mkdir = cgroup_mkdir,
	.rmdir = cgroup_rmdir,
	.show_path = cgroup_show_path,
};

struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
			     void *data, unsigned long magic,
			     struct cgroup_namespace *ns)
{
	struct super_block *pinned_sb = NULL;
	struct cgroup_sb_opts opts;
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	struct dentry *dentry;
	int i, ret;
	bool new_root = false;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(opts.subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if (root->flags ^ opts.flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		/*
		 * We want to reuse @root whose lifetime is governed by its
		 * ->cgrp.  Let's check whether @root is alive and keep it
		 * that way.  As cgroup_kill_sb() can happen anytime, we
		 * want to block it by pinning the sb so that @root doesn't
		 * get killed before mount is complete.
		 *
		 * With the sb pinned, tryget_live can reliably indicate
		 * whether @root can be reused.  If it's being killed,
		 * drain it.  We can use wait_queue for the wait but this
		 * path is super cold.  Let's just sleep a bit and retry.
		 */
		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
		if (IS_ERR(pinned_sb) ||
		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			if (!IS_ERR_OR_NULL(pinned_sb))
				deactivate_super(pinned_sb);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create new one without subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ns != &init_cgroup_ns) {
		ret = -EPERM;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	new_root = true;

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask, PERCPU_REF_INIT_DEAD);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret)
		return ERR_PTR(ret);

	dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
				 CGROUP_SUPER_MAGIC, ns);

	/*
	 * There's a race window after we release cgroup_mutex and before
	 * allocating a superblock.  Make sure a concurrent process won't
	 * be able to re-use the root during this window by delaying the
	 * initialization of root refcnt.
	 */
	if (new_root) {
		mutex_lock(&cgroup_mutex);
		percpu_ref_reinit(&root->cgrp.self.refcnt);
		mutex_unlock(&cgroup_mutex);
	}

	/*
	 * If @pinned_sb, we're reusing an existing root and holding an
	 * extra ref on its sb.  Mount is complete.  Put the extra ref.
	 */
	if (pinned_sb)
		deactivate_super(pinned_sb);

	return dentry;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			break;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
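
/*
 * Example usage (boot command line): "cgroup_no_v1=memory,blkio" prevents
 * the memory and blkio controllers from being bound to any v1 hierarchy
 * while leaving them usable on the v2 default hierarchy;
 * "cgroup_no_v1=all" blocks every controller for v1.
 */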