kernel/cgroup/cgroup-v1.c
1 #include "cgroup-internal.h"
2
3 #include <linux/ctype.h>
4 #include <linux/kmod.h>
5 #include <linux/sort.h>
6 #include <linux/delay.h>
7 #include <linux/mm.h>
8 #include <linux/sched/signal.h>
9 #include <linux/sched/task.h>
10 #include <linux/magic.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
13 #include <linux/delayacct.h>
14 #include <linux/pid_namespace.h>
15 #include <linux/cgroupstats.h>
16
17 #include <trace/events/cgroup.h>
18
19 /*
20 * pidlists linger the following amount before being destroyed. The goal
21 * is avoiding frequent destruction in the middle of consecutive read calls.
22 * Expiring in the middle is a performance problem, not a correctness one.
23 * 1 sec should be enough.
24 */
25 #define CGROUP_PIDLIST_DESTROY_DELAY HZ
26
27 /* Controllers blocked for v1 via the cgroup_no_v1= command line option */
28 static u16 cgroup_no_v1_mask;
29
30 /*
31 * pidlist destructions need to be flushed on cgroup destruction. Use a
32 * separate workqueue as flush domain.
33 */
34 static struct workqueue_struct *cgroup_pidlist_destroy_wq;
35
36 /*
37 * Protects cgroup_root->release_agent_path. Modifying it also requires
38 * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock.
39 */
40 static DEFINE_SPINLOCK(release_agent_path_lock);
41
42 bool cgroup1_ssid_disabled(int ssid)
43 {
44 return cgroup_no_v1_mask & (1 << ssid);
45 }
46
47 /**
48 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
49 * @from: attach to all cgroups of a given task
50 * @tsk: the task to be attached
51 */
52 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
53 {
54 struct cgroup_root *root;
55 int retval = 0;
56
57 mutex_lock(&cgroup_mutex);
58 percpu_down_write(&cgroup_threadgroup_rwsem);
59 for_each_root(root) {
60 struct cgroup *from_cgrp;
61
62 if (root == &cgrp_dfl_root)
63 continue;
64
65 spin_lock_irq(&css_set_lock);
66 from_cgrp = task_cgroup_from_root(from, root);
67 spin_unlock_irq(&css_set_lock);
68
69 retval = cgroup_attach_task(from_cgrp, tsk, false);
70 if (retval)
71 break;
72 }
73 percpu_up_write(&cgroup_threadgroup_rwsem);
74 mutex_unlock(&cgroup_mutex);
75
76 return retval;
77 }
78 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
79
80 /**
81 * cgroup_transfer_tasks - move tasks from one cgroup to another
82 * @to: cgroup to which the tasks will be moved
83 * @from: cgroup in which the tasks currently reside
84 *
85 * Locking rules between cgroup_post_fork() and the migration path
86 * guarantee that, if a task is forking while being migrated, the new child
87 * is guaranteed to be either visible in the source cgroup after the
88 * parent's migration is complete or put into the target cgroup. No task
89 * can slip out of migration through forking.
90 */
91 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
92 {
93 DEFINE_CGROUP_MGCTX(mgctx);
94 struct cgrp_cset_link *link;
95 struct css_task_iter it;
96 struct task_struct *task;
97 int ret;
98
99 if (cgroup_on_dfl(to))
100 return -EINVAL;
101
102 ret = cgroup_migrate_vet_dst(to);
103 if (ret)
104 return ret;
105
106 mutex_lock(&cgroup_mutex);
107
108 percpu_down_write(&cgroup_threadgroup_rwsem);
109
110 /* all tasks in @from are being moved, all csets are source */
111 spin_lock_irq(&css_set_lock);
112 list_for_each_entry(link, &from->cset_links, cset_link)
113 cgroup_migrate_add_src(link->cset, to, &mgctx);
114 spin_unlock_irq(&css_set_lock);
115
116 ret = cgroup_migrate_prepare_dst(&mgctx);
117 if (ret)
118 goto out_err;
119
120 /*
121 * Migrate tasks one-by-one until @from is empty. This fails iff
122 * ->can_attach() fails.
123 */
124 do {
125 css_task_iter_start(&from->self, 0, &it);
126 task = css_task_iter_next(&it);
127 if (task)
128 get_task_struct(task);
129 css_task_iter_end(&it);
130
131 if (task) {
132 ret = cgroup_migrate(task, false, &mgctx);
133 if (!ret)
134 trace_cgroup_transfer_tasks(to, task, false);
135 put_task_struct(task);
136 }
137 } while (task && !ret);
138 out_err:
139 cgroup_migrate_finish(&mgctx);
140 percpu_up_write(&cgroup_threadgroup_rwsem);
141 mutex_unlock(&cgroup_mutex);
142 return ret;
143 }
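/*
 * Illustrative note (not part of the original source): a typical caller is
 * a controller emptying one of its v1 cgroups, e.g. cpuset moving tasks out
 * of a cpuset whose CPUs or memory nodes have all gone offline; the loop
 * above retries until @from has no tasks left or ->can_attach() vetoes a
 * migration.
 */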
144
145 /*
146 * Stuff for reading the 'tasks'/'procs' files.
147 *
148 * Reading these files can return large amounts of data if a cgroup has
149 * *lots* of attached tasks. So it may need several calls to read(),
150 * but we cannot guarantee that the information we produce is correct
151 * unless we produce it entirely atomically.
152 *
153 */
154
155 /* which pidlist file are we talking about? */
156 enum cgroup_filetype {
157 CGROUP_FILE_PROCS,
158 CGROUP_FILE_TASKS,
159 };
160
161 /*
162 * A pidlist is a list of pids that virtually represents the contents of one
163 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
164 * a pair (one each for procs, tasks) for each pid namespace that's relevant
165 * to the cgroup.
166 */
167 struct cgroup_pidlist {
168 /*
169 * used to find which pidlist is wanted. doesn't change as long as
170 * this particular list stays in the list.
171 */
172 struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
173 /* array of xids */
174 pid_t *list;
175 /* how many elements the above list has */
176 int length;
177 /* each of these stored in a list by its cgroup */
178 struct list_head links;
179 /* pointer to the cgroup we belong to, for list removal purposes */
180 struct cgroup *owner;
181 /* for delayed destruction */
182 struct delayed_work destroy_dwork;
183 };
184
185 /*
186 * The following two functions "fix" the issue where there are more pids
187 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
188 * TODO: replace with a kernel-wide solution to this problem
189 */
190 #define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
191 static void *pidlist_allocate(int count)
192 {
193 if (PIDLIST_TOO_LARGE(count))
194 return vmalloc(count * sizeof(pid_t));
195 else
196 return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
197 }
198
199 static void pidlist_free(void *p)
200 {
201 kvfree(p);
202 }
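/*
 * Illustrative note (not part of the original source): with 4K pages and a
 * 4-byte pid_t, PIDLIST_TOO_LARGE() starts returning true above 2048 pids
 * (2048 * 4 = 8192 bytes, which is not yet > 2 pages), the point where the
 * allocation falls back from kmalloc() to vmalloc(). kvfree() in
 * pidlist_free() handles either case.
 */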
203
204 /*
205 * Used to destroy all pidlists still lingering on their destroy timer. None
206 * should be left afterwards.
207 */
208 void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
209 {
210 struct cgroup_pidlist *l, *tmp_l;
211
212 mutex_lock(&cgrp->pidlist_mutex);
213 list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
214 mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
215 mutex_unlock(&cgrp->pidlist_mutex);
216
217 flush_workqueue(cgroup_pidlist_destroy_wq);
218 BUG_ON(!list_empty(&cgrp->pidlists));
219 }
220
221 static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
222 {
223 struct delayed_work *dwork = to_delayed_work(work);
224 struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
225 destroy_dwork);
226 struct cgroup_pidlist *tofree = NULL;
227
228 mutex_lock(&l->owner->pidlist_mutex);
229
230 /*
231 * Destroy iff we didn't get queued again. The state won't change
232 * as destroy_dwork can only be queued while locked.
233 */
234 if (!delayed_work_pending(dwork)) {
235 list_del(&l->links);
236 pidlist_free(l->list);
237 put_pid_ns(l->key.ns);
238 tofree = l;
239 }
240
241 mutex_unlock(&l->owner->pidlist_mutex);
242 kfree(tofree);
243 }
244
245 /*
246 * pidlist_uniq - given a sorted, kmalloc()ed list, strip out all duplicate entries.
247 * Returns the number of unique elements.
248 */
249 static int pidlist_uniq(pid_t *list, int length)
250 {
251 int src, dest = 1;
252
253 /*
254 * we presume the 0th element is unique, so src starts at 1. trivial
255 * edge cases first; no work needs to be done for either
256 */
257 if (length == 0 || length == 1)
258 return length;
259 /* src and dest walk down the list; dest counts unique elements */
260 for (src = 1; src < length; src++) {
261 /* find next unique element */
262 while (list[src] == list[src-1]) {
263 src++;
264 if (src == length)
265 goto after;
266 }
267 /* dest always points to where the next unique element goes */
268 list[dest] = list[src];
269 dest++;
270 }
271 after:
272 return dest;
273 }
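/*
 * Illustrative note (not part of the original source): pidlist_uniq()
 * deduplicates an already sorted array in place. For example, given
 * list = {3, 3, 5, 7, 7, 7} and length = 6, the first three entries become
 * {3, 5, 7} and the function returns 3; entries past the returned count are
 * left in place and simply ignored by the callers.
 */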
274
275 /*
276 * The two pid files - tasks and cgroup.procs - guarantee that the result
277 * is sorted, which forced this whole pidlist fiasco. As pid order is
278 * different per namespace, each namespace needs a differently sorted list,
279 * making it impossible to use, for example, a single rbtree of member tasks
280 * sorted by task pointer. As pidlists can be fairly large, allocating one
281 * per open file is dangerous, so cgroup had to implement a shared pool of
282 * pidlists keyed by cgroup and namespace.
283 */
284 static int cmppid(const void *a, const void *b)
285 {
286 return *(pid_t *)a - *(pid_t *)b;
287 }
288
289 static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
290 enum cgroup_filetype type)
291 {
292 struct cgroup_pidlist *l;
293 /* don't need task_nsproxy() if we're looking at ourself */
294 struct pid_namespace *ns = task_active_pid_ns(current);
295
296 lockdep_assert_held(&cgrp->pidlist_mutex);
297
298 list_for_each_entry(l, &cgrp->pidlists, links)
299 if (l->key.type == type && l->key.ns == ns)
300 return l;
301 return NULL;
302 }
303
304 /*
305 * find the appropriate pidlist for our purpose (given procs vs tasks),
306 * creating one if necessary. The caller must hold cgrp->pidlist_mutex;
307 * returns the existing or newly created pidlist, or NULL if we're out of
308 * memory.
309 */
310 static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
311 enum cgroup_filetype type)
312 {
313 struct cgroup_pidlist *l;
314
315 lockdep_assert_held(&cgrp->pidlist_mutex);
316
317 l = cgroup_pidlist_find(cgrp, type);
318 if (l)
319 return l;
320
321 /* entry not found; create a new one */
322 l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
323 if (!l)
324 return l;
325
326 INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
327 l->key.type = type;
328 /* don't need task_nsproxy() if we're looking at ourself */
329 l->key.ns = get_pid_ns(task_active_pid_ns(current));
330 l->owner = cgrp;
331 list_add(&l->links, &cgrp->pidlists);
332 return l;
333 }
334
335 /**
336 * cgroup_task_count - count the number of tasks in a cgroup.
337 * @cgrp: the cgroup in question
338 */
339 int cgroup_task_count(const struct cgroup *cgrp)
340 {
341 int count = 0;
342 struct cgrp_cset_link *link;
343
344 spin_lock_irq(&css_set_lock);
345 list_for_each_entry(link, &cgrp->cset_links, cset_link)
346 count += link->cset->nr_tasks;
347 spin_unlock_irq(&css_set_lock);
348 return count;
349 }
350
351 /*
352 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
353 */
354 static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
355 struct cgroup_pidlist **lp)
356 {
357 pid_t *array;
358 int length;
359 int pid, n = 0; /* used for populating the array */
360 struct css_task_iter it;
361 struct task_struct *tsk;
362 struct cgroup_pidlist *l;
363
364 lockdep_assert_held(&cgrp->pidlist_mutex);
365
366 /*
367 * If cgroup gets more users after we read count, we won't have
368 * enough space - tough. This race is indistinguishable to the
369 * caller from the case that the additional cgroup users didn't
370 * show up until sometime later on.
371 */
372 length = cgroup_task_count(cgrp);
373 array = pidlist_allocate(length);
374 if (!array)
375 return -ENOMEM;
376 /* now, populate the array */
377 css_task_iter_start(&cgrp->self, 0, &it);
378 while ((tsk = css_task_iter_next(&it))) {
379 if (unlikely(n == length))
380 break;
381 /* get tgid or pid for procs or tasks file respectively */
382 if (type == CGROUP_FILE_PROCS)
383 pid = task_tgid_vnr(tsk);
384 else
385 pid = task_pid_vnr(tsk);
386 if (pid > 0) /* make sure to only use valid results */
387 array[n++] = pid;
388 }
389 css_task_iter_end(&it);
390 length = n;
391 /* now sort & (if procs) strip out duplicates */
392 sort(array, length, sizeof(pid_t), cmppid, NULL);
393 if (type == CGROUP_FILE_PROCS)
394 length = pidlist_uniq(array, length);
395
396 l = cgroup_pidlist_find_create(cgrp, type);
397 if (!l) {
398 pidlist_free(array);
399 return -ENOMEM;
400 }
401
402 /* store array, freeing old if necessary */
403 pidlist_free(l->list);
404 l->list = array;
405 l->length = length;
406 *lp = l;
407 return 0;
408 }
409
410 /*
411 * seq_file methods for the tasks/procs files. The seq_file position is the
412 * next pid to display; the seq_file iterator is a pointer to the pid
413 * in the cgroup->l->list array.
414 */
415
416 static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
417 {
418 /*
419 * Initially we receive a position value that corresponds to
420 * one more than the last pid shown (or 0 on the first call or
421 * after a seek to the start). Use a binary search to find the
422 * next pid to display, if any.
423 */
424 struct kernfs_open_file *of = s->private;
425 struct cgroup *cgrp = seq_css(s)->cgroup;
426 struct cgroup_pidlist *l;
427 enum cgroup_filetype type = seq_cft(s)->private;
428 int index = 0, pid = *pos;
429 int *iter, ret;
430
431 mutex_lock(&cgrp->pidlist_mutex);
432
433 /*
434 * !NULL @of->priv indicates that this isn't the first start()
435 * after open. If the matching pidlist is around, we can use that.
436 * Look for it. Note that @of->priv can't be used directly. It
437 * could already have been destroyed.
438 */
439 if (of->priv)
440 of->priv = cgroup_pidlist_find(cgrp, type);
441
442 /*
443 * Either this is the first start() after open or the matching
444 * pidlist has been destroyed in between. Create a new one.
445 */
446 if (!of->priv) {
447 ret = pidlist_array_load(cgrp, type,
448 (struct cgroup_pidlist **)&of->priv);
449 if (ret)
450 return ERR_PTR(ret);
451 }
452 l = of->priv;
453
454 if (pid) {
455 int end = l->length;
456
457 while (index < end) {
458 int mid = (index + end) / 2;
459 if (l->list[mid] == pid) {
460 index = mid;
461 break;
462 } else if (l->list[mid] <= pid)
463 index = mid + 1;
464 else
465 end = mid;
466 }
467 }
468 /* If we're off the end of the array, we're done */
469 if (index >= l->length)
470 return NULL;
471 /* Update the abstract position to be the actual pid that we found */
472 iter = l->list + index;
473 *pos = *iter;
474 return iter;
475 }
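/*
 * Illustrative note (not part of the original source): the position handed
 * to start() is a pid value, not an array index, so the binary search above
 * resolves it against whatever pidlist is current. If the pidlist was
 * rebuilt between read() calls, the walk simply resumes at the first pid
 * that is >= the requested position in the new array.
 */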
476
477 static void cgroup_pidlist_stop(struct seq_file *s, void *v)
478 {
479 struct kernfs_open_file *of = s->private;
480 struct cgroup_pidlist *l = of->priv;
481
482 if (l)
483 mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
484 CGROUP_PIDLIST_DESTROY_DELAY);
485 mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
486 }
487
488 static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
489 {
490 struct kernfs_open_file *of = s->private;
491 struct cgroup_pidlist *l = of->priv;
492 pid_t *p = v;
493 pid_t *end = l->list + l->length;
494 /*
495 * Advance to the next pid in the array. If this goes off the
496 * end, we're done
497 */
498 p++;
499 if (p >= end) {
500 return NULL;
501 } else {
502 *pos = *p;
503 return p;
504 }
505 }
506
507 static int cgroup_pidlist_show(struct seq_file *s, void *v)
508 {
509 seq_printf(s, "%d\n", *(int *)v);
510
511 return 0;
512 }
513
514 static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
515 char *buf, size_t nbytes, loff_t off,
516 bool threadgroup)
517 {
518 struct cgroup *cgrp;
519 struct task_struct *task;
520 const struct cred *cred, *tcred;
521 ssize_t ret;
522
523 cgrp = cgroup_kn_lock_live(of->kn, false);
524 if (!cgrp)
525 return -ENODEV;
526
527 task = cgroup_procs_write_start(buf, threadgroup);
528 ret = PTR_ERR_OR_ZERO(task);
529 if (ret)
530 goto out_unlock;
531
532 /*
533 * Even if we're attaching all tasks in the thread group, we only
534 * need to check permissions on one of them.
535 */
536 cred = current_cred();
537 tcred = get_task_cred(task);
538 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
539 !uid_eq(cred->euid, tcred->uid) &&
540 !uid_eq(cred->euid, tcred->suid))
541 ret = -EACCES;
542 put_cred(tcred);
543 if (ret)
544 goto out_finish;
545
546 ret = cgroup_attach_task(cgrp, task, threadgroup);
547
548 out_finish:
549 cgroup_procs_write_finish(task);
550 out_unlock:
551 cgroup_kn_unlock(of->kn);
552
553 return ret ?: nbytes;
554 }
555
556 static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
557 char *buf, size_t nbytes, loff_t off)
558 {
559 return __cgroup1_procs_write(of, buf, nbytes, off, true);
560 }
561
562 static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
563 char *buf, size_t nbytes, loff_t off)
564 {
565 return __cgroup1_procs_write(of, buf, nbytes, off, false);
566 }
567
568 static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
569 char *buf, size_t nbytes, loff_t off)
570 {
571 struct cgroup *cgrp;
572
573 BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
574
575 cgrp = cgroup_kn_lock_live(of->kn, false);
576 if (!cgrp)
577 return -ENODEV;
578 spin_lock(&release_agent_path_lock);
579 strlcpy(cgrp->root->release_agent_path, strstrip(buf),
580 sizeof(cgrp->root->release_agent_path));
581 spin_unlock(&release_agent_path_lock);
582 cgroup_kn_unlock(of->kn);
583 return nbytes;
584 }
585
586 static int cgroup_release_agent_show(struct seq_file *seq, void *v)
587 {
588 struct cgroup *cgrp = seq_css(seq)->cgroup;
589
590 spin_lock(&release_agent_path_lock);
591 seq_puts(seq, cgrp->root->release_agent_path);
592 spin_unlock(&release_agent_path_lock);
593 seq_putc(seq, '\n');
594 return 0;
595 }
596
597 static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
598 {
599 seq_puts(seq, "0\n");
600 return 0;
601 }
602
603 static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
604 struct cftype *cft)
605 {
606 return notify_on_release(css->cgroup);
607 }
608
609 static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
610 struct cftype *cft, u64 val)
611 {
612 if (val)
613 set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
614 else
615 clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
616 return 0;
617 }
618
619 static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
620 struct cftype *cft)
621 {
622 return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
623 }
624
625 static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
626 struct cftype *cft, u64 val)
627 {
628 if (val)
629 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
630 else
631 clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
632 return 0;
633 }
634
635 /* cgroup core interface files for the legacy hierarchies */
636 struct cftype cgroup1_base_files[] = {
637 {
638 .name = "cgroup.procs",
639 .seq_start = cgroup_pidlist_start,
640 .seq_next = cgroup_pidlist_next,
641 .seq_stop = cgroup_pidlist_stop,
642 .seq_show = cgroup_pidlist_show,
643 .private = CGROUP_FILE_PROCS,
644 .write = cgroup1_procs_write,
645 },
646 {
647 .name = "cgroup.clone_children",
648 .read_u64 = cgroup_clone_children_read,
649 .write_u64 = cgroup_clone_children_write,
650 },
651 {
652 .name = "cgroup.sane_behavior",
653 .flags = CFTYPE_ONLY_ON_ROOT,
654 .seq_show = cgroup_sane_behavior_show,
655 },
656 {
657 .name = "tasks",
658 .seq_start = cgroup_pidlist_start,
659 .seq_next = cgroup_pidlist_next,
660 .seq_stop = cgroup_pidlist_stop,
661 .seq_show = cgroup_pidlist_show,
662 .private = CGROUP_FILE_TASKS,
663 .write = cgroup1_tasks_write,
664 },
665 {
666 .name = "notify_on_release",
667 .read_u64 = cgroup_read_notify_on_release,
668 .write_u64 = cgroup_write_notify_on_release,
669 },
670 {
671 .name = "release_agent",
672 .flags = CFTYPE_ONLY_ON_ROOT,
673 .seq_show = cgroup_release_agent_show,
674 .write = cgroup_release_agent_write,
675 .max_write_len = PATH_MAX - 1,
676 },
677 { } /* terminate */
678 };
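/*
 * Illustrative usage (not part of the original source), assuming a typical
 * mount point chosen by userspace:
 *
 *   # echo $$ > /sys/fs/cgroup/cpu/mygrp/cgroup.procs  # move a thread group
 *   # echo $TID > /sys/fs/cgroup/cpu/mygrp/tasks       # move a single thread
 *   # cat /sys/fs/cgroup/cpu/mygrp/tasks               # sorted pid list
 */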
679
680 /* Display information about each subsystem and each hierarchy */
681 static int proc_cgroupstats_show(struct seq_file *m, void *v)
682 {
683 struct cgroup_subsys *ss;
684 int i;
685
686 seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
687 /*
688 * ideally we don't want subsystems moving around while we do this.
689 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
690 * subsys/hierarchy state.
691 */
692 mutex_lock(&cgroup_mutex);
693
694 for_each_subsys(ss, i)
695 seq_printf(m, "%s\t%d\t%d\t%d\n",
696 ss->legacy_name, ss->root->hierarchy_id,
697 atomic_read(&ss->root->nr_cgrps),
698 cgroup_ssid_enabled(i));
699
700 mutex_unlock(&cgroup_mutex);
701 return 0;
702 }
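/*
 * Illustrative output (not part of the original source): the resulting
 * /proc/cgroups looks roughly like
 *
 *   #subsys_name    hierarchy       num_cgroups     enabled
 *   cpuset          2               4               1
 *   cpu             3               12              1
 *   memory          0               1               0
 *
 * where "hierarchy" is 0 for a controller bound to the default (v2)
 * hierarchy and "enabled" reflects cgroup_ssid_enabled().
 */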
703
704 static int cgroupstats_open(struct inode *inode, struct file *file)
705 {
706 return single_open(file, proc_cgroupstats_show, NULL);
707 }
708
709 const struct file_operations proc_cgroupstats_operations = {
710 .open = cgroupstats_open,
711 .read = seq_read,
712 .llseek = seq_lseek,
713 .release = single_release,
714 };
715
716 /**
717 * cgroupstats_build - build and fill cgroupstats
718 * @stats: cgroupstats to fill information into
719 * @dentry: A dentry belonging to the cgroup for which stats have
720 * been requested.
721 *
722 * Build and fill cgroupstats so that taskstats can export it to user
723 * space.
724 */
725 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
726 {
727 struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
728 struct cgroup *cgrp;
729 struct css_task_iter it;
730 struct task_struct *tsk;
731
732 /* it should be a kernfs_node belonging to cgroupfs and should be a directory */
733 if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
734 kernfs_type(kn) != KERNFS_DIR)
735 return -EINVAL;
736
737 mutex_lock(&cgroup_mutex);
738
739 /*
740 * We aren't being called from kernfs and there's no guarantee on
741 * @kn->priv's validity. For this and css_tryget_online_from_dir(),
742 * @kn->priv is RCU safe. Let's do the RCU dancing.
743 */
744 rcu_read_lock();
745 cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
746 if (!cgrp || cgroup_is_dead(cgrp)) {
747 rcu_read_unlock();
748 mutex_unlock(&cgroup_mutex);
749 return -ENOENT;
750 }
751 rcu_read_unlock();
752
753 css_task_iter_start(&cgrp->self, 0, &it);
754 while ((tsk = css_task_iter_next(&it))) {
755 switch (tsk->state) {
756 case TASK_RUNNING:
757 stats->nr_running++;
758 break;
759 case TASK_INTERRUPTIBLE:
760 stats->nr_sleeping++;
761 break;
762 case TASK_UNINTERRUPTIBLE:
763 stats->nr_uninterruptible++;
764 break;
765 case TASK_STOPPED:
766 stats->nr_stopped++;
767 break;
768 default:
769 if (delayacct_is_task_waiting_on_io(tsk))
770 stats->nr_io_wait++;
771 break;
772 }
773 }
774 css_task_iter_end(&it);
775
776 mutex_unlock(&cgroup_mutex);
777 return 0;
778 }
779
780 void cgroup1_check_for_release(struct cgroup *cgrp)
781 {
782 if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
783 !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
784 schedule_work(&cgrp->release_agent_work);
785 }
786
787 /*
788 * Notify userspace when a cgroup is released, by running the
789 * configured release agent with the name of the cgroup (path
790 * relative to the root of cgroup file system) as the argument.
791 *
792 * Most likely, this user command will try to rmdir this cgroup.
793 *
794 * This races with the possibility that some other task will be
795 * attached to this cgroup before it is removed, or that some other
796 * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
797 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
798 * unused, and this cgroup will be reprieved from its death sentence,
799 * to continue to serve a useful existence. Next time it's released,
800 * we will get notified again, if it still has 'notify_on_release' set.
801 *
802 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
803 * means only wait until the task is successfully execve()'d. The
804 * separate release agent task is forked by call_usermodehelper(),
805 * then control in this thread returns here, without waiting for the
806 * release agent task. We don't bother to wait because the caller of
807 * this routine has no use for the exit status of the release agent
808 * task, so no sense holding our caller up for that.
809 */
810 void cgroup1_release_agent(struct work_struct *work)
811 {
812 struct cgroup *cgrp =
813 container_of(work, struct cgroup, release_agent_work);
814 char *pathbuf = NULL, *agentbuf = NULL;
815 char *argv[3], *envp[3];
816 int ret;
817
818 mutex_lock(&cgroup_mutex);
819
820 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
821 agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
822 if (!pathbuf || !agentbuf)
823 goto out;
824
825 spin_lock_irq(&css_set_lock);
826 ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
827 spin_unlock_irq(&css_set_lock);
828 if (ret < 0 || ret >= PATH_MAX)
829 goto out;
830
831 argv[0] = agentbuf;
832 argv[1] = pathbuf;
833 argv[2] = NULL;
834
835 /* minimal command environment */
836 envp[0] = "HOME=/";
837 envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
838 envp[2] = NULL;
839
840 mutex_unlock(&cgroup_mutex);
841 call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
842 goto out_free;
843 out:
844 mutex_unlock(&cgroup_mutex);
845 out_free:
846 kfree(agentbuf);
847 kfree(pathbuf);
848 }
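/*
 * Illustrative example (not part of the original source): with the
 * release_agent path set to, say, /sbin/cgroup-release and a cgroup
 * /foo/bar becoming empty, the helper ends up invoked roughly as
 *
 *   /sbin/cgroup-release /foo/bar
 *
 * with HOME and PATH as the only environment, and the kernel does not wait
 * for it to exit (UMH_WAIT_EXEC).
 */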
849
850 /*
851 * cgroup1_rename - Only allow simple rename of directories in place.
852 */
853 static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
854 const char *new_name_str)
855 {
856 struct cgroup *cgrp = kn->priv;
857 int ret;
858
859 if (kernfs_type(kn) != KERNFS_DIR)
860 return -ENOTDIR;
861 if (kn->parent != new_parent)
862 return -EIO;
863
864 /*
865 * We're gonna grab cgroup_mutex which nests outside kernfs
866 * active_ref. kernfs_rename() doesn't require active_ref
867 * protection. Break them before grabbing cgroup_mutex.
868 */
869 kernfs_break_active_protection(new_parent);
870 kernfs_break_active_protection(kn);
871
872 mutex_lock(&cgroup_mutex);
873
874 ret = kernfs_rename(kn, new_parent, new_name_str);
875 if (!ret)
876 trace_cgroup_rename(cgrp);
877
878 mutex_unlock(&cgroup_mutex);
879
880 kernfs_unbreak_active_protection(kn);
881 kernfs_unbreak_active_protection(new_parent);
882 return ret;
883 }
884
885 static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
886 {
887 struct cgroup_root *root = cgroup_root_from_kf(kf_root);
888 struct cgroup_subsys *ss;
889 int ssid;
890
891 for_each_subsys(ss, ssid)
892 if (root->subsys_mask & (1 << ssid))
893 seq_show_option(seq, ss->legacy_name, NULL);
894 if (root->flags & CGRP_ROOT_NOPREFIX)
895 seq_puts(seq, ",noprefix");
896 if (root->flags & CGRP_ROOT_XATTR)
897 seq_puts(seq, ",xattr");
898 if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
899 seq_puts(seq, ",cpuset_v2_mode");
900
901 spin_lock(&release_agent_path_lock);
902 if (strlen(root->release_agent_path))
903 seq_show_option(seq, "release_agent",
904 root->release_agent_path);
905 spin_unlock(&release_agent_path_lock);
906
907 if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
908 seq_puts(seq, ",clone_children");
909 if (strlen(root->name))
910 seq_show_option(seq, "name", root->name);
911 return 0;
912 }
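/*
 * Illustrative output (not part of the original source): for a hierarchy
 * mounted with the cpuset controller and a named root, the options shown in
 * /proc/mounts would look something like
 *
 *   ,cpuset,noprefix,clone_children,name=mygrp
 *
 * appended after the generic rw/relatime flags printed by the mount code.
 */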
913
914 static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
915 {
916 char *token, *o = data;
917 bool all_ss = false, one_ss = false;
918 u16 mask = U16_MAX;
919 struct cgroup_subsys *ss;
920 int nr_opts = 0;
921 int i;
922
923 #ifdef CONFIG_CPUSETS
924 mask = ~((u16)1 << cpuset_cgrp_id);
925 #endif
926
927 memset(opts, 0, sizeof(*opts));
928
929 while ((token = strsep(&o, ",")) != NULL) {
930 nr_opts++;
931
932 if (!*token)
933 return -EINVAL;
934 if (!strcmp(token, "none")) {
935 /* Explicitly have no subsystems */
936 opts->none = true;
937 continue;
938 }
939 if (!strcmp(token, "all")) {
940 /* Mutually exclusive option 'all' + subsystem name */
941 if (one_ss)
942 return -EINVAL;
943 all_ss = true;
944 continue;
945 }
946 if (!strcmp(token, "noprefix")) {
947 opts->flags |= CGRP_ROOT_NOPREFIX;
948 continue;
949 }
950 if (!strcmp(token, "clone_children")) {
951 opts->cpuset_clone_children = true;
952 continue;
953 }
954 if (!strcmp(token, "cpuset_v2_mode")) {
955 opts->flags |= CGRP_ROOT_CPUSET_V2_MODE;
956 continue;
957 }
958 if (!strcmp(token, "xattr")) {
959 opts->flags |= CGRP_ROOT_XATTR;
960 continue;
961 }
962 if (!strncmp(token, "release_agent=", 14)) {
963 /* Specifying two release agents is forbidden */
964 if (opts->release_agent)
965 return -EINVAL;
966 opts->release_agent =
967 kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
968 if (!opts->release_agent)
969 return -ENOMEM;
970 continue;
971 }
972 if (!strncmp(token, "name=", 5)) {
973 const char *name = token + 5;
974 /* Can't specify an empty name */
975 if (!strlen(name))
976 return -EINVAL;
977 /* Must match [\w.-]+ */
978 for (i = 0; i < strlen(name); i++) {
979 char c = name[i];
980 if (isalnum(c))
981 continue;
982 if ((c == '.') || (c == '-') || (c == '_'))
983 continue;
984 return -EINVAL;
985 }
986 /* Specifying two names is forbidden */
987 if (opts->name)
988 return -EINVAL;
989 opts->name = kstrndup(name,
990 MAX_CGROUP_ROOT_NAMELEN - 1,
991 GFP_KERNEL);
992 if (!opts->name)
993 return -ENOMEM;
994
995 continue;
996 }
997
998 for_each_subsys(ss, i) {
999 if (strcmp(token, ss->legacy_name))
1000 continue;
1001 if (!cgroup_ssid_enabled(i))
1002 continue;
1003 if (cgroup1_ssid_disabled(i))
1004 continue;
1005
1006 /* Mutually exclusive option 'all' + subsystem name */
1007 if (all_ss)
1008 return -EINVAL;
1009 opts->subsys_mask |= (1 << i);
1010 one_ss = true;
1011
1012 break;
1013 }
1014 if (i == CGROUP_SUBSYS_COUNT)
1015 return -ENOENT;
1016 }
1017
1018 /*
1019 * If the 'all' option was specified, select all the subsystems.
1020 * Otherwise, if none of the 'none', 'name=' or subsystem name options
1021 * were specified, default to 'all'.
1022 */
1023 if (all_ss || (!one_ss && !opts->none && !opts->name))
1024 for_each_subsys(ss, i)
1025 if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
1026 opts->subsys_mask |= (1 << i);
1027
1028 /*
1029 * We either have to specify by name or by subsystems. (So all
1030 * empty hierarchies must have a name).
1031 */
1032 if (!opts->subsys_mask && !opts->name)
1033 return -EINVAL;
1034
1035 /*
1036 * Option noprefix was introduced just for backward compatibility
1037 * with the old cpuset, so we allow noprefix only if mounting just
1038 * the cpuset subsystem.
1039 */
1040 if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
1041 return -EINVAL;
1042
1043 /* Can't specify "none" and some subsystems */
1044 if (opts->subsys_mask && opts->none)
1045 return -EINVAL;
1046
1047 return 0;
1048 }
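/*
 * Illustrative example (not part of the original source): a mount like
 *
 *   # mount -t cgroup -o cpu,cpuacct,name=mygrp none /mnt/cg
 *
 * reaches this parser with data = "cpu,cpuacct,name=mygrp" and yields a
 * subsys_mask with the cpu and cpuacct bits set plus opts->name "mygrp";
 * "all" selects every enabled controller and may not be combined with an
 * explicit controller name.
 */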
1049
1050 static int cgroup1_remount(struct kernfs_root *kf_root, int *flags, char *data)
1051 {
1052 int ret = 0;
1053 struct cgroup_root *root = cgroup_root_from_kf(kf_root);
1054 struct cgroup_sb_opts opts;
1055 u16 added_mask, removed_mask;
1056
1057 cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
1058
1059 /* See what subsystems are wanted */
1060 ret = parse_cgroupfs_options(data, &opts);
1061 if (ret)
1062 goto out_unlock;
1063
1064 if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
1065 pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
1066 task_tgid_nr(current), current->comm);
1067
1068 added_mask = opts.subsys_mask & ~root->subsys_mask;
1069 removed_mask = root->subsys_mask & ~opts.subsys_mask;
1070
1071 /* Don't allow flags or name to change at remount */
1072 if ((opts.flags ^ root->flags) ||
1073 (opts.name && strcmp(opts.name, root->name))) {
1074 pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
1075 opts.flags, opts.name ?: "", root->flags, root->name);
1076 ret = -EINVAL;
1077 goto out_unlock;
1078 }
1079
1080 /* remounting is not allowed for populated hierarchies */
1081 if (!list_empty(&root->cgrp.self.children)) {
1082 ret = -EBUSY;
1083 goto out_unlock;
1084 }
1085
1086 ret = rebind_subsystems(root, added_mask);
1087 if (ret)
1088 goto out_unlock;
1089
1090 WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));
1091
1092 if (opts.release_agent) {
1093 spin_lock(&release_agent_path_lock);
1094 strcpy(root->release_agent_path, opts.release_agent);
1095 spin_unlock(&release_agent_path_lock);
1096 }
1097
1098 trace_cgroup_remount(root);
1099
1100 out_unlock:
1101 kfree(opts.release_agent);
1102 kfree(opts.name);
1103 mutex_unlock(&cgroup_mutex);
1104 return ret;
1105 }
1106
1107 struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
1108 .rename = cgroup1_rename,
1109 .show_options = cgroup1_show_options,
1110 .remount_fs = cgroup1_remount,
1111 .mkdir = cgroup_mkdir,
1112 .rmdir = cgroup_rmdir,
1113 .show_path = cgroup_show_path,
1114 };
1115
1116 struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
1117 void *data, unsigned long magic,
1118 struct cgroup_namespace *ns)
1119 {
1120 struct super_block *pinned_sb = NULL;
1121 struct cgroup_sb_opts opts;
1122 struct cgroup_root *root;
1123 struct cgroup_subsys *ss;
1124 struct dentry *dentry;
1125 int i, ret;
1126 bool new_root = false;
1127
1128 cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
1129
1130 /* First find the desired set of subsystems */
1131 ret = parse_cgroupfs_options(data, &opts);
1132 if (ret)
1133 goto out_unlock;
1134
1135 /*
1136 * Destruction of cgroup root is asynchronous, so subsystems may
1137 * still be dying after the previous unmount. Let's drain the
1138 * dying subsystems. We just need to ensure that the ones
1139 * unmounted previously finish dying and don't care about new ones
1140 * starting. Testing ref liveliness is good enough.
1141 */
1142 for_each_subsys(ss, i) {
1143 if (!(opts.subsys_mask & (1 << i)) ||
1144 ss->root == &cgrp_dfl_root)
1145 continue;
1146
1147 if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
1148 mutex_unlock(&cgroup_mutex);
1149 msleep(10);
1150 ret = restart_syscall();
1151 goto out_free;
1152 }
1153 cgroup_put(&ss->root->cgrp);
1154 }
1155
1156 for_each_root(root) {
1157 bool name_match = false;
1158
1159 if (root == &cgrp_dfl_root)
1160 continue;
1161
1162 /*
1163 * If we asked for a name then it must match. Also, if
1164 * name matches but subsys_mask doesn't, we should fail.
1165 * Remember whether name matched.
1166 */
1167 if (opts.name) {
1168 if (strcmp(opts.name, root->name))
1169 continue;
1170 name_match = true;
1171 }
1172
1173 /*
1174 * If we asked for subsystems (or explicitly for no
1175 * subsystems) then they must match.
1176 */
1177 if ((opts.subsys_mask || opts.none) &&
1178 (opts.subsys_mask != root->subsys_mask)) {
1179 if (!name_match)
1180 continue;
1181 ret = -EBUSY;
1182 goto out_unlock;
1183 }
1184
1185 if (root->flags ^ opts.flags)
1186 pr_warn("new mount options do not match the existing superblock, will be ignored\n");
1187
1188 /*
1189 * We want to reuse @root whose lifetime is governed by its
1190 * ->cgrp. Let's check whether @root is alive and keep it
1191 * that way. As cgroup_kill_sb() can happen anytime, we
1192 * want to block it by pinning the sb so that @root doesn't
1193 * get killed before mount is complete.
1194 *
1195 * With the sb pinned, tryget_live can reliably indicate
1196 * whether @root can be reused. If it's being killed,
1197 * drain it. We can use wait_queue for the wait but this
1198 * path is super cold. Let's just sleep a bit and retry.
1199 */
1200 pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
1201 if (IS_ERR(pinned_sb) ||
1202 !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
1203 mutex_unlock(&cgroup_mutex);
1204 if (!IS_ERR_OR_NULL(pinned_sb))
1205 deactivate_super(pinned_sb);
1206 msleep(10);
1207 ret = restart_syscall();
1208 goto out_free;
1209 }
1210
1211 ret = 0;
1212 goto out_unlock;
1213 }
1214
1215 /*
1216 * No such thing, create a new one. name= matching without subsys
1217 * specification is allowed for already existing hierarchies but we
1218 * can't create a new one without a subsys specification.
1219 */
1220 if (!opts.subsys_mask && !opts.none) {
1221 ret = -EINVAL;
1222 goto out_unlock;
1223 }
1224
1225 /* Hierarchies may only be created in the initial cgroup namespace. */
1226 if (ns != &init_cgroup_ns) {
1227 ret = -EPERM;
1228 goto out_unlock;
1229 }
1230
1231 root = kzalloc(sizeof(*root), GFP_KERNEL);
1232 if (!root) {
1233 ret = -ENOMEM;
1234 goto out_unlock;
1235 }
1236 new_root = true;
1237
1238 init_cgroup_root(root, &opts);
1239
1240 ret = cgroup_setup_root(root, opts.subsys_mask, PERCPU_REF_INIT_DEAD);
1241 if (ret)
1242 cgroup_free_root(root);
1243
1244 out_unlock:
1245 mutex_unlock(&cgroup_mutex);
1246 out_free:
1247 kfree(opts.release_agent);
1248 kfree(opts.name);
1249
1250 if (ret)
1251 return ERR_PTR(ret);
1252
1253 dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
1254 CGROUP_SUPER_MAGIC, ns);
1255
1256 /*
1257 * There's a race window after we release cgroup_mutex and before
1258 * allocating a superblock. Make sure a concurrent process won't
1259 * be able to re-use the root during this window by delaying the
1260 * initialization of root refcnt.
1261 */
1262 if (new_root) {
1263 mutex_lock(&cgroup_mutex);
1264 percpu_ref_reinit(&root->cgrp.self.refcnt);
1265 mutex_unlock(&cgroup_mutex);
1266 }
1267
1268 /*
1269 * If @pinned_sb, we're reusing an existing root and holding an
1270 * extra ref on its sb. Mount is complete. Put the extra ref.
1271 */
1272 if (pinned_sb)
1273 deactivate_super(pinned_sb);
1274
1275 return dentry;
1276 }
1277
1278 static int __init cgroup1_wq_init(void)
1279 {
1280 /*
1281 * Used to destroy pidlists and kept separate so it can serve as the
1282 * flush domain. Cap @max_active at 1 as well.
1283 */
1284 cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
1285 0, 1);
1286 BUG_ON(!cgroup_pidlist_destroy_wq);
1287 return 0;
1288 }
1289 core_initcall(cgroup1_wq_init);
1290
1291 static int __init cgroup_no_v1(char *str)
1292 {
1293 struct cgroup_subsys *ss;
1294 char *token;
1295 int i;
1296
1297 while ((token = strsep(&str, ",")) != NULL) {
1298 if (!*token)
1299 continue;
1300
1301 if (!strcmp(token, "all")) {
1302 cgroup_no_v1_mask = U16_MAX;
1303 break;
1304 }
1305
1306 for_each_subsys(ss, i) {
1307 if (strcmp(token, ss->name) &&
1308 strcmp(token, ss->legacy_name))
1309 continue;
1310
1311 cgroup_no_v1_mask |= 1 << i;
1312 }
1313 }
1314 return 1;
1315 }
1316 __setup("cgroup_no_v1=", cgroup_no_v1);
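/*
 * Illustrative example (not part of the original source): booting with
 * "cgroup_no_v1=memory,blkio" sets the corresponding bits in
 * cgroup_no_v1_mask so those controllers cannot be bound to v1 hierarchies,
 * while "cgroup_no_v1=all" blocks every controller from v1.
 */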