1 /*
2 * Generic process-grouping system.
3 *
4 * Based originally on the cpuset system, extracted by Paul Menage
5 * Copyright (C) 2006 Google, Inc
6 *
7 * Notifications support
8 * Copyright (C) 2009 Nokia Corporation
9 * Author: Kirill A. Shutemov
10 *
11 * Copyright notices from the original cpuset code:
12 * --------------------------------------------------
13 * Copyright (C) 2003 BULL SA.
14 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
15 *
16 * Portions derived from Patrick Mochel's sysfs code.
17 * sysfs is Copyright (c) 2001-3 Patrick Mochel
18 *
19 * 2003-10-10 Written by Simon Derr.
20 * 2003-10-22 Updates by Stephen Hemminger.
21 * 2004 May-July Rework by Paul Jackson.
22 * ---------------------------------------------------
23 *
24 * This file is subject to the terms and conditions of the GNU General Public
25 * License. See the file COPYING in the main directory of the Linux
26 * distribution for more details.
27 */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/cgroup.h>
32 #include <linux/cred.h>
33 #include <linux/ctype.h>
34 #include <linux/errno.h>
35 #include <linux/init_task.h>
36 #include <linux/kernel.h>
37 #include <linux/list.h>
38 #include <linux/magic.h>
39 #include <linux/mm.h>
40 #include <linux/mutex.h>
41 #include <linux/mount.h>
42 #include <linux/pagemap.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rcupdate.h>
45 #include <linux/sched.h>
46 #include <linux/slab.h>
47 #include <linux/spinlock.h>
48 #include <linux/rwsem.h>
49 #include <linux/percpu-rwsem.h>
50 #include <linux/string.h>
51 #include <linux/sort.h>
52 #include <linux/kmod.h>
53 #include <linux/delayacct.h>
54 #include <linux/cgroupstats.h>
55 #include <linux/hashtable.h>
56 #include <linux/pid_namespace.h>
57 #include <linux/idr.h>
58 #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
59 #include <linux/kthread.h>
60 #include <linux/delay.h>
61
62 #include <linux/atomic.h>
63
64 /*
65 * pidlists linger for the following amount of time before being
66 * destroyed. The goal is to avoid frequent destruction in the middle of
67 * consecutive read calls. Expiring in the middle is a performance
68 * problem, not a correctness one. 1 sec should be enough.
69 */
70 #define CGROUP_PIDLIST_DESTROY_DELAY HZ
71
72 #define CGROUP_FILE_NAME_MAX (MAX_CGROUP_TYPE_NAMELEN + \
73 MAX_CFTYPE_NAME + 2)
74
75 /*
76 * cgroup_mutex is the master lock. Any modification to cgroup or its
77 * hierarchy must be performed while holding it.
78 *
79 * css_set_rwsem protects task->cgroups pointer, the list of css_set
80 * objects, and the chain of tasks off each css_set.
81 *
82 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
83 * cgroup.h can use them for lockdep annotations.
84 */
85 #ifdef CONFIG_PROVE_RCU
86 DEFINE_MUTEX(cgroup_mutex);
87 DECLARE_RWSEM(css_set_rwsem);
88 EXPORT_SYMBOL_GPL(cgroup_mutex);
89 EXPORT_SYMBOL_GPL(css_set_rwsem);
90 #else
91 static DEFINE_MUTEX(cgroup_mutex);
92 static DECLARE_RWSEM(css_set_rwsem);
93 #endif
94
95 /*
96 * Protects cgroup_idr and css_idr so that IDs can be released without
97 * grabbing cgroup_mutex.
98 */
99 static DEFINE_SPINLOCK(cgroup_idr_lock);
100
101 /*
102 * Protects cgroup_root->release_agent_path. Modifying it also requires
103 * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock.
104 */
105 static DEFINE_SPINLOCK(release_agent_path_lock);
106
107 struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
108
109 #define cgroup_assert_mutex_or_rcu_locked() \
110 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
111 !lockdep_is_held(&cgroup_mutex), \
112 "cgroup_mutex or RCU read lock required");
113
114 /*
115 * cgroup destruction makes heavy use of work items and there can be a lot
116 * of concurrent destructions. Use a separate workqueue so that cgroup
117 * destruction work items don't end up filling up max_active of system_wq,
118 * which may lead to deadlock.
119 */
120 static struct workqueue_struct *cgroup_destroy_wq;
121
122 /*
123 * pidlist destructions need to be flushed on cgroup destruction. Use a
124 * separate workqueue as flush domain.
125 */
126 static struct workqueue_struct *cgroup_pidlist_destroy_wq;
127
128 /* generate an array of cgroup subsystem pointers */
129 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
130 static struct cgroup_subsys *cgroup_subsys[] = {
131 #include <linux/cgroup_subsys.h>
132 };
133 #undef SUBSYS
134
135 /* array of cgroup subsystem names */
136 #define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
137 static const char *cgroup_subsys_name[] = {
138 #include <linux/cgroup_subsys.h>
139 };
140 #undef SUBSYS
141
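/*
 * A sketch of what the two SUBSYS() blocks above expand to, assuming
 * the cpuset and memory controllers are compiled in (the real entries
 * are generated from <linux/cgroup_subsys.h>):
 *
 *	static struct cgroup_subsys *cgroup_subsys[] = {
 *		[cpuset_cgrp_id] = &cpuset_cgrp_subsys,
 *		[memory_cgrp_id] = &memory_cgrp_subsys,
 *		...
 *	};
 *
 *	static const char *cgroup_subsys_name[] = {
 *		[cpuset_cgrp_id] = "cpuset",
 *		[memory_cgrp_id] = "memory",
 *		...
 *	};
 */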
142 /* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
143 #define SUBSYS(_x) \
144 DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key); \
145 DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key); \
146 EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key); \
147 EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
148 #include <linux/cgroup_subsys.h>
149 #undef SUBSYS
150
151 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
152 static struct static_key_true *cgroup_subsys_enabled_key[] = {
153 #include <linux/cgroup_subsys.h>
154 };
155 #undef SUBSYS
156
157 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
158 static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
159 #include <linux/cgroup_subsys.h>
160 };
161 #undef SUBSYS
162
163 /*
164 * The default hierarchy, reserved for the subsystems that are otherwise
165 * unattached - it never has more than a single cgroup, and all tasks are
166 * part of that cgroup.
167 */
168 struct cgroup_root cgrp_dfl_root;
169 EXPORT_SYMBOL_GPL(cgrp_dfl_root);
170
171 /*
172 * The default hierarchy always exists but is hidden until mounted for the
173 * first time. This is for backward compatibility.
174 */
175 static bool cgrp_dfl_root_visible;
176
177 /*
178 * Set by the boot param of the same name and makes subsystems with NULL
179 * ->dfl_files use ->legacy_files on the default hierarchy.
180 */
181 static bool cgroup_legacy_files_on_dfl;
182
183 /* some controllers are not supported in the default hierarchy */
184 static unsigned long cgrp_dfl_root_inhibit_ss_mask;
185
186 /* The list of hierarchy roots */
187
188 static LIST_HEAD(cgroup_roots);
189 static int cgroup_root_count;
190
191 /* hierarchy ID allocation and mapping, protected by cgroup_mutex */
192 static DEFINE_IDR(cgroup_hierarchy_idr);
193
194 /*
195 * Assign a monotonically increasing serial number to csses. It guarantees
196 * csses with bigger numbers are newer than those with smaller numbers.
197 * Also, as csses are always appended to the parent's ->children list, it
198 * guarantees that sibling csses are always sorted in the ascending serial
199 * number order on the list. Protected by cgroup_mutex.
200 */
201 static u64 css_serial_nr_next = 1;
202
203 /*
204 * These bitmask flags indicate whether tasks in the fork and exit paths have
205 * fork/exit handlers to call. This avoids us having to do extra work in the
206 * fork/exit path to check which subsystems have fork/exit callbacks.
207 */
208 static unsigned long have_fork_callback __read_mostly;
209 static unsigned long have_exit_callback __read_mostly;
210
211 /* Ditto for the can_fork callback. */
212 static unsigned long have_canfork_callback __read_mostly;
213
214 static struct cftype cgroup_dfl_base_files[];
215 static struct cftype cgroup_legacy_base_files[];
216
217 static int rebind_subsystems(struct cgroup_root *dst_root,
218 unsigned long ss_mask);
219 static int cgroup_destroy_locked(struct cgroup *cgrp);
220 static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
221 bool visible);
222 static void css_release(struct percpu_ref *ref);
223 static void kill_css(struct cgroup_subsys_state *css);
224 static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
225 bool is_add);
226
227 /**
228 * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
229 * @ssid: subsys ID of interest
230 *
231 * cgroup_subsys_enabled() can only be used with literal subsys names,
232 * which is fine for individual subsystems but unsuitable for cgroup
233 * core. This is a slower, static_key_enabled()-based test indexed by @ssid.
234 */
235 static bool cgroup_ssid_enabled(int ssid)
236 {
237 return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
238 }
239
240 /**
241 * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
242 * @cgrp: the cgroup of interest
243 *
244 * The default hierarchy is the v2 interface of cgroup and this function
245 * can be used to test whether a cgroup is on the default hierarchy for
246 * cases where a subsystem should behave differently depending on the
247 * interface version.
248 *
249 * The set of behaviors which change on the default hierarchy are still
250 * being determined and the mount option is prefixed with __DEVEL__.
251 *
252 * List of changed behaviors:
253 *
254 * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
255 * and "name" are disallowed.
256 *
257 * - When mounting an existing superblock, mount options should match.
258 *
259 * - Remount is disallowed.
260 *
261 * - rename(2) is disallowed.
262 *
263 * - "tasks" is removed. Everything should be at process granularity. Use
264 * "cgroup.procs" instead.
265 *
266 * - "cgroup.procs" is not sorted. pids will be unique unless they got
267 * recycled in between reads.
268 *
269 * - "release_agent" and "notify_on_release" are removed. Replacement
270 * notification mechanism will be implemented.
271 *
272 * - "cgroup.clone_children" is removed.
273 *
274 * - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup
275 * and its descendants contain no task; otherwise, 1. The file also
276 * generates kernfs notification which can be monitored through poll and
277 * [di]notify when the value of the file changes.
278 *
279 * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
280 * take masks of ancestors with non-empty cpus/mems, instead of being
281 * moved to an ancestor.
282 *
283 * - cpuset: a task can be moved into an empty cpuset, and again it takes
284 * masks of ancestors.
285 *
286 * - memcg: use_hierarchy is on by default and the cgroup file for the flag
287 * is not created.
288 *
289 * - blkcg: blk-throttle becomes properly hierarchical.
290 *
291 * - debug: disallowed on the default hierarchy.
292 */
293 static bool cgroup_on_dfl(const struct cgroup *cgrp)
294 {
295 return cgrp->root == &cgrp_dfl_root;
296 }
297
298 /* IDR wrappers which synchronize using cgroup_idr_lock */
299 static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
300 gfp_t gfp_mask)
301 {
302 int ret;
303
304 idr_preload(gfp_mask);
305 spin_lock_bh(&cgroup_idr_lock);
306 ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_WAIT);
307 spin_unlock_bh(&cgroup_idr_lock);
308 idr_preload_end();
309 return ret;
310 }
311
312 static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
313 {
314 void *ret;
315
316 spin_lock_bh(&cgroup_idr_lock);
317 ret = idr_replace(idr, ptr, id);
318 spin_unlock_bh(&cgroup_idr_lock);
319 return ret;
320 }
321
322 static void cgroup_idr_remove(struct idr *idr, int id)
323 {
324 spin_lock_bh(&cgroup_idr_lock);
325 idr_remove(idr, id);
326 spin_unlock_bh(&cgroup_idr_lock);
327 }
328
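/*
 * A minimal usage sketch for the wrappers above (hypothetical caller;
 * this mirrors the two-step pattern used for css IDs elsewhere in this
 * file: allocate a slot first, publish the pointer once it's ready):
 *
 *	id = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	cgroup_idr_replace(&ss->css_idr, css, id);
 *	...
 *	cgroup_idr_remove(&ss->css_idr, id);
 */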
329 static struct cgroup *cgroup_parent(struct cgroup *cgrp)
330 {
331 struct cgroup_subsys_state *parent_css = cgrp->self.parent;
332
333 if (parent_css)
334 return container_of(parent_css, struct cgroup, self);
335 return NULL;
336 }
337
338 /**
339 * cgroup_css - obtain a cgroup's css for the specified subsystem
340 * @cgrp: the cgroup of interest
341 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
342 *
343 * Return @cgrp's css (cgroup_subsys_state) associated with @ss. This
344 * function must be called either under cgroup_mutex or rcu_read_lock() and
345 * the caller is responsible for pinning the returned css if it wants to
346 * keep accessing it outside the said locks. This function may return
347 * %NULL if @cgrp doesn't have @ss enabled.
348 */
349 static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
350 struct cgroup_subsys *ss)
351 {
352 if (ss)
353 return rcu_dereference_check(cgrp->subsys[ss->id],
354 lockdep_is_held(&cgroup_mutex));
355 else
356 return &cgrp->self;
357 }
358
359 /**
360 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
361 * @cgrp: the cgroup of interest
362 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
363 *
364 * Similar to cgroup_css() but returns the effective css, which is defined
365 * as the matching css of the nearest ancestor including self which has @ss
366 * enabled. If @ss is associated with the hierarchy @cgrp is on, this
367 * function is guaranteed to return non-NULL css.
368 */
369 static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
370 struct cgroup_subsys *ss)
371 {
372 lockdep_assert_held(&cgroup_mutex);
373
374 if (!ss)
375 return &cgrp->self;
376
377 if (!(cgrp->root->subsys_mask & (1 << ss->id)))
378 return NULL;
379
380 /*
381 * This function is used while updating css associations and thus
382 * can't test the csses directly. Use ->child_subsys_mask.
383 */
384 while (cgroup_parent(cgrp) &&
385 !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ss->id)))
386 cgrp = cgroup_parent(cgrp);
387
388 return cgroup_css(cgrp, ss);
389 }
390
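/*
 * A worked example of the walk above (hypothetical cgroups A -> B -> C
 * on the default hierarchy, where "memory" is in A's child_subsys_mask
 * but not in B's, so B has a memory css and C does not):
 *
 *	css = cgroup_e_css(C, &memory_cgrp_subsys);
 *
 * The loop steps up from C to B and returns B's memory css: the
 * matching css of the nearest ancestor (including self) that has the
 * subsystem enabled.
 */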
391 /**
392 * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
393 * @cgrp: the cgroup of interest
394 * @ss: the subsystem of interest
395 *
396 * Find and get the effective css of @cgrp for @ss. The effective css is
397 * defined as the matching css of the nearest ancestor including self which
398 * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
399 * the root css is returned, so this function always returns a valid css.
400 * The returned css must be put using css_put().
401 */
402 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
403 struct cgroup_subsys *ss)
404 {
405 struct cgroup_subsys_state *css;
406
407 rcu_read_lock();
408
409 do {
410 css = cgroup_css(cgrp, ss);
411
412 if (css && css_tryget_online(css))
413 goto out_unlock;
414 cgrp = cgroup_parent(cgrp);
415 } while (cgrp);
416
417 css = init_css_set.subsys[ss->id];
418 css_get(css);
419 out_unlock:
420 rcu_read_unlock();
421 return css;
422 }
423
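/*
 * A minimal usage sketch (hypothetical caller). The returned css is
 * pinned, so it stays valid even if @cgrp is removed meanwhile, and
 * must be released with css_put():
 *
 *	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
 *	... use css without holding cgroup_mutex or rcu_read_lock() ...
 *	css_put(css);
 */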
424 /* convenient tests for these bits */
425 static inline bool cgroup_is_dead(const struct cgroup *cgrp)
426 {
427 return !(cgrp->self.flags & CSS_ONLINE);
428 }
429
430 struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
431 {
432 struct cgroup *cgrp = of->kn->parent->priv;
433 struct cftype *cft = of_cft(of);
434
435 /*
436 * This is an open and unprotected implementation of cgroup_css().
437 * seq_css() is only called from a kernfs file operation which has
438 * an active reference on the file. Because all the subsystem
439 * files are drained before a css is disassociated from a cgroup,
440 * the matching css from the cgroup's subsys table is guaranteed to
441 * be and stay valid until the enclosing operation is complete.
442 */
443 if (cft->ss)
444 return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
445 else
446 return &cgrp->self;
447 }
448 EXPORT_SYMBOL_GPL(of_css);
449
450 /**
451 * cgroup_is_descendant - test ancestry
452 * @cgrp: the cgroup to be tested
453 * @ancestor: possible ancestor of @cgrp
454 *
455 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
456 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
457 * and @ancestor are accessible.
458 */
459 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
460 {
461 while (cgrp) {
462 if (cgrp == ancestor)
463 return true;
464 cgrp = cgroup_parent(cgrp);
465 }
466 return false;
467 }
468
469 static int notify_on_release(const struct cgroup *cgrp)
470 {
471 return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
472 }
473
474 /**
475 * for_each_css - iterate all css's of a cgroup
476 * @css: the iteration cursor
477 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
478 * @cgrp: the target cgroup to iterate css's of
479 *
480 * Should be called under cgroup_[tree_]mutex.
481 */
482 #define for_each_css(css, ssid, cgrp) \
483 for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
484 if (!((css) = rcu_dereference_check( \
485 (cgrp)->subsys[(ssid)], \
486 lockdep_is_held(&cgroup_mutex)))) { } \
487 else
488
489 /**
490 * for_each_e_css - iterate all effective css's of a cgroup
491 * @css: the iteration cursor
492 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
493 * @cgrp: the target cgroup to iterate css's of
494 *
495 * Should be called under cgroup_[tree_]mutex.
496 */
497 #define for_each_e_css(css, ssid, cgrp) \
498 for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
499 if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
500 ; \
501 else
502
503 /**
504 * for_each_subsys - iterate all enabled cgroup subsystems
505 * @ss: the iteration cursor
506 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
507 */
508 #define for_each_subsys(ss, ssid) \
509 for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT && \
510 (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
511
512 /**
513 * for_each_subsys_which - filter for_each_subsys with a bitmask
514 * @ss: the iteration cursor
515 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
516 * @ss_maskp: a pointer to the bitmask
517 *
518 * The block will only run for cases where the ssid-th bit (1 << ssid)
519 * of the mask pointed to by @ss_maskp is set.
520 */
521 #define for_each_subsys_which(ss, ssid, ss_maskp) \
522 if (!CGROUP_SUBSYS_COUNT) /* to avoid spurious gcc warning */ \
523 (ssid) = 0; \
524 else \
525 for_each_set_bit(ssid, ss_maskp, CGROUP_SUBSYS_COUNT) \
526 if (((ss) = cgroup_subsys[ssid]) && false) \
527 break; \
528 else
529
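/*
 * A minimal usage sketch (hypothetical mask, assuming the cpu and
 * memory controllers are compiled in): run the body only for the
 * subsystems whose bit is set in the mask:
 *
 *	unsigned long ss_mask = (1 << cpu_cgrp_id) | (1 << memory_cgrp_id);
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	for_each_subsys_which(ss, ssid, &ss_mask)
 *		pr_info("%s is in the mask\n", ss->name);
 */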
530 /* iterate across the hierarchies */
531 #define for_each_root(root) \
532 list_for_each_entry((root), &cgroup_roots, root_list)
533
534 /* iterate over child cgrps, lock should be held throughout iteration */
535 #define cgroup_for_each_live_child(child, cgrp) \
536 list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
537 if (({ lockdep_assert_held(&cgroup_mutex); \
538 cgroup_is_dead(child); })) \
539 ; \
540 else
541
542 static void cgroup_release_agent(struct work_struct *work);
543 static void check_for_release(struct cgroup *cgrp);
544
545 /*
546 * A cgroup can be associated with multiple css_sets as different tasks may
547 * belong to different cgroups on different hierarchies. In the other
548 * direction, a css_set is naturally associated with multiple cgroups.
549 * This M:N relationship is represented by the following link structure
550 * which exists for each association and allows traversing the associations
551 * from both sides.
552 */
553 struct cgrp_cset_link {
554 /* the cgroup and css_set this link associates */
555 struct cgroup *cgrp;
556 struct css_set *cset;
557
558 /* list of cgrp_cset_links anchored at cgrp->cset_links */
559 struct list_head cset_link;
560
561 /* list of cgrp_cset_links anchored at css_set->cgrp_links */
562 struct list_head cgrp_link;
563 };
564
565 /*
566 * The default css_set - used by init and its children prior to any
567 * hierarchies being mounted. It contains a pointer to the root state
568 * for each subsystem. Also used to anchor the list of css_sets. Not
569 * reference-counted, to improve performance when child cgroups
570 * haven't been created.
571 */
572 struct css_set init_css_set = {
573 .refcount = ATOMIC_INIT(1),
574 .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
575 .tasks = LIST_HEAD_INIT(init_css_set.tasks),
576 .mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks),
577 .mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node),
578 .mg_node = LIST_HEAD_INIT(init_css_set.mg_node),
579 };
580
581 static int css_set_count = 1; /* 1 for init_css_set */
582
583 /**
584 * cgroup_update_populated - update the populated count of a cgroup
585 * @cgrp: the target cgroup
586 * @populated: inc or dec populated count
587 *
588 * @cgrp is either getting the first task (css_set) or losing the last.
589 * Update @cgrp->populated_cnt accordingly. The count is propagated
590 * towards root so that a given cgroup's populated_cnt is zero iff the
591 * cgroup and all its descendants are empty.
592 *
593 * @cgrp's interface file "cgroup.populated" is zero if
594 * @cgrp->populated_cnt is zero and 1 otherwise. When @cgrp->populated_cnt
595 * changes from or to zero, userland is notified that the content of the
596 * interface file has changed. This can be used to detect when @cgrp and
597 * its descendants become populated or empty.
598 */
599 static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
600 {
601 lockdep_assert_held(&css_set_rwsem);
602
603 do {
604 bool trigger;
605
606 if (populated)
607 trigger = !cgrp->populated_cnt++;
608 else
609 trigger = !--cgrp->populated_cnt;
610
611 if (!trigger)
612 break;
613
614 if (cgrp->populated_kn)
615 kernfs_notify(cgrp->populated_kn);
616 cgrp = cgroup_parent(cgrp);
617 } while (cgrp);
618 }
619
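/*
 * A worked example of the propagation above (hypothetical hierarchy
 * A -> B -> C, all initially empty). When C gains its first css_set:
 *
 *	C->populated_cnt: 0 -> 1	notify, propagate to B
 *	B->populated_cnt: 0 -> 1	notify, propagate to A
 *	A->populated_cnt: 0 -> 1	notify, stop at the root
 *
 * If a task is later attached directly to B, B->populated_cnt goes
 * 1 -> 2 and the loop stops there: nothing changed from userland's
 * point of view, so no notification is generated.
 */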
620 /*
621 * hash table for cgroup groups. This improves the performance of
622 * finding an existing css_set. This hash doesn't (currently) take into
623 * account cgroups in empty hierarchies.
624 */
625 #define CSS_SET_HASH_BITS 7
626 static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
627
628 static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
629 {
630 unsigned long key = 0UL;
631 struct cgroup_subsys *ss;
632 int i;
633
634 for_each_subsys(ss, i)
635 key += (unsigned long)css[i];
636 key = (key >> 16) ^ key;
637
638 return key;
639 }
640
641 static void put_css_set_locked(struct css_set *cset)
642 {
643 struct cgrp_cset_link *link, *tmp_link;
644 struct cgroup_subsys *ss;
645 int ssid;
646
647 lockdep_assert_held(&css_set_rwsem);
648
649 if (!atomic_dec_and_test(&cset->refcount))
650 return;
651
652 /* This css_set is dead. unlink it and release cgroup refcounts */
653 for_each_subsys(ss, ssid)
654 list_del(&cset->e_cset_node[ssid]);
655 hash_del(&cset->hlist);
656 css_set_count--;
657
658 list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
659 struct cgroup *cgrp = link->cgrp;
660
661 list_del(&link->cset_link);
662 list_del(&link->cgrp_link);
663
664 /* @cgrp can't go away while we're holding css_set_rwsem */
665 if (list_empty(&cgrp->cset_links)) {
666 cgroup_update_populated(cgrp, false);
667 check_for_release(cgrp);
668 }
669
670 kfree(link);
671 }
672
673 kfree_rcu(cset, rcu_head);
674 }
675
676 static void put_css_set(struct css_set *cset)
677 {
678 /*
679 * Ensure that the refcount doesn't hit zero while any readers
680 * can see it. Similar to atomic_dec_and_lock(), but for a
681 * rwsem.
682 */
683 if (atomic_add_unless(&cset->refcount, -1, 1))
684 return;
685
686 down_write(&css_set_rwsem);
687 put_css_set_locked(cset);
688 up_write(&css_set_rwsem);
689 }
690
691 /*
692 * refcounted get/put for css_set objects
693 */
694 static inline void get_css_set(struct css_set *cset)
695 {
696 atomic_inc(&cset->refcount);
697 }
698
699 /**
700 * compare_css_sets - helper function for find_existing_css_set().
701 * @cset: candidate css_set being tested
702 * @old_cset: existing css_set for a task
703 * @new_cgrp: cgroup that's being entered by the task
704 * @template: desired set of css pointers in css_set (pre-calculated)
705 *
706 * Returns true if "cset" matches "old_cset" except for the hierarchy
707 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
708 */
709 static bool compare_css_sets(struct css_set *cset,
710 struct css_set *old_cset,
711 struct cgroup *new_cgrp,
712 struct cgroup_subsys_state *template[])
713 {
714 struct list_head *l1, *l2;
715
716 /*
717 * On the default hierarchy, there can be csets which are
718 * associated with the same set of cgroups but different csses.
719 * Let's first ensure that csses match.
720 */
721 if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
722 return false;
723
724 /*
725 * Compare cgroup pointers in order to distinguish between
726 * different cgroups in hierarchies. As different cgroups may
727 * share the same effective css, this comparison is always
728 * necessary.
729 */
730 l1 = &cset->cgrp_links;
731 l2 = &old_cset->cgrp_links;
732 while (1) {
733 struct cgrp_cset_link *link1, *link2;
734 struct cgroup *cgrp1, *cgrp2;
735
736 l1 = l1->next;
737 l2 = l2->next;
738 /* See if we reached the end - both lists are equal length. */
739 if (l1 == &cset->cgrp_links) {
740 BUG_ON(l2 != &old_cset->cgrp_links);
741 break;
742 } else {
743 BUG_ON(l2 == &old_cset->cgrp_links);
744 }
745 /* Locate the cgroups associated with these links. */
746 link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
747 link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
748 cgrp1 = link1->cgrp;
749 cgrp2 = link2->cgrp;
750 /* Hierarchies should be linked in the same order. */
751 BUG_ON(cgrp1->root != cgrp2->root);
752
753 /*
754 * If this hierarchy is the hierarchy of the cgroup
755 * that's changing, then we need to check that this
756 * css_set points to the new cgroup; if it's any other
757 * hierarchy, then this css_set should point to the
758 * same cgroup as the old css_set.
759 */
760 if (cgrp1->root == new_cgrp->root) {
761 if (cgrp1 != new_cgrp)
762 return false;
763 } else {
764 if (cgrp1 != cgrp2)
765 return false;
766 }
767 }
768 return true;
769 }
770
771 /**
772 * find_existing_css_set - init css array and find the matching css_set
773 * @old_cset: the css_set that we're using before the cgroup transition
774 * @cgrp: the cgroup that we're moving into
775 * @template: out param for the new set of csses, should be clear on entry
776 */
777 static struct css_set *find_existing_css_set(struct css_set *old_cset,
778 struct cgroup *cgrp,
779 struct cgroup_subsys_state *template[])
780 {
781 struct cgroup_root *root = cgrp->root;
782 struct cgroup_subsys *ss;
783 struct css_set *cset;
784 unsigned long key;
785 int i;
786
787 /*
788 * Build the set of subsystem state objects that we want to see in the
789 * new css_set. While subsystems can change globally, the entries here
790 * won't change, so no need for locking.
791 */
792 for_each_subsys(ss, i) {
793 if (root->subsys_mask & (1UL << i)) {
794 /*
795 * @ss is in this hierarchy, so we want the
796 * effective css from @cgrp.
797 */
798 template[i] = cgroup_e_css(cgrp, ss);
799 } else {
800 /*
801 * @ss is not in this hierarchy, so we don't want
802 * to change the css.
803 */
804 template[i] = old_cset->subsys[i];
805 }
806 }
807
808 key = css_set_hash(template);
809 hash_for_each_possible(css_set_table, cset, hlist, key) {
810 if (!compare_css_sets(cset, old_cset, cgrp, template))
811 continue;
812
813 /* This css_set matches what we need */
814 return cset;
815 }
816
817 /* No existing cgroup group matched */
818 return NULL;
819 }
820
821 static void free_cgrp_cset_links(struct list_head *links_to_free)
822 {
823 struct cgrp_cset_link *link, *tmp_link;
824
825 list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
826 list_del(&link->cset_link);
827 kfree(link);
828 }
829 }
830
831 /**
832 * allocate_cgrp_cset_links - allocate cgrp_cset_links
833 * @count: the number of links to allocate
834 * @tmp_links: list_head the allocated links are put on
835 *
836 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
837 * through ->cset_link. Returns 0 on success or -errno.
838 */
839 static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
840 {
841 struct cgrp_cset_link *link;
842 int i;
843
844 INIT_LIST_HEAD(tmp_links);
845
846 for (i = 0; i < count; i++) {
847 link = kzalloc(sizeof(*link), GFP_KERNEL);
848 if (!link) {
849 free_cgrp_cset_links(tmp_links);
850 return -ENOMEM;
851 }
852 list_add(&link->cset_link, tmp_links);
853 }
854 return 0;
855 }
856
857 /**
858 * link_css_set - a helper function to link a css_set to a cgroup
859 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
860 * @cset: the css_set to be linked
861 * @cgrp: the destination cgroup
862 */
863 static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
864 struct cgroup *cgrp)
865 {
866 struct cgrp_cset_link *link;
867
868 BUG_ON(list_empty(tmp_links));
869
870 if (cgroup_on_dfl(cgrp))
871 cset->dfl_cgrp = cgrp;
872
873 link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
874 link->cset = cset;
875 link->cgrp = cgrp;
876
877 if (list_empty(&cgrp->cset_links))
878 cgroup_update_populated(cgrp, true);
879 list_move(&link->cset_link, &cgrp->cset_links);
880
881 /*
882 * Always add links to the tail of the list so that the list
883 * is sorted by order of hierarchy creation
884 */
885 list_add_tail(&link->cgrp_link, &cset->cgrp_links);
886 }
887
888 /**
889 * find_css_set - return a new css_set with one cgroup updated
890 * @old_cset: the baseline css_set
891 * @cgrp: the cgroup to be updated
892 *
893 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
894 * substituted into the appropriate hierarchy.
895 */
896 static struct css_set *find_css_set(struct css_set *old_cset,
897 struct cgroup *cgrp)
898 {
899 struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
900 struct css_set *cset;
901 struct list_head tmp_links;
902 struct cgrp_cset_link *link;
903 struct cgroup_subsys *ss;
904 unsigned long key;
905 int ssid;
906
907 lockdep_assert_held(&cgroup_mutex);
908
909 /* First see if we already have a cgroup group that matches
910 * the desired set */
911 down_read(&css_set_rwsem);
912 cset = find_existing_css_set(old_cset, cgrp, template);
913 if (cset)
914 get_css_set(cset);
915 up_read(&css_set_rwsem);
916
917 if (cset)
918 return cset;
919
920 cset = kzalloc(sizeof(*cset), GFP_KERNEL);
921 if (!cset)
922 return NULL;
923
924 /* Allocate all the cgrp_cset_link objects that we'll need */
925 if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
926 kfree(cset);
927 return NULL;
928 }
929
930 atomic_set(&cset->refcount, 1);
931 INIT_LIST_HEAD(&cset->cgrp_links);
932 INIT_LIST_HEAD(&cset->tasks);
933 INIT_LIST_HEAD(&cset->mg_tasks);
934 INIT_LIST_HEAD(&cset->mg_preload_node);
935 INIT_LIST_HEAD(&cset->mg_node);
936 INIT_HLIST_NODE(&cset->hlist);
937
938 /* Copy the set of subsystem state objects generated in
939 * find_existing_css_set() */
940 memcpy(cset->subsys, template, sizeof(cset->subsys));
941
942 down_write(&css_set_rwsem);
943 /* Add reference counts and links from the new css_set. */
944 list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
945 struct cgroup *c = link->cgrp;
946
947 if (c->root == cgrp->root)
948 c = cgrp;
949 link_css_set(&tmp_links, cset, c);
950 }
951
952 BUG_ON(!list_empty(&tmp_links));
953
954 css_set_count++;
955
956 /* Add @cset to the hash table */
957 key = css_set_hash(cset->subsys);
958 hash_add(css_set_table, &cset->hlist, key);
959
960 for_each_subsys(ss, ssid)
961 list_add_tail(&cset->e_cset_node[ssid],
962 &cset->subsys[ssid]->cgroup->e_csets[ssid]);
963
964 up_write(&css_set_rwsem);
965
966 return cset;
967 }
968
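/*
 * A worked example (hypothetical hierarchies): a task's css_set
 * currently points at cgroup A on hierarchy 1 and cgroup B on
 * hierarchy 2, and the task is being moved to cgroup C on hierarchy 2:
 *
 *	new_cset = find_css_set(old_cset, C);	// old_cset = {A, B}
 *
 * new_cset is {A, C}: identical to old_cset except on C's hierarchy,
 * and shared with any pre-existing css_set that already matches.
 */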
969 static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
970 {
971 struct cgroup *root_cgrp = kf_root->kn->priv;
972
973 return root_cgrp->root;
974 }
975
976 static int cgroup_init_root_id(struct cgroup_root *root)
977 {
978 int id;
979
980 lockdep_assert_held(&cgroup_mutex);
981
982 id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
983 if (id < 0)
984 return id;
985
986 root->hierarchy_id = id;
987 return 0;
988 }
989
990 static void cgroup_exit_root_id(struct cgroup_root *root)
991 {
992 lockdep_assert_held(&cgroup_mutex);
993
994 if (root->hierarchy_id) {
995 idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
996 root->hierarchy_id = 0;
997 }
998 }
999
1000 static void cgroup_free_root(struct cgroup_root *root)
1001 {
1002 if (root) {
1003 /* hierarchy ID should already have been released */
1004 WARN_ON_ONCE(root->hierarchy_id);
1005
1006 idr_destroy(&root->cgroup_idr);
1007 kfree(root);
1008 }
1009 }
1010
1011 static void cgroup_destroy_root(struct cgroup_root *root)
1012 {
1013 struct cgroup *cgrp = &root->cgrp;
1014 struct cgrp_cset_link *link, *tmp_link;
1015
1016 mutex_lock(&cgroup_mutex);
1017
1018 BUG_ON(atomic_read(&root->nr_cgrps));
1019 BUG_ON(!list_empty(&cgrp->self.children));
1020
1021 /* Rebind all subsystems back to the default hierarchy */
1022 rebind_subsystems(&cgrp_dfl_root, root->subsys_mask);
1023
1024 /*
1025 * Release all the links from cset_links to this hierarchy's
1026 * root cgroup
1027 */
1028 down_write(&css_set_rwsem);
1029
1030 list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
1031 list_del(&link->cset_link);
1032 list_del(&link->cgrp_link);
1033 kfree(link);
1034 }
1035 up_write(&css_set_rwsem);
1036
1037 if (!list_empty(&root->root_list)) {
1038 list_del(&root->root_list);
1039 cgroup_root_count--;
1040 }
1041
1042 cgroup_exit_root_id(root);
1043
1044 mutex_unlock(&cgroup_mutex);
1045
1046 kernfs_destroy_root(root->kf_root);
1047 cgroup_free_root(root);
1048 }
1049
1050 /* look up cgroup associated with given css_set on the specified hierarchy */
1051 static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
1052 struct cgroup_root *root)
1053 {
1054 struct cgroup *res = NULL;
1055
1056 lockdep_assert_held(&cgroup_mutex);
1057 lockdep_assert_held(&css_set_rwsem);
1058
1059 if (cset == &init_css_set) {
1060 res = &root->cgrp;
1061 } else {
1062 struct cgrp_cset_link *link;
1063
1064 list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
1065 struct cgroup *c = link->cgrp;
1066
1067 if (c->root == root) {
1068 res = c;
1069 break;
1070 }
1071 }
1072 }
1073
1074 BUG_ON(!res);
1075 return res;
1076 }
1077
1078 /*
1079 * Return the cgroup for "task" from the given hierarchy. Must be
1080 * called with cgroup_mutex and css_set_rwsem held.
1081 */
1082 static struct cgroup *task_cgroup_from_root(struct task_struct *task,
1083 struct cgroup_root *root)
1084 {
1085 /*
1086 * No need to lock the task - since we hold cgroup_mutex the
1087 * task can't change groups, so the only thing that can happen
1088 * is that it exits and its css_set is set back to init_css_set.
1089 */
1090 return cset_cgroup_from_root(task_css_set(task), root);
1091 }
1092
1093 /*
1094 * A task must hold cgroup_mutex to modify cgroups.
1095 *
1096 * Any task can increment and decrement the count field without lock.
1097 * So in general, code holding cgroup_mutex can't rely on the count
1098 * field not changing. However, if the count goes to zero, then only
1099 * cgroup_attach_task() can increment it again. Because a count of zero
1100 * means that no tasks are currently attached, therefore there is no
1101 * way a task attached to that cgroup can fork (the other way to
1102 * increment the count). So code holding cgroup_mutex can safely
1103 * assume that if the count is zero, it will stay zero. Similarly, if
1104 * a task holds cgroup_mutex on a cgroup with zero count, it
1105 * knows that the cgroup won't be removed, as cgroup_rmdir()
1106 * needs that mutex.
1107 *
1108 * A cgroup can only be deleted if both its 'count' of using tasks
1109 * is zero, and its list of 'children' cgroups is empty. Since all
1110 * tasks in the system use _some_ cgroup, and since there is always at
1111 * least one task in the system (init, pid == 1), therefore, root cgroup
1112 * always has either children cgroups and/or using tasks. So we don't
1113 * need a special hack to ensure that root cgroup cannot be deleted.
1114 *
1115 * P.S. One more locking exception. RCU is used to guard the
1116 * update of a task's cgroup pointer by cgroup_attach_task()
1117 */
1118
1119 static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
1120 static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
1121 static const struct file_operations proc_cgroupstats_operations;
1122
1123 static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
1124 char *buf)
1125 {
1126 struct cgroup_subsys *ss = cft->ss;
1127
1128 if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
1129 !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
1130 snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
1131 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
1132 cft->name);
1133 else
1134 strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
1135 return buf;
1136 }
1137
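/*
 * For example, the memory controller's "limit_in_bytes" file shows up
 * as "memory.limit_in_bytes" on a hierarchy mounted without "noprefix"
 * and as plain "limit_in_bytes" with it; on the default hierarchy the
 * prefix comes from ss->name rather than ss->legacy_name.
 */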
1138 /**
1139 * cgroup_file_mode - deduce file mode of a control file
1140 * @cft: the control file in question
1141 *
1142 * returns cft->mode if ->mode is not 0
1143 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
1144 * returns S_IRUGO if it has only a read handler
1145 * returns S_IWUSR if it has only a write handler
1146 */
1147 static umode_t cgroup_file_mode(const struct cftype *cft)
1148 {
1149 umode_t mode = 0;
1150
1151 if (cft->mode)
1152 return cft->mode;
1153
1154 if (cft->read_u64 || cft->read_s64 || cft->seq_show)
1155 mode |= S_IRUGO;
1156
1157 if (cft->write_u64 || cft->write_s64 || cft->write)
1158 mode |= S_IWUSR;
1159
1160 return mode;
1161 }
1162
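/*
 * For example, a cftype with only ->seq_show yields S_IRUGO (0444),
 * one with only ->write yields S_IWUSR (0200), and one with both
 * yields S_IRUGO | S_IWUSR (0644).
 */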
1163 static void cgroup_get(struct cgroup *cgrp)
1164 {
1165 WARN_ON_ONCE(cgroup_is_dead(cgrp));
1166 css_get(&cgrp->self);
1167 }
1168
1169 static bool cgroup_tryget(struct cgroup *cgrp)
1170 {
1171 return css_tryget(&cgrp->self);
1172 }
1173
1174 static void cgroup_put(struct cgroup *cgrp)
1175 {
1176 css_put(&cgrp->self);
1177 }
1178
1179 /**
1180 * cgroup_calc_child_subsys_mask - calculate child_subsys_mask
1181 * @cgrp: the target cgroup
1182 * @subtree_control: the new subtree_control mask to consider
1183 *
1184 * On the default hierarchy, a subsystem may request other subsystems to be
1185 * enabled together through its ->depends_on mask. In such cases, more
1186 * subsystems than specified in "cgroup.subtree_control" may be enabled.
1187 *
1188 * This function calculates which subsystems need to be enabled if
1189 * @subtree_control is to be applied to @cgrp. The returned mask is always
1190 * a superset of @subtree_control and follows the usual hierarchy rules.
1191 */
1192 static unsigned long cgroup_calc_child_subsys_mask(struct cgroup *cgrp,
1193 unsigned long subtree_control)
1194 {
1195 struct cgroup *parent = cgroup_parent(cgrp);
1196 unsigned long cur_ss_mask = subtree_control;
1197 struct cgroup_subsys *ss;
1198 int ssid;
1199
1200 lockdep_assert_held(&cgroup_mutex);
1201
1202 if (!cgroup_on_dfl(cgrp))
1203 return cur_ss_mask;
1204
1205 while (true) {
1206 unsigned long new_ss_mask = cur_ss_mask;
1207
1208 for_each_subsys_which(ss, ssid, &cur_ss_mask)
1209 new_ss_mask |= ss->depends_on;
1210
1211 /*
1212 * Mask out subsystems which aren't available. This can
1213 * happen only if some depended-upon subsystems were bound
1214 * to non-default hierarchies.
1215 */
1216 if (parent)
1217 new_ss_mask &= parent->child_subsys_mask;
1218 else
1219 new_ss_mask &= cgrp->root->subsys_mask;
1220
1221 if (new_ss_mask == cur_ss_mask)
1222 break;
1223 cur_ss_mask = new_ss_mask;
1224 }
1225
1226 return cur_ss_mask;
1227 }
1228
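/*
 * A worked example of the fixed-point loop above (hypothetical
 * subsystems X, Y and Z, where X->depends_on includes Y and
 * Y->depends_on includes Z):
 *
 *	cgroup_calc_child_subsys_mask(cgrp, 1 << X_id)
 *
 * The first pass adds Y, the second adds Z, and the third changes
 * nothing, so the result is X | Y | Z masked down to what the parent's
 * child_subsys_mask (or the root's subsys_mask) actually makes
 * available.
 */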
1229 /**
1230 * cgroup_refresh_child_subsys_mask - update child_subsys_mask
1231 * @cgrp: the target cgroup
1232 *
1233 * Update @cgrp->child_subsys_mask according to the current
1234 * @cgrp->subtree_control using cgroup_calc_child_subsys_mask().
1235 */
1236 static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
1237 {
1238 cgrp->child_subsys_mask =
1239 cgroup_calc_child_subsys_mask(cgrp, cgrp->subtree_control);
1240 }
1241
1242 /**
1243 * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
1244 * @kn: the kernfs_node being serviced
1245 *
1246 * This helper undoes cgroup_kn_lock_live() and should be invoked before
1247 * the method finishes if locking succeeded. Note that once this function
1248 * returns, the cgroup returned by cgroup_kn_lock_live() may become
1249 * inaccessible at any time. If the caller intends to continue to access the
1250 * cgroup, it should pin it before invoking this function.
1251 */
1252 static void cgroup_kn_unlock(struct kernfs_node *kn)
1253 {
1254 struct cgroup *cgrp;
1255
1256 if (kernfs_type(kn) == KERNFS_DIR)
1257 cgrp = kn->priv;
1258 else
1259 cgrp = kn->parent->priv;
1260
1261 mutex_unlock(&cgroup_mutex);
1262
1263 kernfs_unbreak_active_protection(kn);
1264 cgroup_put(cgrp);
1265 }
1266
1267 /**
1268 * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
1269 * @kn: the kernfs_node being serviced
1270 *
1271 * This helper is to be used by a cgroup kernfs method currently servicing
1272 * @kn. It breaks the active protection, performs cgroup locking and
1273 * verifies that the associated cgroup is alive. Returns the cgroup if
1274 * alive; otherwise, %NULL. A successful return should be undone by a
1275 * matching cgroup_kn_unlock() invocation.
1276 *
1277 * Any cgroup kernfs method implementation which requires locking the
1278 * associated cgroup should use this helper. It avoids nesting cgroup
1279 * locking under kernfs active protection and allows all kernfs operations
1280 * including self-removal.
1281 */
1282 static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
1283 {
1284 struct cgroup *cgrp;
1285
1286 if (kernfs_type(kn) == KERNFS_DIR)
1287 cgrp = kn->priv;
1288 else
1289 cgrp = kn->parent->priv;
1290
1291 /*
1292 * We're going to grab cgroup_mutex, which nests outside kernfs
1293 * active_ref. The cgroup liveness check alone provides enough
1294 * protection against removal. Ensure @cgrp stays accessible and
1295 * break the active_ref protection.
1296 */
1297 if (!cgroup_tryget(cgrp))
1298 return NULL;
1299 kernfs_break_active_protection(kn);
1300
1301 mutex_lock(&cgroup_mutex);
1302
1303 if (!cgroup_is_dead(cgrp))
1304 return cgrp;
1305
1306 cgroup_kn_unlock(kn);
1307 return NULL;
1308 }
1309
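/*
 * A minimal sketch of the intended calling pattern (example_write()
 * and its body are hypothetical; the real users are the cgroup kernfs
 * write/show methods in this file):
 *
 *	static ssize_t example_write(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct cgroup *cgrp = cgroup_kn_lock_live(of->kn);
 *
 *		if (!cgrp)
 *			return -ENODEV;
 *		... operate on @cgrp under cgroup_mutex ...
 *		cgroup_kn_unlock(of->kn);
 *		return nbytes;
 *	}
 */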
1310 static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
1311 {
1312 char name[CGROUP_FILE_NAME_MAX];
1313
1314 lockdep_assert_held(&cgroup_mutex);
1315 kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
1316 }
1317
1318 /**
1319 * cgroup_clear_dir - remove subsys files in a cgroup directory
1320 * @cgrp: target cgroup
1321 * @subsys_mask: mask of the subsystem ids whose files should be removed
1322 */
1323 static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
1324 {
1325 struct cgroup_subsys *ss;
1326 int i;
1327
1328 for_each_subsys(ss, i) {
1329 struct cftype *cfts;
1330
1331 if (!(subsys_mask & (1 << i)))
1332 continue;
1333 list_for_each_entry(cfts, &ss->cfts, node)
1334 cgroup_addrm_files(cgrp, cfts, false);
1335 }
1336 }
1337
1338 static int rebind_subsystems(struct cgroup_root *dst_root,
1339 unsigned long ss_mask)
1340 {
1341 struct cgroup_subsys *ss;
1342 unsigned long tmp_ss_mask;
1343 int ssid, i, ret;
1344
1345 lockdep_assert_held(&cgroup_mutex);
1346
1347 for_each_subsys_which(ss, ssid, &ss_mask) {
1348 /* if @ss has non-root csses attached to it, can't move */
1349 if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)))
1350 return -EBUSY;
1351
1352 /* can't move between two non-dummy roots either */
1353 if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
1354 return -EBUSY;
1355 }
1356
1357 /* skip creating root files on dfl_root for inhibited subsystems */
1358 tmp_ss_mask = ss_mask;
1359 if (dst_root == &cgrp_dfl_root)
1360 tmp_ss_mask &= ~cgrp_dfl_root_inhibit_ss_mask;
1361
1362 ret = cgroup_populate_dir(&dst_root->cgrp, tmp_ss_mask);
1363 if (ret) {
1364 if (dst_root != &cgrp_dfl_root)
1365 return ret;
1366
1367 /*
1368 * Rebinding back to the default root is not allowed to
1369 * fail. Using both default and non-default roots should
1370 * be rare. Moving subsystems back and forth even more so.
1371 * Just warn about it and continue.
1372 */
1373 if (cgrp_dfl_root_visible) {
1374 pr_warn("failed to create files (%d) while rebinding 0x%lx to default root\n",
1375 ret, ss_mask);
1376 pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
1377 }
1378 }
1379
1380 /*
1381 * Nothing can fail from this point on. Remove files for the
1382 * removed subsystems and rebind each subsystem.
1383 */
1384 for_each_subsys_which(ss, ssid, &ss_mask)
1385 cgroup_clear_dir(&ss->root->cgrp, 1 << ssid);
1386
1387 for_each_subsys_which(ss, ssid, &ss_mask) {
1388 struct cgroup_root *src_root;
1389 struct cgroup_subsys_state *css;
1390 struct css_set *cset;
1391
1392 src_root = ss->root;
1393 css = cgroup_css(&src_root->cgrp, ss);
1394
1395 WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss));
1396
1397 RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL);
1398 rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css);
1399 ss->root = dst_root;
1400 css->cgroup = &dst_root->cgrp;
1401
1402 down_write(&css_set_rwsem);
1403 hash_for_each(css_set_table, i, cset, hlist)
1404 list_move_tail(&cset->e_cset_node[ss->id],
1405 &dst_root->cgrp.e_csets[ss->id]);
1406 up_write(&css_set_rwsem);
1407
1408 src_root->subsys_mask &= ~(1 << ssid);
1409 src_root->cgrp.subtree_control &= ~(1 << ssid);
1410 cgroup_refresh_child_subsys_mask(&src_root->cgrp);
1411
1412 /* default hierarchy doesn't enable controllers by default */
1413 dst_root->subsys_mask |= 1 << ssid;
1414 if (dst_root == &cgrp_dfl_root) {
1415 static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
1416 } else {
1417 dst_root->cgrp.subtree_control |= 1 << ssid;
1418 cgroup_refresh_child_subsys_mask(&dst_root->cgrp);
1419 static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
1420 }
1421
1422 if (ss->bind)
1423 ss->bind(css);
1424 }
1425
1426 kernfs_activate(dst_root->cgrp.kn);
1427 return 0;
1428 }
1429
1430 static int cgroup_show_options(struct seq_file *seq,
1431 struct kernfs_root *kf_root)
1432 {
1433 struct cgroup_root *root = cgroup_root_from_kf(kf_root);
1434 struct cgroup_subsys *ss;
1435 int ssid;
1436
1437 if (root != &cgrp_dfl_root)
1438 for_each_subsys(ss, ssid)
1439 if (root->subsys_mask & (1 << ssid))
1440 seq_show_option(seq, ss->legacy_name, NULL);
1441 if (root->flags & CGRP_ROOT_NOPREFIX)
1442 seq_puts(seq, ",noprefix");
1443 if (root->flags & CGRP_ROOT_XATTR)
1444 seq_puts(seq, ",xattr");
1445
1446 spin_lock(&release_agent_path_lock);
1447 if (strlen(root->release_agent_path))
1448 seq_show_option(seq, "release_agent",
1449 root->release_agent_path);
1450 spin_unlock(&release_agent_path_lock);
1451
1452 if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
1453 seq_puts(seq, ",clone_children");
1454 if (strlen(root->name))
1455 seq_show_option(seq, "name", root->name);
1456 return 0;
1457 }
1458
1459 struct cgroup_sb_opts {
1460 unsigned long subsys_mask;
1461 unsigned int flags;
1462 char *release_agent;
1463 bool cpuset_clone_children;
1464 char *name;
1465 /* User explicitly requested empty subsystem */
1466 bool none;
1467 };
1468
1469 static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
1470 {
1471 char *token, *o = data;
1472 bool all_ss = false, one_ss = false;
1473 unsigned long mask = -1UL;
1474 struct cgroup_subsys *ss;
1475 int nr_opts = 0;
1476 int i;
1477
1478 #ifdef CONFIG_CPUSETS
1479 mask = ~(1U << cpuset_cgrp_id);
1480 #endif
1481
1482 memset(opts, 0, sizeof(*opts));
1483
1484 while ((token = strsep(&o, ",")) != NULL) {
1485 nr_opts++;
1486
1487 if (!*token)
1488 return -EINVAL;
1489 if (!strcmp(token, "none")) {
1490 /* Explicitly have no subsystems */
1491 opts->none = true;
1492 continue;
1493 }
1494 if (!strcmp(token, "all")) {
1495 /* Mutually exclusive option 'all' + subsystem name */
1496 if (one_ss)
1497 return -EINVAL;
1498 all_ss = true;
1499 continue;
1500 }
1501 if (!strcmp(token, "__DEVEL__sane_behavior")) {
1502 opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
1503 continue;
1504 }
1505 if (!strcmp(token, "noprefix")) {
1506 opts->flags |= CGRP_ROOT_NOPREFIX;
1507 continue;
1508 }
1509 if (!strcmp(token, "clone_children")) {
1510 opts->cpuset_clone_children = true;
1511 continue;
1512 }
1513 if (!strcmp(token, "xattr")) {
1514 opts->flags |= CGRP_ROOT_XATTR;
1515 continue;
1516 }
1517 if (!strncmp(token, "release_agent=", 14)) {
1518 /* Specifying two release agents is forbidden */
1519 if (opts->release_agent)
1520 return -EINVAL;
1521 opts->release_agent =
1522 kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
1523 if (!opts->release_agent)
1524 return -ENOMEM;
1525 continue;
1526 }
1527 if (!strncmp(token, "name=", 5)) {
1528 const char *name = token + 5;
1529 /* Can't specify an empty name */
1530 if (!strlen(name))
1531 return -EINVAL;
1532 /* Must match [\w.-]+ */
1533 for (i = 0; i < strlen(name); i++) {
1534 char c = name[i];
1535 if (isalnum(c))
1536 continue;
1537 if ((c == '.') || (c == '-') || (c == '_'))
1538 continue;
1539 return -EINVAL;
1540 }
1541 /* Specifying two names is forbidden */
1542 if (opts->name)
1543 return -EINVAL;
1544 opts->name = kstrndup(name,
1545 MAX_CGROUP_ROOT_NAMELEN - 1,
1546 GFP_KERNEL);
1547 if (!opts->name)
1548 return -ENOMEM;
1549
1550 continue;
1551 }
1552
1553 for_each_subsys(ss, i) {
1554 if (strcmp(token, ss->legacy_name))
1555 continue;
1556 if (!cgroup_ssid_enabled(i))
1557 continue;
1558
1559 /* Mutually exclusive option 'all' + subsystem name */
1560 if (all_ss)
1561 return -EINVAL;
1562 opts->subsys_mask |= (1 << i);
1563 one_ss = true;
1564
1565 break;
1566 }
1567 if (i == CGROUP_SUBSYS_COUNT)
1568 return -ENOENT;
1569 }
1570
1571 if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
1572 pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");
1573 if (nr_opts != 1) {
1574 pr_err("sane_behavior: no other mount options allowed\n");
1575 return -EINVAL;
1576 }
1577 return 0;
1578 }
1579
1580 /*
1581 * If the 'all' option was specified, select all the subsystems;
1582 * otherwise, if none of 'none', 'name=' or a subsystem name was
1583 * specified, default to 'all'.
1584 */
1585 if (all_ss || (!one_ss && !opts->none && !opts->name))
1586 for_each_subsys(ss, i)
1587 if (cgroup_ssid_enabled(i))
1588 opts->subsys_mask |= (1 << i);
1589
1590 /*
1591 * We either have to specify by name or by subsystems. (So all
1592 * empty hierarchies must have a name).
1593 */
1594 if (!opts->subsys_mask && !opts->name)
1595 return -EINVAL;
1596
1597 /*
1598 * Option noprefix was introduced just for backward compatibility
1599 * with the old cpuset, so we allow noprefix only if mounting just
1600 * the cpuset subsystem.
1601 */
1602 if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
1603 return -EINVAL;
1604
1605 /* Can't specify "none" and some subsystems */
1606 if (opts->subsys_mask && opts->none)
1607 return -EINVAL;
1608
1609 return 0;
1610 }
1611
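/*
 * Examples of option strings this parser accepts (hypothetical mounts;
 * the string is the "-o" payload of mount(8)):
 *
 *	"cpu,cpuacct"			two controllers on one hierarchy
 *	"none,name=systemd"		named hierarchy with no controllers
 *	"cpuset,noprefix"		legacy cpuset-style unprefixed names
 *	"__DEVEL__sane_behavior"	the default hierarchy; must be alone
 *
 * e.g.:	mount -t cgroup -o cpu,cpuacct none /sys/fs/cgroup/cpu
 */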
1612 static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
1613 {
1614 int ret = 0;
1615 struct cgroup_root *root = cgroup_root_from_kf(kf_root);
1616 struct cgroup_sb_opts opts;
1617 unsigned long added_mask, removed_mask;
1618
1619 if (root == &cgrp_dfl_root) {
1620 pr_err("remount is not allowed\n");
1621 return -EINVAL;
1622 }
1623
1624 mutex_lock(&cgroup_mutex);
1625
1626 /* See what subsystems are wanted */
1627 ret = parse_cgroupfs_options(data, &opts);
1628 if (ret)
1629 goto out_unlock;
1630
1631 if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
1632 pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
1633 task_tgid_nr(current), current->comm);
1634
1635 added_mask = opts.subsys_mask & ~root->subsys_mask;
1636 removed_mask = root->subsys_mask & ~opts.subsys_mask;
1637
1638 /* Don't allow flags or name to change at remount */
1639 if ((opts.flags ^ root->flags) ||
1640 (opts.name && strcmp(opts.name, root->name))) {
1641 pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
1642 opts.flags, opts.name ?: "", root->flags, root->name);
1643 ret = -EINVAL;
1644 goto out_unlock;
1645 }
1646
1647 /* remounting is not allowed for populated hierarchies */
1648 if (!list_empty(&root->cgrp.self.children)) {
1649 ret = -EBUSY;
1650 goto out_unlock;
1651 }
1652
1653 ret = rebind_subsystems(root, added_mask);
1654 if (ret)
1655 goto out_unlock;
1656
1657 rebind_subsystems(&cgrp_dfl_root, removed_mask);
1658
1659 if (opts.release_agent) {
1660 spin_lock(&release_agent_path_lock);
1661 strcpy(root->release_agent_path, opts.release_agent);
1662 spin_unlock(&release_agent_path_lock);
1663 }
1664 out_unlock:
1665 kfree(opts.release_agent);
1666 kfree(opts.name);
1667 mutex_unlock(&cgroup_mutex);
1668 return ret;
1669 }
1670
1671 /*
1672 * To reduce the fork() overhead for systems that are not actually using
1673 * their cgroups capability, we don't maintain the lists running through
1674 * each css_set to its tasks until we see the list actually used - in other
1675 * words after the first mount.
1676 */
1677 static bool use_task_css_set_links __read_mostly;
1678
1679 static void cgroup_enable_task_cg_lists(void)
1680 {
1681 struct task_struct *p, *g;
1682
1683 down_write(&css_set_rwsem);
1684
1685 if (use_task_css_set_links)
1686 goto out_unlock;
1687
1688 use_task_css_set_links = true;
1689
1690 /*
1691 * We need tasklist_lock because RCU is not safe against
1692 * while_each_thread(). Besides, a forking task that has passed
1693 * cgroup_post_fork() without seeing use_task_css_set_links = 1
1694 * is not guaranteed to have its child immediately visible in the
1695 * tasklist if we walk through it with RCU.
1696 */
1697 read_lock(&tasklist_lock);
1698 do_each_thread(g, p) {
1699 WARN_ON_ONCE(!list_empty(&p->cg_list) ||
1700 task_css_set(p) != &init_css_set);
1701
1702 /*
1703 * We should check if the process is exiting, otherwise
1704 * it will race with cgroup_exit() in that the list
1705 * entry won't be deleted though the process has exited.
1706 * Do it while holding siglock so that we don't end up
1707 * racing against cgroup_exit().
1708 */
1709 spin_lock_irq(&p->sighand->siglock);
1710 if (!(p->flags & PF_EXITING)) {
1711 struct css_set *cset = task_css_set(p);
1712
1713 list_add(&p->cg_list, &cset->tasks);
1714 get_css_set(cset);
1715 }
1716 spin_unlock_irq(&p->sighand->siglock);
1717 } while_each_thread(g, p);
1718 read_unlock(&tasklist_lock);
1719 out_unlock:
1720 up_write(&css_set_rwsem);
1721 }
1722
1723 static void init_cgroup_housekeeping(struct cgroup *cgrp)
1724 {
1725 struct cgroup_subsys *ss;
1726 int ssid;
1727
1728 INIT_LIST_HEAD(&cgrp->self.sibling);
1729 INIT_LIST_HEAD(&cgrp->self.children);
1730 INIT_LIST_HEAD(&cgrp->cset_links);
1731 INIT_LIST_HEAD(&cgrp->pidlists);
1732 mutex_init(&cgrp->pidlist_mutex);
1733 cgrp->self.cgroup = cgrp;
1734 cgrp->self.flags |= CSS_ONLINE;
1735
1736 for_each_subsys(ss, ssid)
1737 INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
1738
1739 init_waitqueue_head(&cgrp->offline_waitq);
1740 INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent);
1741 }
1742
1743 static void init_cgroup_root(struct cgroup_root *root,
1744 struct cgroup_sb_opts *opts)
1745 {
1746 struct cgroup *cgrp = &root->cgrp;
1747
1748 INIT_LIST_HEAD(&root->root_list);
1749 atomic_set(&root->nr_cgrps, 1);
1750 cgrp->root = root;
1751 init_cgroup_housekeeping(cgrp);
1752 idr_init(&root->cgroup_idr);
1753
1754 root->flags = opts->flags;
1755 if (opts->release_agent)
1756 strcpy(root->release_agent_path, opts->release_agent);
1757 if (opts->name)
1758 strcpy(root->name, opts->name);
1759 if (opts->cpuset_clone_children)
1760 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
1761 }
1762
1763 static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
1764 {
1765 LIST_HEAD(tmp_links);
1766 struct cgroup *root_cgrp = &root->cgrp;
1767 struct cftype *base_files;
1768 struct css_set *cset;
1769 int i, ret;
1770
1771 lockdep_assert_held(&cgroup_mutex);
1772
1773 ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
1774 if (ret < 0)
1775 goto out;
1776 root_cgrp->id = ret;
1777
1778 ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 0,
1779 GFP_KERNEL);
1780 if (ret)
1781 goto out;
1782
1783 /*
1784 * We're accessing css_set_count without locking css_set_rwsem here,
1785 * but that's OK - it can only be increased by someone holding
1786 * cgroup_lock, and that's us. The worst that can happen is that we
1787 * have some link structures left over.
1788 */
1789 ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
1790 if (ret)
1791 goto cancel_ref;
1792
1793 ret = cgroup_init_root_id(root);
1794 if (ret)
1795 goto cancel_ref;
1796
1797 root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
1798 KERNFS_ROOT_CREATE_DEACTIVATED,
1799 root_cgrp);
1800 if (IS_ERR(root->kf_root)) {
1801 ret = PTR_ERR(root->kf_root);
1802 goto exit_root_id;
1803 }
1804 root_cgrp->kn = root->kf_root->kn;
1805
1806 if (root == &cgrp_dfl_root)
1807 base_files = cgroup_dfl_base_files;
1808 else
1809 base_files = cgroup_legacy_base_files;
1810
1811 ret = cgroup_addrm_files(root_cgrp, base_files, true);
1812 if (ret)
1813 goto destroy_root;
1814
1815 ret = rebind_subsystems(root, ss_mask);
1816 if (ret)
1817 goto destroy_root;
1818
1819 /*
1820 * There must be no failure case after here, since rebinding takes
1821 * care of subsystems' refcounts, which are explicitly dropped in
1822 * the failure exit path.
1823 */
1824 list_add(&root->root_list, &cgroup_roots);
1825 cgroup_root_count++;
1826
1827 /*
1828 * Link the root cgroup in this hierarchy into all the css_set
1829 * objects.
1830 */
1831 down_write(&css_set_rwsem);
1832 hash_for_each(css_set_table, i, cset, hlist)
1833 link_css_set(&tmp_links, cset, root_cgrp);
1834 up_write(&css_set_rwsem);
1835
1836 BUG_ON(!list_empty(&root_cgrp->self.children));
1837 BUG_ON(atomic_read(&root->nr_cgrps) != 1);
1838
1839 kernfs_activate(root_cgrp->kn);
1840 ret = 0;
1841 goto out;
1842
1843 destroy_root:
1844 kernfs_destroy_root(root->kf_root);
1845 root->kf_root = NULL;
1846 exit_root_id:
1847 cgroup_exit_root_id(root);
1848 cancel_ref:
1849 percpu_ref_exit(&root_cgrp->self.refcnt);
1850 out:
1851 free_cgrp_cset_links(&tmp_links);
1852 return ret;
1853 }
1854
1855 static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1856 int flags, const char *unused_dev_name,
1857 void *data)
1858 {
1859 struct super_block *pinned_sb = NULL;
1860 struct cgroup_subsys *ss;
1861 struct cgroup_root *root;
1862 struct cgroup_sb_opts opts;
1863 struct dentry *dentry;
1864 int ret;
1865 int i;
1866 bool new_sb;
1867
1868 /*
1869 * The first time anyone tries to mount a cgroup, enable the list
1870 * linking each css_set to its tasks and fix up all existing tasks.
1871 */
1872 if (!use_task_css_set_links)
1873 cgroup_enable_task_cg_lists();
1874
1875 mutex_lock(&cgroup_mutex);
1876
1877 /* First find the desired set of subsystems */
1878 ret = parse_cgroupfs_options(data, &opts);
1879 if (ret)
1880 goto out_unlock;
1881
1882 /* look for a matching existing root */
1883 if (opts.flags & CGRP_ROOT_SANE_BEHAVIOR) {
1884 cgrp_dfl_root_visible = true;
1885 root = &cgrp_dfl_root;
1886 cgroup_get(&root->cgrp);
1887 ret = 0;
1888 goto out_unlock;
1889 }
1890
1891 /*
1892 * Destruction of cgroup root is asynchronous, so subsystems may
1893 * still be dying after the previous unmount. Let's drain the
1894 * dying subsystems. We just need to ensure that the ones
1895 * unmounted previously finish dying and don't care about new ones
1896 * starting. Testing ref liveness is good enough.
1897 */
1898 for_each_subsys(ss, i) {
1899 if (!(opts.subsys_mask & (1 << i)) ||
1900 ss->root == &cgrp_dfl_root)
1901 continue;
1902
1903 if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
1904 mutex_unlock(&cgroup_mutex);
1905 msleep(10);
1906 ret = restart_syscall();
1907 goto out_free;
1908 }
1909 cgroup_put(&ss->root->cgrp);
1910 }
1911
1912 for_each_root(root) {
1913 bool name_match = false;
1914
1915 if (root == &cgrp_dfl_root)
1916 continue;
1917
1918 /*
1919 * If we asked for a name then it must match. Also, if
1920 * name matches but subsys_mask doesn't, we should fail.
1921 * Remember whether name matched.
1922 */
1923 if (opts.name) {
1924 if (strcmp(opts.name, root->name))
1925 continue;
1926 name_match = true;
1927 }
1928
1929 /*
1930 * If we asked for subsystems (or explicitly for no
1931 * subsystems) then they must match.
1932 */
1933 if ((opts.subsys_mask || opts.none) &&
1934 (opts.subsys_mask != root->subsys_mask)) {
1935 if (!name_match)
1936 continue;
1937 ret = -EBUSY;
1938 goto out_unlock;
1939 }
1940
1941 if (root->flags ^ opts.flags)
1942 pr_warn("new mount options do not match the existing superblock, will be ignored\n");
1943
1944 /*
1945 * We want to reuse @root whose lifetime is governed by its
1946 * ->cgrp. Let's check whether @root is alive and keep it
1947 * that way. As cgroup_kill_sb() can happen anytime, we
1948 * want to block it by pinning the sb so that @root doesn't
1949 * get killed before mount is complete.
1950 *
1951 * With the sb pinned, tryget_live can reliably indicate
1952 * whether @root can be reused. If it's being killed,
1953 * drain it. We can use wait_queue for the wait but this
1954 * path is super cold. Let's just sleep a bit and retry.
1955 */
1956 pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
1957 if (IS_ERR(pinned_sb) ||
1958 !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
1959 mutex_unlock(&cgroup_mutex);
1960 if (!IS_ERR_OR_NULL(pinned_sb))
1961 deactivate_super(pinned_sb);
1962 msleep(10);
1963 ret = restart_syscall();
1964 goto out_free;
1965 }
1966
1967 ret = 0;
1968 goto out_unlock;
1969 }
1970
1971 /*
1972 * No such thing, create a new one. name= matching without subsys
1973 * specification is allowed for already existing hierarchies but we
1974 * can't create a new one without a subsys specification.
1975 */
1976 if (!opts.subsys_mask && !opts.none) {
1977 ret = -EINVAL;
1978 goto out_unlock;
1979 }
1980
1981 root = kzalloc(sizeof(*root), GFP_KERNEL);
1982 if (!root) {
1983 ret = -ENOMEM;
1984 goto out_unlock;
1985 }
1986
1987 init_cgroup_root(root, &opts);
1988
1989 ret = cgroup_setup_root(root, opts.subsys_mask);
1990 if (ret)
1991 cgroup_free_root(root);
1992
1993 out_unlock:
1994 mutex_unlock(&cgroup_mutex);
1995 out_free:
1996 kfree(opts.release_agent);
1997 kfree(opts.name);
1998
1999 if (ret)
2000 return ERR_PTR(ret);
2001
2002 dentry = kernfs_mount(fs_type, flags, root->kf_root,
2003 CGROUP_SUPER_MAGIC, &new_sb);
2004 if (IS_ERR(dentry) || !new_sb)
2005 cgroup_put(&root->cgrp);
2006
2007 /*
2008 * If @pinned_sb, we're reusing an existing root and holding an
2009 * extra ref on its sb. Mount is complete. Put the extra ref.
2010 */
2011 if (pinned_sb) {
2012 WARN_ON(new_sb);
2013 deactivate_super(pinned_sb);
2014 }
2015
2016 return dentry;
2017 }
2018
2019 static void cgroup_kill_sb(struct super_block *sb)
2020 {
2021 struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
2022 struct cgroup_root *root = cgroup_root_from_kf(kf_root);
2023
2024 /*
2025 * If @root doesn't have any mounts or children, start killing it.
2026 * This prevents new mounts by disabling percpu_ref_tryget_live().
2027 * cgroup_mount() may wait for @root's release.
2028 *
2029 * And don't kill the default root.
2030 */
2031 if (!list_empty(&root->cgrp.self.children) ||
2032 root == &cgrp_dfl_root)
2033 cgroup_put(&root->cgrp);
2034 else
2035 percpu_ref_kill(&root->cgrp.self.refcnt);
2036
2037 kernfs_kill_sb(sb);
2038 }
2039
2040 static struct file_system_type cgroup_fs_type = {
2041 .name = "cgroup",
2042 .mount = cgroup_mount,
2043 .kill_sb = cgroup_kill_sb,
2044 };
2045
2046 /**
2047 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
2048 * @task: target task
2049 * @buf: the buffer to write the path into
2050 * @buflen: the length of the buffer
2051 *
2052 * Determine @task's cgroup on the first (the one with the lowest non-zero
2053 * hierarchy_id) cgroup hierarchy and copy its path into @buf. This
2054 * function grabs cgroup_mutex and shouldn't be used inside locks used by
2055 * cgroup controller callbacks.
2056 *
2057 * Return value is the same as kernfs_path().
2058 */
2059 char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
2060 {
2061 struct cgroup_root *root;
2062 struct cgroup *cgrp;
2063 int hierarchy_id = 1;
2064 char *path = NULL;
2065
2066 mutex_lock(&cgroup_mutex);
2067 down_read(&css_set_rwsem);
2068
2069 root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
2070
2071 if (root) {
2072 cgrp = task_cgroup_from_root(task, root);
2073 path = cgroup_path(cgrp, buf, buflen);
2074 } else {
2075 /* if no hierarchy exists, everyone is in "/" */
2076 if (strlcpy(buf, "/", buflen) < buflen)
2077 path = buf;
2078 }
2079
2080 up_read(&css_set_rwsem);
2081 mutex_unlock(&cgroup_mutex);
2082 return path;
2083 }
2084 EXPORT_SYMBOL_GPL(task_cgroup_path);
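
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * printing current's cgroup path on the first hierarchy. The PATH_MAX
 * buffer size and the pr_info() message are assumptions made for the
 * example; any sleepable context without cgroup locks held would do.
 *
 *	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
 *
 *	if (buf) {
 *		char *path = task_cgroup_path(current, buf, PATH_MAX);
 *
 *		if (path)
 *			pr_info("current is in %s\n", path);
 *		kfree(buf);
 *	}
 */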
2085
2086 /* used to track tasks and other necessary states during migration */
2087 struct cgroup_taskset {
2088 /* the src and dst cset list running through cset->mg_node */
2089 struct list_head src_csets;
2090 struct list_head dst_csets;
2091
2092 /*
2093 * Fields for cgroup_taskset_*() iteration.
2094 *
2095 * Before migration is committed, the target migration tasks are on
2096 * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of
2097 * the csets on ->dst_csets. ->csets points to either ->src_csets
2098 * or ->dst_csets depending on whether migration is committed.
2099 *
2100 * ->cur_cset and ->cur_task point to the current task position
2101 * during iteration.
2102 */
2103 struct list_head *csets;
2104 struct css_set *cur_cset;
2105 struct task_struct *cur_task;
2106 };
2107
2108 /**
2109 * cgroup_taskset_first - reset taskset and return the first task
2110 * @tset: taskset of interest
2111 *
2112 * @tset iteration is initialized and the first task is returned.
2113 */
2114 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
2115 {
2116 tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
2117 tset->cur_task = NULL;
2118
2119 return cgroup_taskset_next(tset);
2120 }
2121
2122 /**
2123 * cgroup_taskset_next - iterate to the next task in taskset
2124 * @tset: taskset of interest
2125 *
2126 * Return the next task in @tset. Iteration must have been initialized
2127 * with cgroup_taskset_first().
2128 */
2129 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
2130 {
2131 struct css_set *cset = tset->cur_cset;
2132 struct task_struct *task = tset->cur_task;
2133
2134 while (&cset->mg_node != tset->csets) {
2135 if (!task)
2136 task = list_first_entry(&cset->mg_tasks,
2137 struct task_struct, cg_list);
2138 else
2139 task = list_next_entry(task, cg_list);
2140
2141 if (&task->cg_list != &cset->mg_tasks) {
2142 tset->cur_cset = cset;
2143 tset->cur_task = task;
2144 return task;
2145 }
2146
2147 cset = list_next_entry(cset, mg_node);
2148 task = NULL;
2149 }
2150
2151 return NULL;
2152 }
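
/*
 * Usage sketch (illustrative, not part of the original file): a
 * controller's ->can_attach() typically walks the taskset with the two
 * helpers above. The PF_KTHREAD veto is a made-up policy for the
 * example.
 *
 *	static int example_can_attach(struct cgroup_subsys_state *css,
 *				      struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *
 *		for (task = cgroup_taskset_first(tset); task;
 *		     task = cgroup_taskset_next(tset))
 *			if (task->flags & PF_KTHREAD)
 *				return -EINVAL;
 *		return 0;
 *	}
 */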
2153
2154 /**
2155 * cgroup_task_migrate - move a task from one cgroup to another.
2156 * @old_cgrp: the cgroup @tsk is being migrated from
2157 * @tsk: the task being migrated
2158 * @new_cset: the new css_set @tsk is being attached to
2159 *
2160 * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked.
2161 */
2162 static void cgroup_task_migrate(struct cgroup *old_cgrp,
2163 struct task_struct *tsk,
2164 struct css_set *new_cset)
2165 {
2166 struct css_set *old_cset;
2167
2168 lockdep_assert_held(&cgroup_mutex);
2169 lockdep_assert_held(&css_set_rwsem);
2170
2171 /*
2172 * We are synchronized through cgroup_threadgroup_rwsem against
2173 * PF_EXITING setting such that we can't race against cgroup_exit()
2174 * changing the css_set to init_css_set and dropping the old one.
2175 */
2176 WARN_ON_ONCE(tsk->flags & PF_EXITING);
2177 old_cset = task_css_set(tsk);
2178
2179 get_css_set(new_cset);
2180 rcu_assign_pointer(tsk->cgroups, new_cset);
2181
2182 /*
2183 * Use move_tail so that cgroup_taskset_first() still returns the
2184 * leader after migration. This works because cgroup_migrate()
2185 * ensures that the dst_cset of the leader is the first on the
2186 * tset's dst_csets list.
2187 */
2188 list_move_tail(&tsk->cg_list, &new_cset->mg_tasks);
2189
2190 /*
2191 * We just gained a reference on old_cset by taking it from the
2192 * task. As trading it for new_cset is protected by cgroup_mutex,
2193 * we're safe to drop it here; it will be freed under RCU.
2194 */
2195 put_css_set_locked(old_cset);
2196 }
2197
2198 /**
2199 * cgroup_migrate_finish - cleanup after attach
2200 * @preloaded_csets: list of preloaded css_sets
2201 *
2202 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See
2203 * those functions for details.
2204 */
2205 static void cgroup_migrate_finish(struct list_head *preloaded_csets)
2206 {
2207 struct css_set *cset, *tmp_cset;
2208
2209 lockdep_assert_held(&cgroup_mutex);
2210
2211 down_write(&css_set_rwsem);
2212 list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
2213 cset->mg_src_cgrp = NULL;
2214 cset->mg_dst_cset = NULL;
2215 list_del_init(&cset->mg_preload_node);
2216 put_css_set_locked(cset);
2217 }
2218 up_write(&css_set_rwsem);
2219 }
2220
2221 /**
2222 * cgroup_migrate_add_src - add a migration source css_set
2223 * @src_cset: the source css_set to add
2224 * @dst_cgrp: the destination cgroup
2225 * @preloaded_csets: list of preloaded css_sets
2226 *
2227 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin
2228 * @src_cset and add it to @preloaded_csets, which should later be cleaned
2229 * up by cgroup_migrate_finish().
2230 *
2231 * This function may be called without holding cgroup_threadgroup_rwsem
2232 * even if the target is a process. Threads may be created and destroyed
2233 * but as long as cgroup_mutex is not dropped, no new css_set can be put
2234 * into play and the preloaded css_sets are guaranteed to cover all
2235 * migrations.
2236 */
2237 static void cgroup_migrate_add_src(struct css_set *src_cset,
2238 struct cgroup *dst_cgrp,
2239 struct list_head *preloaded_csets)
2240 {
2241 struct cgroup *src_cgrp;
2242
2243 lockdep_assert_held(&cgroup_mutex);
2244 lockdep_assert_held(&css_set_rwsem);
2245
2246 src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
2247
2248 if (!list_empty(&src_cset->mg_preload_node))
2249 return;
2250
2251 WARN_ON(src_cset->mg_src_cgrp);
2252 WARN_ON(!list_empty(&src_cset->mg_tasks));
2253 WARN_ON(!list_empty(&src_cset->mg_node));
2254
2255 src_cset->mg_src_cgrp = src_cgrp;
2256 get_css_set(src_cset);
2257 list_add(&src_cset->mg_preload_node, preloaded_csets);
2258 }
2259
2260 /**
2261 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
2262 * @dst_cgrp: the destination cgroup (may be %NULL)
2263 * @preloaded_csets: list of preloaded source css_sets
2264 *
2265 * Tasks are about to be moved to @dst_cgrp and all the source css_sets
2266 * have been preloaded to @preloaded_csets. This function looks up and
2267 * pins all destination css_sets, links each to its source, and appends them
2268 * to @preloaded_csets. If @dst_cgrp is %NULL, the destination of each
2269 * source css_set is assumed to be its cgroup on the default hierarchy.
2270 *
2271 * This function must be called after cgroup_migrate_add_src() has been
2272 * called on each migration source css_set. After migration is performed
2273 * using cgroup_migrate(), cgroup_migrate_finish() must be called on
2274 * @preloaded_csets.
2275 */
2276 static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp,
2277 struct list_head *preloaded_csets)
2278 {
2279 LIST_HEAD(csets);
2280 struct css_set *src_cset, *tmp_cset;
2281
2282 lockdep_assert_held(&cgroup_mutex);
2283
2284 /*
2285 * Except for the root, child_subsys_mask must be zero for a cgroup
2286 * with tasks so that child cgroups don't compete against tasks.
2287 */
2288 if (dst_cgrp && cgroup_on_dfl(dst_cgrp) && cgroup_parent(dst_cgrp) &&
2289 dst_cgrp->child_subsys_mask)
2290 return -EBUSY;
2291
2292 /* look up the dst cset for each src cset and link it to src */
2293 list_for_each_entry_safe(src_cset, tmp_cset, preloaded_csets, mg_preload_node) {
2294 struct css_set *dst_cset;
2295
2296 dst_cset = find_css_set(src_cset,
2297 dst_cgrp ?: src_cset->dfl_cgrp);
2298 if (!dst_cset)
2299 goto err;
2300
2301 WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);
2302
2303 /*
2304 * If src cset equals dst, it's a noop. Drop the src.
2305 * cgroup_migrate() will skip the cset too. Note that we
2306 * can't handle src == dst as some nodes are used by both.
2307 */
2308 if (src_cset == dst_cset) {
2309 src_cset->mg_src_cgrp = NULL;
2310 list_del_init(&src_cset->mg_preload_node);
2311 put_css_set(src_cset);
2312 put_css_set(dst_cset);
2313 continue;
2314 }
2315
2316 src_cset->mg_dst_cset = dst_cset;
2317
2318 if (list_empty(&dst_cset->mg_preload_node))
2319 list_add(&dst_cset->mg_preload_node, &csets);
2320 else
2321 put_css_set(dst_cset);
2322 }
2323
2324 list_splice_tail(&csets, preloaded_csets);
2325 return 0;
2326 err:
2327 cgroup_migrate_finish(&csets);
2328 return -ENOMEM;
2329 }
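
/*
 * Usage sketch (illustrative): the expected calling sequence for the
 * preload/migrate machinery, mirrored by cgroup_attach_task() below.
 *
 *	down_read(&css_set_rwsem);
 *	cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &preloaded);
 *	up_read(&css_set_rwsem);
 *
 *	ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded);
 *	if (!ret)
 *		ret = cgroup_migrate(dst_cgrp, leader, threadgroup);
 *	cgroup_migrate_finish(&preloaded);
 */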
2330
2331 /**
2332 * cgroup_migrate - migrate a process or task to a cgroup
2333 * @cgrp: the destination cgroup
2334 * @leader: the leader of the process or the task to migrate
2335 * @threadgroup: whether @leader points to the whole process or a single task
2336 *
2337 * Migrate a process or task denoted by @leader to @cgrp. If migrating a
2338 * process, the caller must be holding cgroup_threadgroup_rwsem. The
2339 * caller is also responsible for invoking cgroup_migrate_add_src() and
2340 * cgroup_migrate_prepare_dst() on the targets before invoking this
2341 * function and following up with cgroup_migrate_finish().
2342 *
2343 * As long as a controller's ->can_attach() doesn't fail, this function is
2344 * guaranteed to succeed. This means that, excluding ->can_attach()
2345 * failure, when migrating multiple targets, the success or failure can be
2346 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
2347 * actually starting to migrate.
2348 */
2349 static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
2350 bool threadgroup)
2351 {
2352 struct cgroup_taskset tset = {
2353 .src_csets = LIST_HEAD_INIT(tset.src_csets),
2354 .dst_csets = LIST_HEAD_INIT(tset.dst_csets),
2355 .csets = &tset.src_csets,
2356 };
2357 struct cgroup_subsys_state *css, *failed_css = NULL;
2358 struct css_set *cset, *tmp_cset;
2359 struct task_struct *task, *tmp_task;
2360 int i, ret;
2361
2362 /*
2363 * Prevent freeing of tasks while we take a snapshot. Tasks that are
2364 * already PF_EXITING could be freed from underneath us unless we
2365 * take an rcu_read_lock.
2366 */
2367 down_write(&css_set_rwsem);
2368 rcu_read_lock();
2369 task = leader;
2370 do {
2371 /* @task either already exited or can't exit until the end */
2372 if (task->flags & PF_EXITING)
2373 goto next;
2374
2375 /* leave @task alone if post_fork() hasn't linked it yet */
2376 if (list_empty(&task->cg_list))
2377 goto next;
2378
2379 cset = task_css_set(task);
2380 if (!cset->mg_src_cgrp)
2381 goto next;
2382
2383 /*
2384 * cgroup_taskset_first() must always return the leader.
2385 * Take care to avoid disturbing the ordering.
2386 */
2387 list_move_tail(&task->cg_list, &cset->mg_tasks);
2388 if (list_empty(&cset->mg_node))
2389 list_add_tail(&cset->mg_node, &tset.src_csets);
2390 if (list_empty(&cset->mg_dst_cset->mg_node))
2391 list_move_tail(&cset->mg_dst_cset->mg_node,
2392 &tset.dst_csets);
2393 next:
2394 if (!threadgroup)
2395 break;
2396 } while_each_thread(leader, task);
2397 rcu_read_unlock();
2398 up_write(&css_set_rwsem);
2399
2400 /* methods shouldn't be called if no task is actually migrating */
2401 if (list_empty(&tset.src_csets))
2402 return 0;
2403
2404 /* check that we can legitimately attach to the cgroup */
2405 for_each_e_css(css, i, cgrp) {
2406 if (css->ss->can_attach) {
2407 ret = css->ss->can_attach(css, &tset);
2408 if (ret) {
2409 failed_css = css;
2410 goto out_cancel_attach;
2411 }
2412 }
2413 }
2414
2415 /*
2416 * Now that we're guaranteed success, proceed to move all tasks to
2417 * the new cgroup. There are no failure cases after here, so this
2418 * is the commit point.
2419 */
2420 down_write(&css_set_rwsem);
2421 list_for_each_entry(cset, &tset.src_csets, mg_node) {
2422 list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list)
2423 cgroup_task_migrate(cset->mg_src_cgrp, task,
2424 cset->mg_dst_cset);
2425 }
2426 up_write(&css_set_rwsem);
2427
2428 /*
2429 * Migration is committed, all target tasks are now on dst_csets.
2430 * Nothing is sensitive to fork() after this point. Notify
2431 * controllers that migration is complete.
2432 */
2433 tset.csets = &tset.dst_csets;
2434
2435 for_each_e_css(css, i, cgrp)
2436 if (css->ss->attach)
2437 css->ss->attach(css, &tset);
2438
2439 ret = 0;
2440 goto out_release_tset;
2441
2442 out_cancel_attach:
2443 for_each_e_css(css, i, cgrp) {
2444 if (css == failed_css)
2445 break;
2446 if (css->ss->cancel_attach)
2447 css->ss->cancel_attach(css, &tset);
2448 }
2449 out_release_tset:
2450 down_write(&css_set_rwsem);
2451 list_splice_init(&tset.dst_csets, &tset.src_csets);
2452 list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) {
2453 list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
2454 list_del_init(&cset->mg_node);
2455 }
2456 up_write(&css_set_rwsem);
2457 return ret;
2458 }
2459
2460 /**
2461 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
2462 * @dst_cgrp: the cgroup to attach to
2463 * @leader: the task or the leader of the threadgroup to be attached
2464 * @threadgroup: attach the whole threadgroup?
2465 *
2466 * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
2467 */
2468 static int cgroup_attach_task(struct cgroup *dst_cgrp,
2469 struct task_struct *leader, bool threadgroup)
2470 {
2471 LIST_HEAD(preloaded_csets);
2472 struct task_struct *task;
2473 int ret;
2474
2475 /* look up all src csets */
2476 down_read(&css_set_rwsem);
2477 rcu_read_lock();
2478 task = leader;
2479 do {
2480 cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
2481 &preloaded_csets);
2482 if (!threadgroup)
2483 break;
2484 } while_each_thread(leader, task);
2485 rcu_read_unlock();
2486 up_read(&css_set_rwsem);
2487
2488 /* prepare dst csets and commit */
2489 ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
2490 if (!ret)
2491 ret = cgroup_migrate(dst_cgrp, leader, threadgroup);
2492
2493 cgroup_migrate_finish(&preloaded_csets);
2494 return ret;
2495 }
2496
2497 static int cgroup_procs_write_permission(struct task_struct *task,
2498 struct cgroup *dst_cgrp,
2499 struct kernfs_open_file *of)
2500 {
2501 const struct cred *cred = current_cred();
2502 const struct cred *tcred = get_task_cred(task);
2503 int ret = 0;
2504
2505 /*
2506 * even if we're attaching all tasks in the thread group, we only
2507 * need to check permissions on one of them.
2508 */
2509 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
2510 !uid_eq(cred->euid, tcred->uid) &&
2511 !uid_eq(cred->euid, tcred->suid))
2512 ret = -EACCES;
2513
2514 if (!ret && cgroup_on_dfl(dst_cgrp)) {
2515 struct super_block *sb = of->file->f_path.dentry->d_sb;
2516 struct cgroup *cgrp;
2517 struct inode *inode;
2518
2519 down_read(&css_set_rwsem);
2520 cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
2521 up_read(&css_set_rwsem);
2522
2523 while (!cgroup_is_descendant(dst_cgrp, cgrp))
2524 cgrp = cgroup_parent(cgrp);
2525
2526 ret = -ENOMEM;
2527 inode = kernfs_get_inode(sb, cgrp->procs_kn);
2528 if (inode) {
2529 ret = inode_permission(inode, MAY_WRITE);
2530 iput(inode);
2531 }
2532 }
2533
2534 put_cred(tcred);
2535 return ret;
2536 }
2537
2538 /*
2539 * Find the task_struct of the task to attach by vpid and pass it along to the
2540 * function to attach either it or all tasks in its threadgroup. Will lock
2541 * cgroup_mutex and threadgroup.
2542 */
2543 static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2544 size_t nbytes, loff_t off, bool threadgroup)
2545 {
2546 struct task_struct *tsk;
2547 struct cgroup *cgrp;
2548 pid_t pid;
2549 int ret;
2550
2551 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
2552 return -EINVAL;
2553
2554 cgrp = cgroup_kn_lock_live(of->kn);
2555 if (!cgrp)
2556 return -ENODEV;
2557
2558 percpu_down_write(&cgroup_threadgroup_rwsem);
2559 rcu_read_lock();
2560 if (pid) {
2561 tsk = find_task_by_vpid(pid);
2562 if (!tsk) {
2563 ret = -ESRCH;
2564 goto out_unlock_rcu;
2565 }
2566 } else {
2567 tsk = current;
2568 }
2569
2570 if (threadgroup)
2571 tsk = tsk->group_leader;
2572
2573 /*
2574 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
2575 * trapped in a cpuset, or an RT worker may be born in a cgroup
2576 * with no rt_runtime allocated. Just say no.
2577 */
2578 if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
2579 ret = -EINVAL;
2580 goto out_unlock_rcu;
2581 }
2582
2583 get_task_struct(tsk);
2584 rcu_read_unlock();
2585
2586 ret = cgroup_procs_write_permission(tsk, cgrp, of);
2587 if (!ret)
2588 ret = cgroup_attach_task(cgrp, tsk, threadgroup);
2589
2590 put_task_struct(tsk);
2591 goto out_unlock_threadgroup;
2592
2593 out_unlock_rcu:
2594 rcu_read_unlock();
2595 out_unlock_threadgroup:
2596 percpu_up_write(&cgroup_threadgroup_rwsem);
2597 cgroup_kn_unlock(of->kn);
2598 return ret ?: nbytes;
2599 }
2600
2601 /**
2602 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
2603 * @from: attach to all cgroups of a given task
2604 * @tsk: the task to be attached
2605 */
2606 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
2607 {
2608 struct cgroup_root *root;
2609 int retval = 0;
2610
2611 mutex_lock(&cgroup_mutex);
2612 for_each_root(root) {
2613 struct cgroup *from_cgrp;
2614
2615 if (root == &cgrp_dfl_root)
2616 continue;
2617
2618 down_read(&css_set_rwsem);
2619 from_cgrp = task_cgroup_from_root(from, root);
2620 up_read(&css_set_rwsem);
2621
2622 retval = cgroup_attach_task(from_cgrp, tsk, false);
2623 if (retval)
2624 break;
2625 }
2626 mutex_unlock(&cgroup_mutex);
2627
2628 return retval;
2629 }
2630 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
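
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * moving its worker kthread into the cgroups of the opening process.
 * "worker" and the error handling are hypothetical.
 *
 *	err = cgroup_attach_task_all(current, worker);
 *	if (err)
 *		kthread_stop(worker);
 */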
2631
2632 static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
2633 char *buf, size_t nbytes, loff_t off)
2634 {
2635 return __cgroup_procs_write(of, buf, nbytes, off, false);
2636 }
2637
2638 static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
2639 char *buf, size_t nbytes, loff_t off)
2640 {
2641 return __cgroup_procs_write(of, buf, nbytes, off, true);
2642 }
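
/*
 * Interface sketch (illustrative): userland writes funnel into
 * __cgroup_procs_write() above;
 *
 *	echo $PID > cgroup.procs	(threadgroup == true)
 *	echo $TID > tasks		(threadgroup == false; the "tasks"
 *					 file exists on legacy hierarchies)
 */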
2643
2644 static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
2645 char *buf, size_t nbytes, loff_t off)
2646 {
2647 struct cgroup *cgrp;
2648
2649 BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
2650
2651 cgrp = cgroup_kn_lock_live(of->kn);
2652 if (!cgrp)
2653 return -ENODEV;
2654 spin_lock(&release_agent_path_lock);
2655 strlcpy(cgrp->root->release_agent_path, strstrip(buf),
2656 sizeof(cgrp->root->release_agent_path));
2657 spin_unlock(&release_agent_path_lock);
2658 cgroup_kn_unlock(of->kn);
2659 return nbytes;
2660 }
2661
2662 static int cgroup_release_agent_show(struct seq_file *seq, void *v)
2663 {
2664 struct cgroup *cgrp = seq_css(seq)->cgroup;
2665
2666 spin_lock(&release_agent_path_lock);
2667 seq_puts(seq, cgrp->root->release_agent_path);
2668 spin_unlock(&release_agent_path_lock);
2669 seq_putc(seq, '\n');
2670 return 0;
2671 }
2672
2673 static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
2674 {
2675 seq_puts(seq, "0\n");
2676 return 0;
2677 }
2678
2679 static void cgroup_print_ss_mask(struct seq_file *seq, unsigned long ss_mask)
2680 {
2681 struct cgroup_subsys *ss;
2682 bool printed = false;
2683 int ssid;
2684
2685 for_each_subsys_which(ss, ssid, &ss_mask) {
2686 if (printed)
2687 seq_putc(seq, ' ');
2688 seq_printf(seq, "%s", ss->name);
2689 printed = true;
2690 }
2691 if (printed)
2692 seq_putc(seq, '\n');
2693 }
2694
2695 /* show controllers which are currently attached to the default hierarchy */
2696 static int cgroup_root_controllers_show(struct seq_file *seq, void *v)
2697 {
2698 struct cgroup *cgrp = seq_css(seq)->cgroup;
2699
2700 cgroup_print_ss_mask(seq, cgrp->root->subsys_mask &
2701 ~cgrp_dfl_root_inhibit_ss_mask);
2702 return 0;
2703 }
2704
2705 /* show controllers which are enabled from the parent */
2706 static int cgroup_controllers_show(struct seq_file *seq, void *v)
2707 {
2708 struct cgroup *cgrp = seq_css(seq)->cgroup;
2709
2710 cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->subtree_control);
2711 return 0;
2712 }
2713
2714 /* show controllers which are enabled for a given cgroup's children */
2715 static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
2716 {
2717 struct cgroup *cgrp = seq_css(seq)->cgroup;
2718
2719 cgroup_print_ss_mask(seq, cgrp->subtree_control);
2720 return 0;
2721 }
2722
2723 /**
2724 * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
2725 * @cgrp: root of the subtree to update csses for
2726 *
2727 * @cgrp's child_subsys_mask has changed and its subtree's (self excluded)
2728 * css associations need to be updated accordingly. This function looks up
2729 * all css_sets which are attached to the subtree, creates the matching
2730 * updated css_sets and migrates the tasks to the new ones.
2731 */
2732 static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2733 {
2734 LIST_HEAD(preloaded_csets);
2735 struct cgroup_subsys_state *css;
2736 struct css_set *src_cset;
2737 int ret;
2738
2739 lockdep_assert_held(&cgroup_mutex);
2740
2741 percpu_down_write(&cgroup_threadgroup_rwsem);
2742
2743 /* look up all csses currently attached to @cgrp's subtree */
2744 down_read(&css_set_rwsem);
2745 css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
2746 struct cgrp_cset_link *link;
2747
2748 /* self is not affected by child_subsys_mask change */
2749 if (css->cgroup == cgrp)
2750 continue;
2751
2752 list_for_each_entry(link, &css->cgroup->cset_links, cset_link)
2753 cgroup_migrate_add_src(link->cset, cgrp,
2754 &preloaded_csets);
2755 }
2756 up_read(&css_set_rwsem);
2757
2758 /* NULL dst indicates self on default hierarchy */
2759 ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
2760 if (ret)
2761 goto out_finish;
2762
2763 list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
2764 struct task_struct *last_task = NULL, *task;
2765
2766 /* src_csets precede dst_csets, break on the first dst_cset */
2767 if (!src_cset->mg_src_cgrp)
2768 break;
2769
2770 /*
2771 * All tasks in src_cset need to be migrated to the
2772 * matching dst_cset. Empty it process by process. We
2773 * walk tasks but migrate processes. The leader might even
2774 * belong to a different cset but such src_cset would also
2775 * be among the target src_csets because the default
2776 * hierarchy enforces per-process membership.
2777 */
2778 while (true) {
2779 down_read(&css_set_rwsem);
2780 task = list_first_entry_or_null(&src_cset->tasks,
2781 struct task_struct, cg_list);
2782 if (task) {
2783 task = task->group_leader;
2784 WARN_ON_ONCE(!task_css_set(task)->mg_src_cgrp);
2785 get_task_struct(task);
2786 }
2787 up_read(&css_set_rwsem);
2788
2789 if (!task)
2790 break;
2791
2792 /* guard against possible infinite loop */
2793 if (WARN(last_task == task,
2794 "cgroup: update_dfl_csses failed to make progress, aborting in inconsistent state\n"))
2795 goto out_finish;
2796 last_task = task;
2797
2798 ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);
2799
2800 put_task_struct(task);
2801
2802 if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
2803 goto out_finish;
2804 }
2805 }
2806
2807 out_finish:
2808 cgroup_migrate_finish(&preloaded_csets);
2809 percpu_up_write(&cgroup_threadgroup_rwsem);
2810 return ret;
2811 }
2812
2813 /* change the enabled child controllers for a cgroup in the default hierarchy */
2814 static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
2815 char *buf, size_t nbytes,
2816 loff_t off)
2817 {
2818 unsigned long enable = 0, disable = 0;
2819 unsigned long css_enable, css_disable, old_sc, new_sc, old_ss, new_ss;
2820 struct cgroup *cgrp, *child;
2821 struct cgroup_subsys *ss;
2822 char *tok;
2823 int ssid, ret;
2824
2825 /*
2826 * Parse input - space separated list of subsystem names prefixed
2827 * with either + or -.
2828 */
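/*
 * Interface sketch (illustrative): a typical write looks like
 *
 *	echo "+memory -pids" > cgroup.subtree_control
 *
 * where each "+name"/"-name" token is matched against ss->name below.
 */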
2829 buf = strstrip(buf);
2830 while ((tok = strsep(&buf, " "))) {
2831 unsigned long tmp_ss_mask = ~cgrp_dfl_root_inhibit_ss_mask;
2832
2833 if (tok[0] == '\0')
2834 continue;
2835 for_each_subsys_which(ss, ssid, &tmp_ss_mask) {
2836 if (!cgroup_ssid_enabled(ssid) ||
2837 strcmp(tok + 1, ss->name))
2838 continue;
2839
2840 if (*tok == '+') {
2841 enable |= 1 << ssid;
2842 disable &= ~(1 << ssid);
2843 } else if (*tok == '-') {
2844 disable |= 1 << ssid;
2845 enable &= ~(1 << ssid);
2846 } else {
2847 return -EINVAL;
2848 }
2849 break;
2850 }
2851 if (ssid == CGROUP_SUBSYS_COUNT)
2852 return -EINVAL;
2853 }
2854
2855 cgrp = cgroup_kn_lock_live(of->kn);
2856 if (!cgrp)
2857 return -ENODEV;
2858
2859 for_each_subsys(ss, ssid) {
2860 if (enable & (1 << ssid)) {
2861 if (cgrp->subtree_control & (1 << ssid)) {
2862 enable &= ~(1 << ssid);
2863 continue;
2864 }
2865
2866 /* unavailable or not enabled on the parent? */
2867 if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
2868 (cgroup_parent(cgrp) &&
2869 !(cgroup_parent(cgrp)->subtree_control & (1 << ssid)))) {
2870 ret = -ENOENT;
2871 goto out_unlock;
2872 }
2873 } else if (disable & (1 << ssid)) {
2874 if (!(cgrp->subtree_control & (1 << ssid))) {
2875 disable &= ~(1 << ssid);
2876 continue;
2877 }
2878
2879 /* a child has it enabled? */
2880 cgroup_for_each_live_child(child, cgrp) {
2881 if (child->subtree_control & (1 << ssid)) {
2882 ret = -EBUSY;
2883 goto out_unlock;
2884 }
2885 }
2886 }
2887 }
2888
2889 if (!enable && !disable) {
2890 ret = 0;
2891 goto out_unlock;
2892 }
2893
2894 /*
2895 * Except for the root, subtree_control must be zero for a cgroup
2896 * with tasks so that child cgroups don't compete against tasks.
2897 */
2898 if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
2899 ret = -EBUSY;
2900 goto out_unlock;
2901 }
2902
2903 /*
2904 * Update subsys masks and calculate what needs to be done. More
2905 * subsystems than specified may need to be enabled or disabled
2906 * depending on subsystem dependencies.
2907 */
2908 old_sc = cgrp->subtree_control;
2909 old_ss = cgrp->child_subsys_mask;
2910 new_sc = (old_sc | enable) & ~disable;
2911 new_ss = cgroup_calc_child_subsys_mask(cgrp, new_sc);
2912
2913 css_enable = ~old_ss & new_ss;
2914 css_disable = old_ss & ~new_ss;
2915 enable |= css_enable;
2916 disable |= css_disable;
2917
2918 /*
2919 * Because css offlining is asynchronous, userland might try to
2920 * re-enable the same controller while the previous instance is
2921 * still around. In such cases, wait till it's gone using
2922 * offline_waitq.
2923 */
2924 for_each_subsys_which(ss, ssid, &css_enable) {
2925 cgroup_for_each_live_child(child, cgrp) {
2926 DEFINE_WAIT(wait);
2927
2928 if (!cgroup_css(child, ss))
2929 continue;
2930
2931 cgroup_get(child);
2932 prepare_to_wait(&child->offline_waitq, &wait,
2933 TASK_UNINTERRUPTIBLE);
2934 cgroup_kn_unlock(of->kn);
2935 schedule();
2936 finish_wait(&child->offline_waitq, &wait);
2937 cgroup_put(child);
2938
2939 return restart_syscall();
2940 }
2941 }
2942
2943 cgrp->subtree_control = new_sc;
2944 cgrp->child_subsys_mask = new_ss;
2945
2946 /*
2947 * Create new csses or make the existing ones visible. A css is
2948 * created invisible if it's being implicitly enabled through
2949 * dependency. An invisible css is made visible when the userland
2950 * explicitly enables it.
2951 */
2952 for_each_subsys(ss, ssid) {
2953 if (!(enable & (1 << ssid)))
2954 continue;
2955
2956 cgroup_for_each_live_child(child, cgrp) {
2957 if (css_enable & (1 << ssid))
2958 ret = create_css(child, ss,
2959 cgrp->subtree_control & (1 << ssid));
2960 else
2961 ret = cgroup_populate_dir(child, 1 << ssid);
2962 if (ret)
2963 goto err_undo_css;
2964 }
2965 }
2966
2967 /*
2968 * At this point, cgroup_e_css() results reflect the new csses
2969 * making the following cgroup_update_dfl_csses() properly update
2970 * css associations of all tasks in the subtree.
2971 */
2972 ret = cgroup_update_dfl_csses(cgrp);
2973 if (ret)
2974 goto err_undo_css;
2975
2976 /*
2977 * All tasks are migrated out of disabled csses. Kill or hide
2978 * them. A css is hidden when the userland requests it to be
2979 * disabled while other subsystems are still depending on it. The
2980 * css must not actively control resources and be in the vanilla
2981 * state if it's made visible again later. Controllers which may
2982 * be depended upon should provide ->css_reset() for this purpose.
2983 */
2984 for_each_subsys(ss, ssid) {
2985 if (!(disable & (1 << ssid)))
2986 continue;
2987
2988 cgroup_for_each_live_child(child, cgrp) {
2989 struct cgroup_subsys_state *css = cgroup_css(child, ss);
2990
2991 if (css_disable & (1 << ssid)) {
2992 kill_css(css);
2993 } else {
2994 cgroup_clear_dir(child, 1 << ssid);
2995 if (ss->css_reset)
2996 ss->css_reset(css);
2997 }
2998 }
2999 }
3000
3001 /*
3002 * The effective csses of all the descendants (excluding @cgrp) may
3003 * have changed. Subsystems can optionally subscribe to this event
3004 * by implementing ->css_e_css_changed() which is invoked if any of
3005 * the effective csses seen from the css's cgroup may have changed.
3006 */
3007 for_each_subsys(ss, ssid) {
3008 struct cgroup_subsys_state *this_css = cgroup_css(cgrp, ss);
3009 struct cgroup_subsys_state *css;
3010
3011 if (!ss->css_e_css_changed || !this_css)
3012 continue;
3013
3014 css_for_each_descendant_pre(css, this_css)
3015 if (css != this_css)
3016 ss->css_e_css_changed(css);
3017 }
3018
3019 kernfs_activate(cgrp->kn);
3020 ret = 0;
3021 out_unlock:
3022 cgroup_kn_unlock(of->kn);
3023 return ret ?: nbytes;
3024
3025 err_undo_css:
3026 cgrp->subtree_control = old_sc;
3027 cgrp->child_subsys_mask = old_ss;
3028
3029 for_each_subsys(ss, ssid) {
3030 if (!(enable & (1 << ssid)))
3031 continue;
3032
3033 cgroup_for_each_live_child(child, cgrp) {
3034 struct cgroup_subsys_state *css = cgroup_css(child, ss);
3035
3036 if (!css)
3037 continue;
3038
3039 if (css_enable & (1 << ssid))
3040 kill_css(css);
3041 else
3042 cgroup_clear_dir(child, 1 << ssid);
3043 }
3044 }
3045 goto out_unlock;
3046 }
3047
3048 static int cgroup_populated_show(struct seq_file *seq, void *v)
3049 {
3050 seq_printf(seq, "%d\n", (bool)seq_css(seq)->cgroup->populated_cnt);
3051 return 0;
3052 }
3053
3054 static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
3055 size_t nbytes, loff_t off)
3056 {
3057 struct cgroup *cgrp = of->kn->parent->priv;
3058 struct cftype *cft = of->kn->priv;
3059 struct cgroup_subsys_state *css;
3060 int ret;
3061
3062 if (cft->write)
3063 return cft->write(of, buf, nbytes, off);
3064
3065 /*
3066 * kernfs guarantees that a file isn't deleted with operations in
3067 * flight, which means that the matching css is and stays alive and
3068 * doesn't need to be pinned. The RCU locking is not necessary
3069 * either. It's just for the convenience of using cgroup_css().
3070 */
3071 rcu_read_lock();
3072 css = cgroup_css(cgrp, cft->ss);
3073 rcu_read_unlock();
3074
3075 if (cft->write_u64) {
3076 unsigned long long v;
3077 ret = kstrtoull(buf, 0, &v);
3078 if (!ret)
3079 ret = cft->write_u64(css, cft, v);
3080 } else if (cft->write_s64) {
3081 long long v;
3082 ret = kstrtoll(buf, 0, &v);
3083 if (!ret)
3084 ret = cft->write_s64(css, cft, v);
3085 } else {
3086 ret = -EINVAL;
3087 }
3088
3089 return ret ?: nbytes;
3090 }
3091
3092 static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
3093 {
3094 return seq_cft(seq)->seq_start(seq, ppos);
3095 }
3096
3097 static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
3098 {
3099 return seq_cft(seq)->seq_next(seq, v, ppos);
3100 }
3101
3102 static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
3103 {
3104 seq_cft(seq)->seq_stop(seq, v);
3105 }
3106
3107 static int cgroup_seqfile_show(struct seq_file *m, void *arg)
3108 {
3109 struct cftype *cft = seq_cft(m);
3110 struct cgroup_subsys_state *css = seq_css(m);
3111
3112 if (cft->seq_show)
3113 return cft->seq_show(m, arg);
3114
3115 if (cft->read_u64)
3116 seq_printf(m, "%llu\n", cft->read_u64(css, cft));
3117 else if (cft->read_s64)
3118 seq_printf(m, "%lld\n", cft->read_s64(css, cft));
3119 else
3120 return -EINVAL;
3121 return 0;
3122 }
3123
3124 static struct kernfs_ops cgroup_kf_single_ops = {
3125 .atomic_write_len = PAGE_SIZE,
3126 .write = cgroup_file_write,
3127 .seq_show = cgroup_seqfile_show,
3128 };
3129
3130 static struct kernfs_ops cgroup_kf_ops = {
3131 .atomic_write_len = PAGE_SIZE,
3132 .write = cgroup_file_write,
3133 .seq_start = cgroup_seqfile_start,
3134 .seq_next = cgroup_seqfile_next,
3135 .seq_stop = cgroup_seqfile_stop,
3136 .seq_show = cgroup_seqfile_show,
3137 };
3138
3139 /*
3140 * cgroup_rename - Only allow simple rename of directories in place.
3141 */
3142 static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
3143 const char *new_name_str)
3144 {
3145 struct cgroup *cgrp = kn->priv;
3146 int ret;
3147
3148 if (kernfs_type(kn) != KERNFS_DIR)
3149 return -ENOTDIR;
3150 if (kn->parent != new_parent)
3151 return -EIO;
3152
3153 /*
3154 * This isn't a proper migration and its usefulness is very
3155 * limited. Disallow on the default hierarchy.
3156 */
3157 if (cgroup_on_dfl(cgrp))
3158 return -EPERM;
3159
3160 /*
3161 * We're gonna grab cgroup_mutex which nests outside kernfs
3162 * active_ref. kernfs_rename() doesn't require active_ref
3163 * protection. Break them before grabbing cgroup_mutex.
3164 */
3165 kernfs_break_active_protection(new_parent);
3166 kernfs_break_active_protection(kn);
3167
3168 mutex_lock(&cgroup_mutex);
3169
3170 ret = kernfs_rename(kn, new_parent, new_name_str);
3171
3172 mutex_unlock(&cgroup_mutex);
3173
3174 kernfs_unbreak_active_protection(kn);
3175 kernfs_unbreak_active_protection(new_parent);
3176 return ret;
3177 }
3178
3179 /* set uid and gid of cgroup dirs and files to that of the creator */
3180 static int cgroup_kn_set_ugid(struct kernfs_node *kn)
3181 {
3182 struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
3183 .ia_uid = current_fsuid(),
3184 .ia_gid = current_fsgid(), };
3185
3186 if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
3187 gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
3188 return 0;
3189
3190 return kernfs_setattr(kn, &iattr);
3191 }
3192
3193 static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
3194 {
3195 char name[CGROUP_FILE_NAME_MAX];
3196 struct kernfs_node *kn;
3197 struct lock_class_key *key = NULL;
3198 int ret;
3199
3200 #ifdef CONFIG_DEBUG_LOCK_ALLOC
3201 key = &cft->lockdep_key;
3202 #endif
3203 kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
3204 cgroup_file_mode(cft), 0, cft->kf_ops, cft,
3205 NULL, key);
3206 if (IS_ERR(kn))
3207 return PTR_ERR(kn);
3208
3209 ret = cgroup_kn_set_ugid(kn);
3210 if (ret) {
3211 kernfs_remove(kn);
3212 return ret;
3213 }
3214
3215 if (cft->write == cgroup_procs_write)
3216 cgrp->procs_kn = kn;
3217 else if (cft->seq_show == cgroup_populated_show)
3218 cgrp->populated_kn = kn;
3219 return 0;
3220 }
3221
3222 /**
3223 * cgroup_addrm_files - add or remove files to a cgroup directory
3224 * @cgrp: the target cgroup
3225 * @cfts: array of cftypes to be added
3226 * @is_add: whether to add or remove
3227 *
3228 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
3229 * For removals, this function never fails. If addition fails, this
3230 * function doesn't remove files already added. The caller is responsible
3231 * for cleaning up.
3232 */
3233 static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
3234 bool is_add)
3235 {
3236 struct cftype *cft;
3237 int ret;
3238
3239 lockdep_assert_held(&cgroup_mutex);
3240
3241 for (cft = cfts; cft->name[0] != '\0'; cft++) {
3242 /* does cft->flags tell us to skip this file on @cgrp? */
3243 if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
3244 continue;
3245 if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
3246 continue;
3247 if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
3248 continue;
3249 if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
3250 continue;
3251
3252 if (is_add) {
3253 ret = cgroup_add_file(cgrp, cft);
3254 if (ret) {
3255 pr_warn("%s: failed to add %s, err=%d\n",
3256 __func__, cft->name, ret);
3257 return ret;
3258 }
3259 } else {
3260 cgroup_rm_file(cgrp, cft);
3261 }
3262 }
3263 return 0;
3264 }
3265
3266 static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
3267 {
3268 LIST_HEAD(pending);
3269 struct cgroup_subsys *ss = cfts[0].ss;
3270 struct cgroup *root = &ss->root->cgrp;
3271 struct cgroup_subsys_state *css;
3272 int ret = 0;
3273
3274 lockdep_assert_held(&cgroup_mutex);
3275
3276 /* add/rm files for all cgroups created before */
3277 css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
3278 struct cgroup *cgrp = css->cgroup;
3279
3280 if (cgroup_is_dead(cgrp))
3281 continue;
3282
3283 ret = cgroup_addrm_files(cgrp, cfts, is_add);
3284 if (ret)
3285 break;
3286 }
3287
3288 if (is_add && !ret)
3289 kernfs_activate(root->kn);
3290 return ret;
3291 }
3292
3293 static void cgroup_exit_cftypes(struct cftype *cfts)
3294 {
3295 struct cftype *cft;
3296
3297 for (cft = cfts; cft->name[0] != '\0'; cft++) {
3298 /* free copy for custom atomic_write_len, see cgroup_init_cftypes() */
3299 if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
3300 kfree(cft->kf_ops);
3301 cft->kf_ops = NULL;
3302 cft->ss = NULL;
3303
3304 /* revert flags set by cgroup core while adding @cfts */
3305 cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
3306 }
3307 }
3308
3309 static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3310 {
3311 struct cftype *cft;
3312
3313 for (cft = cfts; cft->name[0] != '\0'; cft++) {
3314 struct kernfs_ops *kf_ops;
3315
3316 WARN_ON(cft->ss || cft->kf_ops);
3317
3318 if (cft->seq_start)
3319 kf_ops = &cgroup_kf_ops;
3320 else
3321 kf_ops = &cgroup_kf_single_ops;
3322
3323 /*
3324 * Ugh... if @cft wants a custom max_write_len, we need to
3325 * make a copy of kf_ops to set its atomic_write_len.
3326 */
3327 if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
3328 kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
3329 if (!kf_ops) {
3330 cgroup_exit_cftypes(cfts);
3331 return -ENOMEM;
3332 }
3333 kf_ops->atomic_write_len = cft->max_write_len;
3334 }
3335
3336 cft->kf_ops = kf_ops;
3337 cft->ss = ss;
3338 }
3339
3340 return 0;
3341 }
3342
3343 static int cgroup_rm_cftypes_locked(struct cftype *cfts)
3344 {
3345 lockdep_assert_held(&cgroup_mutex);
3346
3347 if (!cfts || !cfts[0].ss)
3348 return -ENOENT;
3349
3350 list_del(&cfts->node);
3351 cgroup_apply_cftypes(cfts, false);
3352 cgroup_exit_cftypes(cfts);
3353 return 0;
3354 }
3355
3356 /**
3357 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
3358 * @cfts: zero-length name terminated array of cftypes
3359 *
3360 * Unregister @cfts. Files described by @cfts are removed from all
3361 * existing cgroups and all future cgroups won't have them either. This
3362 * function can be called anytime whether @cfts' subsys is attached or not.
3363 *
3364 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
3365 * registered.
3366 */
3367 int cgroup_rm_cftypes(struct cftype *cfts)
3368 {
3369 int ret;
3370
3371 mutex_lock(&cgroup_mutex);
3372 ret = cgroup_rm_cftypes_locked(cfts);
3373 mutex_unlock(&cgroup_mutex);
3374 return ret;
3375 }
3376
3377 /**
3378 * cgroup_add_cftypes - add an array of cftypes to a subsystem
3379 * @ss: target cgroup subsystem
3380 * @cfts: zero-length name terminated array of cftypes
3381 *
3382 * Register @cfts to @ss. Files described by @cfts are created for all
3383 * existing cgroups to which @ss is attached and all future cgroups will
3384 * have them too. This function can be called anytime whether @ss is
3385 * attached or not.
3386 *
3387 * Returns 0 on successful registration, -errno on failure. Note that this
3388 * function currently returns 0 as long as @cfts registration is successful
3389 * even if some file creation attempts on existing cgroups fail.
3390 */
3391 static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3392 {
3393 int ret;
3394
3395 if (!cgroup_ssid_enabled(ss->id))
3396 return 0;
3397
3398 if (!cfts || cfts[0].name[0] == '\0')
3399 return 0;
3400
3401 ret = cgroup_init_cftypes(ss, cfts);
3402 if (ret)
3403 return ret;
3404
3405 mutex_lock(&cgroup_mutex);
3406
3407 list_add_tail(&cfts->node, &ss->cfts);
3408 ret = cgroup_apply_cftypes(cfts, true);
3409 if (ret)
3410 cgroup_rm_cftypes_locked(cfts);
3411
3412 mutex_unlock(&cgroup_mutex);
3413 return ret;
3414 }
3415
3416 /**
3417 * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
3418 * @ss: target cgroup subsystem
3419 * @cfts: zero-length name terminated array of cftypes
3420 *
3421 * Similar to cgroup_add_cftypes() but the added files are only used for
3422 * the default hierarchy.
3423 */
3424 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3425 {
3426 struct cftype *cft;
3427
3428 for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
3429 cft->flags |= __CFTYPE_ONLY_ON_DFL;
3430 return cgroup_add_cftypes(ss, cfts);
3431 }
3432
3433 /**
3434 * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
3435 * @ss: target cgroup subsystem
3436 * @cfts: zero-length name terminated array of cftypes
3437 *
3438 * Similar to cgroup_add_cftypes() but the added files are only used for
3439 * the legacy hierarchies.
3440 */
3441 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3442 {
3443 struct cftype *cft;
3444
3445 /*
3446 * If cgroup_legacy_files_on_dfl, we want to show the legacy files
3447 * on the dfl hierarchy, but only if the target subsystem hasn't
3448 * been updated for the dfl hierarchy yet.
3449 */
3450 if (!cgroup_legacy_files_on_dfl ||
3451 ss->dfl_cftypes != ss->legacy_cftypes) {
3452 for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
3453 cft->flags |= __CFTYPE_NOT_ON_DFL;
3454 }
3455
3456 return cgroup_add_cftypes(ss, cfts);
3457 }
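
/*
 * Registration sketch (illustrative, not part of the original file): a
 * controller describing its files. example_read_u64(), the
 * "example.value" name and example_cgrp_subsys are all hypothetical;
 * the zero-length name entry terminates the array.
 *
 *	static u64 example_read_u64(struct cgroup_subsys_state *css,
 *				    struct cftype *cft)
 *	{
 *		return 0;
 *	}
 *
 *	static struct cftype example_files[] = {
 *		{
 *			.name = "example.value",
 *			.read_u64 = example_read_u64,
 *		},
 *		{ }
 *	};
 *
 *	cgroup_add_legacy_cftypes(&example_cgrp_subsys, example_files);
 */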
3458
3459 /**
3460 * cgroup_task_count - count the number of tasks in a cgroup.
3461 * @cgrp: the cgroup in question
3462 *
3463 * Return the number of tasks in the cgroup.
3464 */
3465 static int cgroup_task_count(const struct cgroup *cgrp)
3466 {
3467 int count = 0;
3468 struct cgrp_cset_link *link;
3469
3470 down_read(&css_set_rwsem);
3471 list_for_each_entry(link, &cgrp->cset_links, cset_link)
3472 count += atomic_read(&link->cset->refcount);
3473 up_read(&css_set_rwsem);
3474 return count;
3475 }
3476
3477 /**
3478 * css_next_child - find the next child of a given css
3479 * @pos: the current position (%NULL to initiate traversal)
3480 * @parent: css whose children to walk
3481 *
3482 * This function returns the next child of @parent and should be called
3483 * under either cgroup_mutex or RCU read lock. The only requirement is
3484 * that @parent and @pos are accessible. The next sibling is guaranteed to
3485 * be returned regardless of their states.
3486 *
3487 * If a subsystem synchronizes ->css_online() and the start of iteration, a
3488 * css which finished ->css_online() is guaranteed to be visible in the
3489 * future iterations and will stay visible until the last reference is put.
3490 * A css which hasn't finished ->css_online() or already finished
3491 * ->css_offline() may show up during traversal. It's each subsystem's
3492 * responsibility to synchronize against on/offlining.
3493 */
3494 struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
3495 struct cgroup_subsys_state *parent)
3496 {
3497 struct cgroup_subsys_state *next;
3498
3499 cgroup_assert_mutex_or_rcu_locked();
3500
3501 /*
3502 * @pos could already have been unlinked from the sibling list.
3503 * Once a cgroup is removed, its ->sibling.next is no longer
3504 * updated when its next sibling changes. CSS_RELEASED is set when
3505 * @pos is taken off list, at which time its next pointer is valid,
3506 * and, as releases are serialized, the one pointed to by the next
3507 * pointer is guaranteed to not have started release yet. This
3508 * implies that if we observe !CSS_RELEASED on @pos in this RCU
3509 * critical section, the one pointed to by its next pointer is
3510 * guaranteed to not have finished its RCU grace period even if we
3511 * have dropped rcu_read_lock() in between iterations.
3512 *
3513 * If @pos has CSS_RELEASED set, its next pointer can't be
3514 * dereferenced; however, as each css is given a monotonically
3515 * increasing unique serial number and always appended to the
3516 * sibling list, the next one can be found by walking the parent's
3517 * children until the first css with higher serial number than
3518 * @pos's. While this path can be slower, it happens iff iteration
3519 * races against release and the race window is very small.
3520 */
3521 if (!pos) {
3522 next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
3523 } else if (likely(!(pos->flags & CSS_RELEASED))) {
3524 next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
3525 } else {
3526 list_for_each_entry_rcu(next, &parent->children, sibling)
3527 if (next->serial_nr > pos->serial_nr)
3528 break;
3529 }
3530
3531 /*
3532 * @next, if not pointing to the head, can be dereferenced and is
3533 * the next sibling.
3534 */
3535 if (&next->sibling != &parent->children)
3536 return next;
3537 return NULL;
3538 }
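
/*
 * Usage sketch (illustrative): callers rarely use css_next_child()
 * directly; the css_for_each_child() wrapper is the usual form, e.g.
 * under RCU as css_has_online_children() below does. visit() is
 * hypothetical.
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent)
 *		visit(child);
 *	rcu_read_unlock();
 */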
3539
3540 /**
3541 * css_next_descendant_pre - find the next descendant for pre-order walk
3542 * @pos: the current position (%NULL to initiate traversal)
3543 * @root: css whose descendants to walk
3544 *
3545 * To be used by css_for_each_descendant_pre(). Find the next descendant
3546 * to visit for pre-order traversal of @root's descendants. @root is
3547 * included in the iteration and the first node to be visited.
3548 *
3549 * While this function requires cgroup_mutex or RCU read locking, it
3550 * doesn't require the whole traversal to be contained in a single critical
3551 * section. This function will return the correct next descendant as long
3552 * as both @pos and @root are accessible and @pos is a descendant of @root.
3553 *
3554 * If a subsystem synchronizes ->css_online() and the start of iteration, a
3555 * css which finished ->css_online() is guaranteed to be visible in the
3556 * future iterations and will stay visible until the last reference is put.
3557 * A css which hasn't finished ->css_online() or already finished
3558 * ->css_offline() may show up during traversal. It's each subsystem's
3559 * responsibility to synchronize against on/offlining.
3560 */
3561 struct cgroup_subsys_state *
3562 css_next_descendant_pre(struct cgroup_subsys_state *pos,
3563 struct cgroup_subsys_state *root)
3564 {
3565 struct cgroup_subsys_state *next;
3566
3567 cgroup_assert_mutex_or_rcu_locked();
3568
3569 /* if first iteration, visit @root */
3570 if (!pos)
3571 return root;
3572
3573 /* visit the first child if exists */
3574 next = css_next_child(NULL, pos);
3575 if (next)
3576 return next;
3577
3578 /* no child, visit my or the closest ancestor's next sibling */
3579 while (pos != root) {
3580 next = css_next_child(pos, pos->parent);
3581 if (next)
3582 return next;
3583 pos = pos->parent;
3584 }
3585
3586 return NULL;
3587 }
3588
3589 /**
3590 * css_rightmost_descendant - return the rightmost descendant of a css
3591 * @pos: css of interest
3592 *
3593 * Return the rightmost descendant of @pos. If there's no descendant, @pos
3594 * is returned. This can be used during pre-order traversal to skip
3595 * subtree of @pos.
3596 *
3597 * While this function requires cgroup_mutex or RCU read locking, it
3598 * doesn't require the whole traversal to be contained in a single critical
3599 * section. This function will return the correct rightmost descendant as
3600 * long as @pos is accessible.
3601 */
3602 struct cgroup_subsys_state *
3603 css_rightmost_descendant(struct cgroup_subsys_state *pos)
3604 {
3605 struct cgroup_subsys_state *last, *tmp;
3606
3607 cgroup_assert_mutex_or_rcu_locked();
3608
3609 do {
3610 last = pos;
3611 /* ->prev isn't RCU safe, walk ->next till the end */
3612 pos = NULL;
3613 css_for_each_child(tmp, last)
3614 pos = tmp;
3615 } while (pos);
3616
3617 return last;
3618 }
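
/*
 * Usage sketch (illustrative): skipping a subtree during a pre-order
 * walk by jumping to its rightmost descendant. should_skip() is
 * hypothetical.
 *
 *	css_for_each_descendant_pre(pos, root) {
 *		if (should_skip(pos))
 *			pos = css_rightmost_descendant(pos);
 *	}
 */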
3619
3620 static struct cgroup_subsys_state *
3621 css_leftmost_descendant(struct cgroup_subsys_state *pos)
3622 {
3623 struct cgroup_subsys_state *last;
3624
3625 do {
3626 last = pos;
3627 pos = css_next_child(NULL, pos);
3628 } while (pos);
3629
3630 return last;
3631 }
3632
3633 /**
3634 * css_next_descendant_post - find the next descendant for post-order walk
3635 * @pos: the current position (%NULL to initiate traversal)
3636 * @root: css whose descendants to walk
3637 *
3638 * To be used by css_for_each_descendant_post(). Find the next descendant
3639 * to visit for post-order traversal of @root's descendants. @root is
3640 * included in the iteration and the last node to be visited.
3641 *
3642 * While this function requires cgroup_mutex or RCU read locking, it
3643 * doesn't require the whole traversal to be contained in a single critical
3644 * section. This function will return the correct next descendant as long
3645 * as both @pos and @root are accessible and @pos is a descendant of
3646 * @root.
3647 *
3648 * If a subsystem synchronizes ->css_online() and the start of iteration, a
3649 * css which finished ->css_online() is guaranteed to be visible in the
3650 * future iterations and will stay visible until the last reference is put.
3651 * A css which hasn't finished ->css_online() or already finished
3652 * ->css_offline() may show up during traversal. It's each subsystem's
3653 * responsibility to synchronize against on/offlining.
3654 */
3655 struct cgroup_subsys_state *
3656 css_next_descendant_post(struct cgroup_subsys_state *pos,
3657 struct cgroup_subsys_state *root)
3658 {
3659 struct cgroup_subsys_state *next;
3660
3661 cgroup_assert_mutex_or_rcu_locked();
3662
3663 /* if first iteration, visit leftmost descendant which may be @root */
3664 if (!pos)
3665 return css_leftmost_descendant(root);
3666
3667 /* if we visited @root, we're done */
3668 if (pos == root)
3669 return NULL;
3670
3671 /* if there's an unvisited sibling, visit its leftmost descendant */
3672 next = css_next_child(pos, pos->parent);
3673 if (next)
3674 return css_leftmost_descendant(next);
3675
3676 /* no sibling left, visit parent */
3677 return pos->parent;
3678 }
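/*
 * Illustrative sketch, not part of the original file: a bottom-up teardown
 * walk. Post-order visits every child before its parent, so per-css state
 * can be torn down without leaving dangling children; my_ss_teardown() is
 * a hypothetical helper.
 */
#if 0	/* example only */
static void example_teardown_subtree(struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_post(pos, root)
		my_ss_teardown(pos);	/* children first, @root last */
	rcu_read_unlock();
}
#endif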
3679
3680 /**
3681 * css_has_online_children - does a css have online children
3682 * @css: the target css
3683 *
3684 * Returns %true if @css has any online children; otherwise, %false. This
3685 * function can be called from any context but the caller is responsible
3686 * for synchronizing against on/offlining as necessary.
3687 */
3688 bool css_has_online_children(struct cgroup_subsys_state *css)
3689 {
3690 struct cgroup_subsys_state *child;
3691 bool ret = false;
3692
3693 rcu_read_lock();
3694 css_for_each_child(child, css) {
3695 if (child->flags & CSS_ONLINE) {
3696 ret = true;
3697 break;
3698 }
3699 }
3700 rcu_read_unlock();
3701 return ret;
3702 }
3703
3704 /**
3705 * css_advance_task_iter - advance a task iterator to the next css_set
3706 * @it: the iterator to advance
3707 *
3708 * Advance @it to the next css_set to walk.
3709 */
3710 static void css_advance_task_iter(struct css_task_iter *it)
3711 {
3712 struct list_head *l = it->cset_pos;
3713 struct cgrp_cset_link *link;
3714 struct css_set *cset;
3715
3716 /* Advance to the next non-empty css_set */
3717 do {
3718 l = l->next;
3719 if (l == it->cset_head) {
3720 it->cset_pos = NULL;
3721 return;
3722 }
3723
3724 if (it->ss) {
3725 cset = container_of(l, struct css_set,
3726 e_cset_node[it->ss->id]);
3727 } else {
3728 link = list_entry(l, struct cgrp_cset_link, cset_link);
3729 cset = link->cset;
3730 }
3731 } while (list_empty(&cset->tasks) && list_empty(&cset->mg_tasks));
3732
3733 it->cset_pos = l;
3734
3735 if (!list_empty(&cset->tasks))
3736 it->task_pos = cset->tasks.next;
3737 else
3738 it->task_pos = cset->mg_tasks.next;
3739
3740 it->tasks_head = &cset->tasks;
3741 it->mg_tasks_head = &cset->mg_tasks;
3742 }
3743
3744 /**
3745 * css_task_iter_start - initiate task iteration
3746 * @css: the css to walk tasks of
3747 * @it: the task iterator to use
3748 *
3749 * Initiate iteration through the tasks of @css. The caller can call
3750 * css_task_iter_next() to walk through the tasks until the function
3751 * returns NULL. On completion of iteration, css_task_iter_end() must be
3752 * called.
3753 *
3754 * Note that this function acquires a lock which is released when the
3755 * iteration finishes. The caller can't sleep while iteration is in
3756 * progress.
3757 */
3758 void css_task_iter_start(struct cgroup_subsys_state *css,
3759 struct css_task_iter *it)
3760 __acquires(css_set_rwsem)
3761 {
3762 /* no one should try to iterate before mounting cgroups */
3763 WARN_ON_ONCE(!use_task_css_set_links);
3764
3765 down_read(&css_set_rwsem);
3766
3767 it->ss = css->ss;
3768
3769 if (it->ss)
3770 it->cset_pos = &css->cgroup->e_csets[css->ss->id];
3771 else
3772 it->cset_pos = &css->cgroup->cset_links;
3773
3774 it->cset_head = it->cset_pos;
3775
3776 css_advance_task_iter(it);
3777 }
3778
3779 /**
3780 * css_task_iter_next - return the next task for the iterator
3781 * @it: the task iterator being iterated
3782 *
3783 * The "next" function for task iteration. @it should have been
3784 * initialized via css_task_iter_start(). Returns NULL when the iteration
3785 * reaches the end.
3786 */
3787 struct task_struct *css_task_iter_next(struct css_task_iter *it)
3788 {
3789 struct task_struct *res;
3790 struct list_head *l = it->task_pos;
3791
3792 /* If the iterator's cset position is NULL, we have no tasks */
3793 if (!it->cset_pos)
3794 return NULL;
3795 res = list_entry(l, struct task_struct, cg_list);
3796
3797 /*
3798 * Advance iterator to find next entry. cset->tasks is consumed
3799 * first and then ->mg_tasks. After ->mg_tasks, we move onto the
3800 * next cset.
3801 */
3802 l = l->next;
3803
3804 if (l == it->tasks_head)
3805 l = it->mg_tasks_head->next;
3806
3807 if (l == it->mg_tasks_head)
3808 css_advance_task_iter(it);
3809 else
3810 it->task_pos = l;
3811
3812 return res;
3813 }
3814
3815 /**
3816 * css_task_iter_end - finish task iteration
3817 * @it: the task iterator to finish
3818 *
3819 * Finish task iteration started by css_task_iter_start().
3820 */
3821 void css_task_iter_end(struct css_task_iter *it)
3822 __releases(css_set_rwsem)
3823 {
3824 up_read(&css_set_rwsem);
3825 }
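/*
 * Illustrative sketch, not part of the original file: counting the tasks
 * attached to a css with the iterator above. css_set_rwsem is read-held
 * between _start() and _end(), so the loop body must not sleep.
 */
#if 0	/* example only */
static int example_count_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *task;
	int count = 0;

	css_task_iter_start(css, &it);
	while ((task = css_task_iter_next(&it)))
		count++;
	css_task_iter_end(&it);
	return count;
}
#endif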
3826
3827 /**
3828 * cgroup_transfer_tasks - move tasks from one cgroup to another
3829 * @to: cgroup to which the tasks will be moved
3830 * @from: cgroup in which the tasks currently reside
3831 *
3832 * Locking rules between cgroup_post_fork() and the migration path
3833 * guarantee that, if a task is forking while being migrated, the new child
3834 * is guaranteed to be either visible in the source cgroup after the
3835 * parent's migration is complete or put into the target cgroup. No task
3836 * can slip out of migration through forking.
3837 */
3838 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
3839 {
3840 LIST_HEAD(preloaded_csets);
3841 struct cgrp_cset_link *link;
3842 struct css_task_iter it;
3843 struct task_struct *task;
3844 int ret;
3845
3846 mutex_lock(&cgroup_mutex);
3847
3848 /* all tasks in @from are being moved, all csets are source */
3849 down_read(&css_set_rwsem);
3850 list_for_each_entry(link, &from->cset_links, cset_link)
3851 cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
3852 up_read(&css_set_rwsem);
3853
3854 ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
3855 if (ret)
3856 goto out_err;
3857
3858 /*
3859 * Migrate tasks one-by-one until @from is empty. This fails iff
3860 * ->can_attach() fails.
3861 */
3862 do {
3863 css_task_iter_start(&from->self, &it);
3864 task = css_task_iter_next(&it);
3865 if (task)
3866 get_task_struct(task);
3867 css_task_iter_end(&it);
3868
3869 if (task) {
3870 ret = cgroup_migrate(to, task, false);
3871 put_task_struct(task);
3872 }
3873 } while (task && !ret);
3874 out_err:
3875 cgroup_migrate_finish(&preloaded_csets);
3876 mutex_unlock(&cgroup_mutex);
3877 return ret;
3878 }
3879
3880 /*
3881 * Stuff for reading the 'tasks'/'procs' files.
3882 *
3883 * Reading these files can return large amounts of data if a cgroup has
3884 * *lots* of attached tasks, so they may need several calls to read(),
3885 * but we cannot guarantee that the information we produce is correct
3886 * unless we produce it entirely atomically.
3887 *
3888 */
3889
3890 /* which pidlist file are we talking about? */
3891 enum cgroup_filetype {
3892 CGROUP_FILE_PROCS,
3893 CGROUP_FILE_TASKS,
3894 };
3895
3896 /*
3897 * A pidlist is a list of pids that virtually represents the contents of one
3898 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
3899 * a pair (one each for procs, tasks) for each pid namespace that's relevant
3900 * to the cgroup.
3901 */
3902 struct cgroup_pidlist {
3903 /*
3904 * used to find which pidlist is wanted. doesn't change as long as
3905 * this particular list stays in the list.
3906 */
3907 struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
3908 /* array of pids (tgids for the procs file) */
3909 pid_t *list;
3910 /* how many elements the above list has */
3911 int length;
3912 /* each of these stored in a list by its cgroup */
3913 struct list_head links;
3914 /* pointer to the cgroup we belong to, for list removal purposes */
3915 struct cgroup *owner;
3916 /* for delayed destruction */
3917 struct delayed_work destroy_dwork;
3918 };
3919
3920 /*
3921 * The following two functions "fix" the issue where there are more pids
3922 * than kmalloc will give memory for; in such cases, we fall back to
3923 * vmalloc, and kvfree() handles freeing either kind.
3923 * TODO: replace with a kernel-wide solution to this problem
3924 */
3925 #define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
3926 static void *pidlist_allocate(int count)
3927 {
3928 if (PIDLIST_TOO_LARGE(count))
3929 return vmalloc(count * sizeof(pid_t));
3930 else
3931 return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
3932 }
3933
3934 static void pidlist_free(void *p)
3935 {
3936 kvfree(p);
3937 }
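/*
 * Worked example, assuming 4K pages and a 4-byte pid_t: the threshold in
 * PIDLIST_TOO_LARGE() is 2 * 4096 = 8192 bytes, i.e. 2048 entries. A
 * pidlist of 3000 pids needs 12000 bytes and is vmalloc'ed, while one of
 * 2000 pids (8000 bytes) comes from kmalloc(). kvfree() above frees
 * either kind, which is why pidlist_free() needs no size check.
 */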
3938
3939 /*
3940 * Used to destroy all pidlists lingering waiting for destroy timer. None
3941 * should be left afterwards.
3942 */
3943 static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
3944 {
3945 struct cgroup_pidlist *l, *tmp_l;
3946
3947 mutex_lock(&cgrp->pidlist_mutex);
3948 list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
3949 mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
3950 mutex_unlock(&cgrp->pidlist_mutex);
3951
3952 flush_workqueue(cgroup_pidlist_destroy_wq);
3953 BUG_ON(!list_empty(&cgrp->pidlists));
3954 }
3955
3956 static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
3957 {
3958 struct delayed_work *dwork = to_delayed_work(work);
3959 struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
3960 destroy_dwork);
3961 struct cgroup_pidlist *tofree = NULL;
3962
3963 mutex_lock(&l->owner->pidlist_mutex);
3964
3965 /*
3966 * Destroy iff we didn't get queued again. The state won't change
3967 * as destroy_dwork can only be queued while locked.
3968 */
3969 if (!delayed_work_pending(dwork)) {
3970 list_del(&l->links);
3971 pidlist_free(l->list);
3972 put_pid_ns(l->key.ns);
3973 tofree = l;
3974 }
3975
3976 mutex_unlock(&l->owner->pidlist_mutex);
3977 kfree(tofree);
3978 }
3979
3980 /*
3981 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
3982 * Returns the number of unique elements.
3983 */
3984 static int pidlist_uniq(pid_t *list, int length)
3985 {
3986 int src, dest = 1;
3987
3988 /*
3989 * we presume the 0th element is unique, so src starts at 1. trivial
3990 * edge cases first; no work needs to be done for length 0 or 1
3991 */
3992 if (length == 0 || length == 1)
3993 return length;
3994 /* src and dest walk down the list; dest counts unique elements */
3995 for (src = 1; src < length; src++) {
3996 /* find next unique element */
3997 while (list[src] == list[src-1]) {
3998 src++;
3999 if (src == length)
4000 goto after;
4001 }
4002 /* dest always points to where the next unique element goes */
4003 list[dest] = list[src];
4004 dest++;
4005 }
4006 after:
4007 return dest;
4008 }
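/*
 * Worked example: for the sorted input {5, 5, 7, 7, 9} with length 5,
 * pidlist_uniq() compacts the array to {5, 7, 9, 7, 9} and returns 3;
 * only the first three (returned) entries are meaningful afterwards.
 */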
4009
4010 /*
4011 * The two pid files - tasks and cgroup.procs - guarantee that the result
4012 * is sorted, which forced this whole pidlist fiasco. As pid order is
4013 * different per namespace, each namespace needs a differently sorted list,
4014 * making it impossible to use, for example, single rbtree of member tasks
4015 * sorted by task pointer. As pidlists can be fairly large, allocating one
4016 * per open file is dangerous, so cgroup had to implement shared pool of
4017 * pidlists keyed by cgroup and namespace.
4018 *
4019 * All this extra complexity was caused by the original implementation
4020 * committing to an entirely unnecessary property. In the long term, we
4021 * want to do away with it. Explicitly scramble sort order if on the
4022 * default hierarchy so that no such expectation exists in the new
4023 * interface.
4024 *
4025 * Scrambling is done by swapping every two consecutive bits, which is a
4026 * non-identity one-to-one mapping that disturbs sort order sufficiently.
4027 */
4028 static pid_t pid_fry(pid_t pid)
4029 {
4030 unsigned a = pid & 0x55555555;
4031 unsigned b = pid & 0xAAAAAAAA;
4032
4033 return (a << 1) | (b >> 1);
4034 }
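/*
 * Worked example: pid_fry(5) takes 0b0101, keeps the even-position bits
 * in a (0b0101) and the odd-position bits in b (0b0000), and returns
 * (0b0101 << 1) | (0b0000 >> 1) = 0b1010 = 10; likewise pid_fry(6) = 9.
 * Swapping each bit pair is its own inverse, so the mapping is trivially
 * one-to-one.
 */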
4035
4036 static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
4037 {
4038 if (cgroup_on_dfl(cgrp))
4039 return pid_fry(pid);
4040 else
4041 return pid;
4042 }
4043
4044 static int cmppid(const void *a, const void *b)
4045 {
4046 return *(pid_t *)a - *(pid_t *)b;
4047 }
4048
4049 static int fried_cmppid(const void *a, const void *b)
4050 {
4051 return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
4052 }
4053
4054 static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
4055 enum cgroup_filetype type)
4056 {
4057 struct cgroup_pidlist *l;
4058 /* don't need task_nsproxy() if we're looking at ourselves */
4059 struct pid_namespace *ns = task_active_pid_ns(current);
4060
4061 lockdep_assert_held(&cgrp->pidlist_mutex);
4062
4063 list_for_each_entry(l, &cgrp->pidlists, links)
4064 if (l->key.type == type && l->key.ns == ns)
4065 return l;
4066 return NULL;
4067 }
4068
4069 /*
4070 * find the appropriate pidlist for our purpose (given procs vs tasks)
4071 * returns with the lock on that pidlist already held, and takes care
4072 * of the use count, or returns NULL with no locks held if we're out of
4073 * memory.
4074 */
4075 static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
4076 enum cgroup_filetype type)
4077 {
4078 struct cgroup_pidlist *l;
4079
4080 lockdep_assert_held(&cgrp->pidlist_mutex);
4081
4082 l = cgroup_pidlist_find(cgrp, type);
4083 if (l)
4084 return l;
4085
4086 /* entry not found; create a new one */
4087 l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
4088 if (!l)
4089 return l;
4090
4091 INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
4092 l->key.type = type;
4093 /* don't need task_nsproxy() if we're looking at ourselves */
4094 l->key.ns = get_pid_ns(task_active_pid_ns(current));
4095 l->owner = cgrp;
4096 list_add(&l->links, &cgrp->pidlists);
4097 return l;
4098 }
4099
4100 /*
4101 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
4102 */
4103 static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
4104 struct cgroup_pidlist **lp)
4105 {
4106 pid_t *array;
4107 int length;
4108 int pid, n = 0; /* used for populating the array */
4109 struct css_task_iter it;
4110 struct task_struct *tsk;
4111 struct cgroup_pidlist *l;
4112
4113 lockdep_assert_held(&cgrp->pidlist_mutex);
4114
4115 /*
4116 * If cgroup gets more users after we read count, we won't have
4117 * enough space - tough. This race is indistinguishable to the
4118 * caller from the case that the additional cgroup users didn't
4119 * show up until sometime later on.
4120 */
4121 length = cgroup_task_count(cgrp);
4122 array = pidlist_allocate(length);
4123 if (!array)
4124 return -ENOMEM;
4125 /* now, populate the array */
4126 css_task_iter_start(&cgrp->self, &it);
4127 while ((tsk = css_task_iter_next(&it))) {
4128 if (unlikely(n == length))
4129 break;
4130 /* get tgid or pid for procs or tasks file respectively */
4131 if (type == CGROUP_FILE_PROCS)
4132 pid = task_tgid_vnr(tsk);
4133 else
4134 pid = task_pid_vnr(tsk);
4135 if (pid > 0) /* make sure to only use valid results */
4136 array[n++] = pid;
4137 }
4138 css_task_iter_end(&it);
4139 length = n;
4140 /* now sort & (if procs) strip out duplicates */
4141 if (cgroup_on_dfl(cgrp))
4142 sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
4143 else
4144 sort(array, length, sizeof(pid_t), cmppid, NULL);
4145 if (type == CGROUP_FILE_PROCS)
4146 length = pidlist_uniq(array, length);
4147
4148 l = cgroup_pidlist_find_create(cgrp, type);
4149 if (!l) {
4150 pidlist_free(array);
4151 return -ENOMEM;
4152 }
4153
4154 /* store array, freeing old if necessary */
4155 pidlist_free(l->list);
4156 l->list = array;
4157 l->length = length;
4158 *lp = l;
4159 return 0;
4160 }
4161
4162 /**
4163 * cgroupstats_build - build and fill cgroupstats
4164 * @stats: cgroupstats to fill information into
4165 * @dentry: A dentry entry belonging to the cgroup for which stats have
4166 * been requested.
4167 *
4168 * Build and fill cgroupstats so that taskstats can export it to user
4169 * space.
4170 */
4171 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
4172 {
4173 struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
4174 struct cgroup *cgrp;
4175 struct css_task_iter it;
4176 struct task_struct *tsk;
4177
4178 /* it should be kernfs_node belonging to cgroupfs and is a directory */
4179 if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
4180 kernfs_type(kn) != KERNFS_DIR)
4181 return -EINVAL;
4182
4183 mutex_lock(&cgroup_mutex);
4184
4185 /*
4186 * We aren't being called from kernfs and there's no guarantee on
4187 * @kn->priv's validity. For this and css_tryget_online_from_dir(),
4188 * @kn->priv is RCU safe. Let's do the RCU dancing.
4189 */
4190 rcu_read_lock();
4191 cgrp = rcu_dereference(kn->priv);
4192 if (!cgrp || cgroup_is_dead(cgrp)) {
4193 rcu_read_unlock();
4194 mutex_unlock(&cgroup_mutex);
4195 return -ENOENT;
4196 }
4197 rcu_read_unlock();
4198
4199 css_task_iter_start(&cgrp->self, &it);
4200 while ((tsk = css_task_iter_next(&it))) {
4201 switch (tsk->state) {
4202 case TASK_RUNNING:
4203 stats->nr_running++;
4204 break;
4205 case TASK_INTERRUPTIBLE:
4206 stats->nr_sleeping++;
4207 break;
4208 case TASK_UNINTERRUPTIBLE:
4209 stats->nr_uninterruptible++;
4210 break;
4211 case TASK_STOPPED:
4212 stats->nr_stopped++;
4213 break;
4214 default:
4215 if (delayacct_is_task_waiting_on_io(tsk))
4216 stats->nr_io_wait++;
4217 break;
4218 }
4219 }
4220 css_task_iter_end(&it);
4221
4222 mutex_unlock(&cgroup_mutex);
4223 return 0;
4224 }
4225
4226
4227 /*
4228 * seq_file methods for the tasks/procs files. The seq_file position is the
4229 * next pid to display; the seq_file iterator is a pointer to the pid
4230 * in the pidlist's ->list array.
4231 */
4232
4233 static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
4234 {
4235 /*
4236 * Initially we receive a position value that corresponds to
4237 * one more than the last pid shown (or 0 on the first call or
4238 * after a seek to the start). Use a binary search to find the
4239 * next pid to display, if any.
4240 */
4241 struct kernfs_open_file *of = s->private;
4242 struct cgroup *cgrp = seq_css(s)->cgroup;
4243 struct cgroup_pidlist *l;
4244 enum cgroup_filetype type = seq_cft(s)->private;
4245 int index = 0, pid = *pos;
4246 int *iter, ret;
4247
4248 mutex_lock(&cgrp->pidlist_mutex);
4249
4250 /*
4251 * !NULL @of->priv indicates that this isn't the first start()
4252 * after open. If the matching pidlist is around, we can use that.
4253 * Look for it. Note that @of->priv can't be used directly. It
4254 * could already have been destroyed.
4255 */
4256 if (of->priv)
4257 of->priv = cgroup_pidlist_find(cgrp, type);
4258
4259 /*
4260 * Either this is the first start() after open or the matching
4261 * pidlist has been destroyed in between. Create a new one.
4262 */
4263 if (!of->priv) {
4264 ret = pidlist_array_load(cgrp, type,
4265 (struct cgroup_pidlist **)&of->priv);
4266 if (ret)
4267 return ERR_PTR(ret);
4268 }
4269 l = of->priv;
4270
4271 if (pid) {
4272 int end = l->length;
4273
4274 while (index < end) {
4275 int mid = (index + end) / 2;
4276 if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
4277 index = mid;
4278 break;
4279 } else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
4280 index = mid + 1;
4281 else
4282 end = mid;
4283 }
4284 }
4285 /* If we're off the end of the array, we're done */
4286 if (index >= l->length)
4287 return NULL;
4288 /* Update the abstract position to be the actual pid that we found */
4289 iter = l->list + index;
4290 *pos = cgroup_pid_fry(cgrp, *iter);
4291 return iter;
4292 }
4293
4294 static void cgroup_pidlist_stop(struct seq_file *s, void *v)
4295 {
4296 struct kernfs_open_file *of = s->private;
4297 struct cgroup_pidlist *l = of->priv;
4298
4299 if (l)
4300 mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
4301 CGROUP_PIDLIST_DESTROY_DELAY);
4302 mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
4303 }
4304
4305 static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
4306 {
4307 struct kernfs_open_file *of = s->private;
4308 struct cgroup_pidlist *l = of->priv;
4309 pid_t *p = v;
4310 pid_t *end = l->list + l->length;
4311 /*
4312 * Advance to the next pid in the array. If this goes off the
4313 * end, we're done
4314 */
4315 p++;
4316 if (p >= end) {
4317 return NULL;
4318 } else {
4319 *pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
4320 return p;
4321 }
4322 }
4323
4324 static int cgroup_pidlist_show(struct seq_file *s, void *v)
4325 {
4326 seq_printf(s, "%d\n", *(int *)v);
4327
4328 return 0;
4329 }
4330
4331 static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
4332 struct cftype *cft)
4333 {
4334 return notify_on_release(css->cgroup);
4335 }
4336
4337 static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
4338 struct cftype *cft, u64 val)
4339 {
4340 if (val)
4341 set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
4342 else
4343 clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
4344 return 0;
4345 }
4346
4347 static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
4348 struct cftype *cft)
4349 {
4350 return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
4351 }
4352
4353 static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
4354 struct cftype *cft, u64 val)
4355 {
4356 if (val)
4357 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
4358 else
4359 clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
4360 return 0;
4361 }
4362
4363 /* cgroup core interface files for the default hierarchy */
4364 static struct cftype cgroup_dfl_base_files[] = {
4365 {
4366 .name = "cgroup.procs",
4367 .seq_start = cgroup_pidlist_start,
4368 .seq_next = cgroup_pidlist_next,
4369 .seq_stop = cgroup_pidlist_stop,
4370 .seq_show = cgroup_pidlist_show,
4371 .private = CGROUP_FILE_PROCS,
4372 .write = cgroup_procs_write,
4373 .mode = S_IRUGO | S_IWUSR,
4374 },
4375 {
4376 .name = "cgroup.controllers",
4377 .flags = CFTYPE_ONLY_ON_ROOT,
4378 .seq_show = cgroup_root_controllers_show,
4379 },
4380 {
4381 .name = "cgroup.controllers",
4382 .flags = CFTYPE_NOT_ON_ROOT,
4383 .seq_show = cgroup_controllers_show,
4384 },
4385 {
4386 .name = "cgroup.subtree_control",
4387 .seq_show = cgroup_subtree_control_show,
4388 .write = cgroup_subtree_control_write,
4389 },
4390 {
4391 .name = "cgroup.populated",
4392 .flags = CFTYPE_NOT_ON_ROOT,
4393 .seq_show = cgroup_populated_show,
4394 },
4395 { } /* terminate */
4396 };
4397
4398 /* cgroup core interface files for the legacy hierarchies */
4399 static struct cftype cgroup_legacy_base_files[] = {
4400 {
4401 .name = "cgroup.procs",
4402 .seq_start = cgroup_pidlist_start,
4403 .seq_next = cgroup_pidlist_next,
4404 .seq_stop = cgroup_pidlist_stop,
4405 .seq_show = cgroup_pidlist_show,
4406 .private = CGROUP_FILE_PROCS,
4407 .write = cgroup_procs_write,
4408 .mode = S_IRUGO | S_IWUSR,
4409 },
4410 {
4411 .name = "cgroup.clone_children",
4412 .read_u64 = cgroup_clone_children_read,
4413 .write_u64 = cgroup_clone_children_write,
4414 },
4415 {
4416 .name = "cgroup.sane_behavior",
4417 .flags = CFTYPE_ONLY_ON_ROOT,
4418 .seq_show = cgroup_sane_behavior_show,
4419 },
4420 {
4421 .name = "tasks",
4422 .seq_start = cgroup_pidlist_start,
4423 .seq_next = cgroup_pidlist_next,
4424 .seq_stop = cgroup_pidlist_stop,
4425 .seq_show = cgroup_pidlist_show,
4426 .private = CGROUP_FILE_TASKS,
4427 .write = cgroup_tasks_write,
4428 .mode = S_IRUGO | S_IWUSR,
4429 },
4430 {
4431 .name = "notify_on_release",
4432 .read_u64 = cgroup_read_notify_on_release,
4433 .write_u64 = cgroup_write_notify_on_release,
4434 },
4435 {
4436 .name = "release_agent",
4437 .flags = CFTYPE_ONLY_ON_ROOT,
4438 .seq_show = cgroup_release_agent_show,
4439 .write = cgroup_release_agent_write,
4440 .max_write_len = PATH_MAX - 1,
4441 },
4442 { } /* terminate */
4443 };
4444
4445 /**
4446 * cgroup_populate_dir - create subsys files in a cgroup directory
4447 * @cgrp: target cgroup
4448 * @subsys_mask: mask of the subsystem ids whose files should be added
4449 *
4450 * On failure, no file is added.
4451 */
4452 static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
4453 {
4454 struct cgroup_subsys *ss;
4455 int i, ret = 0;
4456
4457 /* process cftsets of each subsystem */
4458 for_each_subsys(ss, i) {
4459 struct cftype *cfts;
4460
4461 if (!(subsys_mask & (1 << i)))
4462 continue;
4463
4464 list_for_each_entry(cfts, &ss->cfts, node) {
4465 ret = cgroup_addrm_files(cgrp, cfts, true);
4466 if (ret < 0)
4467 goto err;
4468 }
4469 }
4470 return 0;
4471 err:
4472 cgroup_clear_dir(cgrp, subsys_mask);
4473 return ret;
4474 }
4475
4476 /*
4477 * css destruction is a four-stage process.
4478 *
4479 * 1. Destruction starts. Killing of the percpu_ref is initiated.
4480 * Implemented in kill_css().
4481 *
4482 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
4483 * and thus css_tryget_online() is guaranteed to fail, the css can be
4484 * offlined by invoking offline_css(). After offlining, the base ref is
4485 * put. Implemented in css_killed_work_fn().
4486 *
4487 * 3. When the percpu_ref reaches zero, the only possible remaining
4488 * accessors are inside RCU read sections. css_release() schedules the
4489 * RCU callback.
4490 *
4491 * 4. After the grace period, the css can be freed. Implemented in
4492 * css_free_work_fn().
4493 *
4494 * It is actually hairier than that because steps 2 and 4 require process
4495 * context and thus involve punting to css->destroy_work, adding two
4496 * additional steps to the already complex sequence.
4497 */
4498 static void css_free_work_fn(struct work_struct *work)
4499 {
4500 struct cgroup_subsys_state *css =
4501 container_of(work, struct cgroup_subsys_state, destroy_work);
4502 struct cgroup_subsys *ss = css->ss;
4503 struct cgroup *cgrp = css->cgroup;
4504
4505 percpu_ref_exit(&css->refcnt);
4506
4507 if (ss) {
4508 /* css free path */
4509 int id = css->id;
4510
4511 if (css->parent)
4512 css_put(css->parent);
4513
4514 ss->css_free(css);
4515 cgroup_idr_remove(&ss->css_idr, id);
4516 cgroup_put(cgrp);
4517 } else {
4518 /* cgroup free path */
4519 atomic_dec(&cgrp->root->nr_cgrps);
4520 cgroup_pidlist_destroy_all(cgrp);
4521 cancel_work_sync(&cgrp->release_agent_work);
4522
4523 if (cgroup_parent(cgrp)) {
4524 /*
4525 * We get a ref to the parent, and put the ref when
4526 * this cgroup is being freed, so it's guaranteed
4527 * that the parent won't be destroyed before its
4528 * children.
4529 */
4530 cgroup_put(cgroup_parent(cgrp));
4531 kernfs_put(cgrp->kn);
4532 kfree(cgrp);
4533 } else {
4534 /*
4535 * This is root cgroup's refcnt reaching zero,
4536 * which indicates that the root should be
4537 * released.
4538 */
4539 cgroup_destroy_root(cgrp->root);
4540 }
4541 }
4542 }
4543
4544 static void css_free_rcu_fn(struct rcu_head *rcu_head)
4545 {
4546 struct cgroup_subsys_state *css =
4547 container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
4548
4549 INIT_WORK(&css->destroy_work, css_free_work_fn);
4550 queue_work(cgroup_destroy_wq, &css->destroy_work);
4551 }
4552
4553 static void css_release_work_fn(struct work_struct *work)
4554 {
4555 struct cgroup_subsys_state *css =
4556 container_of(work, struct cgroup_subsys_state, destroy_work);
4557 struct cgroup_subsys *ss = css->ss;
4558 struct cgroup *cgrp = css->cgroup;
4559
4560 mutex_lock(&cgroup_mutex);
4561
4562 css->flags |= CSS_RELEASED;
4563 list_del_rcu(&css->sibling);
4564
4565 if (ss) {
4566 /* css release path */
4567 cgroup_idr_replace(&ss->css_idr, NULL, css->id);
4568 if (ss->css_released)
4569 ss->css_released(css);
4570 } else {
4571 /* cgroup release path */
4572 cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
4573 cgrp->id = -1;
4574
4575 /*
4576 * There are two control paths which try to determine
4577 * cgroup from dentry without going through kernfs -
4578 * cgroupstats_build() and css_tryget_online_from_dir().
4579 * Those are supported by RCU protecting clearing of
4580 * cgrp->kn->priv backpointer.
4581 */
4582 RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
4583 }
4584
4585 mutex_unlock(&cgroup_mutex);
4586
4587 call_rcu(&css->rcu_head, css_free_rcu_fn);
4588 }
4589
4590 static void css_release(struct percpu_ref *ref)
4591 {
4592 struct cgroup_subsys_state *css =
4593 container_of(ref, struct cgroup_subsys_state, refcnt);
4594
4595 INIT_WORK(&css->destroy_work, css_release_work_fn);
4596 queue_work(cgroup_destroy_wq, &css->destroy_work);
4597 }
4598
4599 static void init_and_link_css(struct cgroup_subsys_state *css,
4600 struct cgroup_subsys *ss, struct cgroup *cgrp)
4601 {
4602 lockdep_assert_held(&cgroup_mutex);
4603
4604 cgroup_get(cgrp);
4605
4606 memset(css, 0, sizeof(*css));
4607 css->cgroup = cgrp;
4608 css->ss = ss;
4609 INIT_LIST_HEAD(&css->sibling);
4610 INIT_LIST_HEAD(&css->children);
4611 css->serial_nr = css_serial_nr_next++;
4612
4613 if (cgroup_parent(cgrp)) {
4614 css->parent = cgroup_css(cgroup_parent(cgrp), ss);
4615 css_get(css->parent);
4616 }
4617
4618 BUG_ON(cgroup_css(cgrp, ss));
4619 }
4620
4621 /* invoke ->css_online() on a new CSS and mark it online if successful */
4622 static int online_css(struct cgroup_subsys_state *css)
4623 {
4624 struct cgroup_subsys *ss = css->ss;
4625 int ret = 0;
4626
4627 lockdep_assert_held(&cgroup_mutex);
4628
4629 if (ss->css_online)
4630 ret = ss->css_online(css);
4631 if (!ret) {
4632 css->flags |= CSS_ONLINE;
4633 rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
4634 }
4635 return ret;
4636 }
4637
4638 /* if the CSS is online, invoke ->css_offline() on it and mark it offline */
4639 static void offline_css(struct cgroup_subsys_state *css)
4640 {
4641 struct cgroup_subsys *ss = css->ss;
4642
4643 lockdep_assert_held(&cgroup_mutex);
4644
4645 if (!(css->flags & CSS_ONLINE))
4646 return;
4647
4648 if (ss->css_offline)
4649 ss->css_offline(css);
4650
4651 css->flags &= ~CSS_ONLINE;
4652 RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
4653
4654 wake_up_all(&css->cgroup->offline_waitq);
4655 }
4656
4657 /**
4658 * create_css - create a cgroup_subsys_state
4659 * @cgrp: the cgroup new css will be associated with
4660 * @ss: the subsys of new css
4661 * @visible: whether to create control knobs for the new css or not
4662 *
4663 * Create a new css associated with @cgrp - @ss pair. On success, the new
4664 * css is online and installed in @cgrp with all interface files created if
4665 * @visible. Returns 0 on success, -errno on failure.
4666 */
4667 static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
4668 bool visible)
4669 {
4670 struct cgroup *parent = cgroup_parent(cgrp);
4671 struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
4672 struct cgroup_subsys_state *css;
4673 int err;
4674
4675 lockdep_assert_held(&cgroup_mutex);
4676
4677 css = ss->css_alloc(parent_css);
4678 if (IS_ERR(css))
4679 return PTR_ERR(css);
4680
4681 init_and_link_css(css, ss, cgrp);
4682
4683 err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
4684 if (err)
4685 goto err_free_css;
4686
4687 err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
4688 if (err < 0)
4689 goto err_free_percpu_ref;
4690 css->id = err;
4691
4692 if (visible) {
4693 err = cgroup_populate_dir(cgrp, 1 << ss->id);
4694 if (err)
4695 goto err_free_id;
4696 }
4697
4698 /* @css is ready to be brought online now, make it visible */
4699 list_add_tail_rcu(&css->sibling, &parent_css->children);
4700 cgroup_idr_replace(&ss->css_idr, css, css->id);
4701
4702 err = online_css(css);
4703 if (err)
4704 goto err_list_del;
4705
4706 if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
4707 cgroup_parent(parent)) {
4708 pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
4709 current->comm, current->pid, ss->name);
4710 if (!strcmp(ss->name, "memory"))
4711 pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
4712 ss->warned_broken_hierarchy = true;
4713 }
4714
4715 return 0;
4716
4717 err_list_del:
4718 list_del_rcu(&css->sibling);
4719 cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
4720 err_free_id:
4721 cgroup_idr_remove(&ss->css_idr, css->id);
4722 err_free_percpu_ref:
4723 percpu_ref_exit(&css->refcnt);
4724 err_free_css:
4725 call_rcu(&css->rcu_head, css_free_rcu_fn);
4726 return err;
4727 }
4728
4729 static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
4730 umode_t mode)
4731 {
4732 struct cgroup *parent, *cgrp;
4733 struct cgroup_root *root;
4734 struct cgroup_subsys *ss;
4735 struct kernfs_node *kn;
4736 struct cftype *base_files;
4737 int ssid, ret;
4738
4739 /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
4740
4741 if (strchr(name, '\n'))
4742 return -EINVAL;
4743
4744 parent = cgroup_kn_lock_live(parent_kn);
4745 if (!parent)
4746 return -ENODEV;
4747 root = parent->root;
4748
4749 /* allocate the cgroup and its ID, 0 is reserved for the root */
4750 cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
4751 if (!cgrp) {
4752 ret = -ENOMEM;
4753 goto out_unlock;
4754 }
4755
4756 ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
4757 if (ret)
4758 goto out_free_cgrp;
4759
4760 /*
4761 * Temporarily set the pointer to NULL, so idr_find() won't return
4762 * a half-baked cgroup.
4763 */
4764 cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
4765 if (cgrp->id < 0) {
4766 ret = -ENOMEM;
4767 goto out_cancel_ref;
4768 }
4769
4770 init_cgroup_housekeeping(cgrp);
4771
4772 cgrp->self.parent = &parent->self;
4773 cgrp->root = root;
4774
4775 if (notify_on_release(parent))
4776 set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
4777
4778 if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
4779 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
4780
4781 /* create the directory */
4782 kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
4783 if (IS_ERR(kn)) {
4784 ret = PTR_ERR(kn);
4785 goto out_free_id;
4786 }
4787 cgrp->kn = kn;
4788
4789 /*
4790 * This extra ref will be put in cgroup_free_fn() and guarantees
4791 * that @cgrp->kn is always accessible.
4792 */
4793 kernfs_get(kn);
4794
4795 cgrp->self.serial_nr = css_serial_nr_next++;
4796
4797 /* allocation complete, commit to creation */
4798 list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
4799 atomic_inc(&root->nr_cgrps);
4800 cgroup_get(parent);
4801
4802 /*
4803 * @cgrp is now fully operational. If something fails after this
4804 * point, it'll be released via the normal destruction path.
4805 */
4806 cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
4807
4808 ret = cgroup_kn_set_ugid(kn);
4809 if (ret)
4810 goto out_destroy;
4811
4812 if (cgroup_on_dfl(cgrp))
4813 base_files = cgroup_dfl_base_files;
4814 else
4815 base_files = cgroup_legacy_base_files;
4816
4817 ret = cgroup_addrm_files(cgrp, base_files, true);
4818 if (ret)
4819 goto out_destroy;
4820
4821 /* let's create and online css's */
4822 for_each_subsys(ss, ssid) {
4823 if (parent->child_subsys_mask & (1 << ssid)) {
4824 ret = create_css(cgrp, ss,
4825 parent->subtree_control & (1 << ssid));
4826 if (ret)
4827 goto out_destroy;
4828 }
4829 }
4830
4831 /*
4832 * On the default hierarchy, a child doesn't automatically inherit
4833 * subtree_control from the parent. Each is configured manually.
4834 */
4835 if (!cgroup_on_dfl(cgrp)) {
4836 cgrp->subtree_control = parent->subtree_control;
4837 cgroup_refresh_child_subsys_mask(cgrp);
4838 }
4839
4840 kernfs_activate(kn);
4841
4842 ret = 0;
4843 goto out_unlock;
4844
4845 out_free_id:
4846 cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
4847 out_cancel_ref:
4848 percpu_ref_exit(&cgrp->self.refcnt);
4849 out_free_cgrp:
4850 kfree(cgrp);
4851 out_unlock:
4852 cgroup_kn_unlock(parent_kn);
4853 return ret;
4854
4855 out_destroy:
4856 cgroup_destroy_locked(cgrp);
4857 goto out_unlock;
4858 }
4859
4860 /*
4861 * This is called when the refcnt of a css is confirmed to be killed.
4862 * css_tryget_online() is now guaranteed to fail. Tell the subsystem to
4863 * initiate destruction and put the css ref from kill_css().
4864 */
4865 static void css_killed_work_fn(struct work_struct *work)
4866 {
4867 struct cgroup_subsys_state *css =
4868 container_of(work, struct cgroup_subsys_state, destroy_work);
4869
4870 mutex_lock(&cgroup_mutex);
4871 offline_css(css);
4872 mutex_unlock(&cgroup_mutex);
4873
4874 css_put(css);
4875 }
4876
4877 /* css kill confirmation processing requires process context, bounce */
4878 static void css_killed_ref_fn(struct percpu_ref *ref)
4879 {
4880 struct cgroup_subsys_state *css =
4881 container_of(ref, struct cgroup_subsys_state, refcnt);
4882
4883 INIT_WORK(&css->destroy_work, css_killed_work_fn);
4884 queue_work(cgroup_destroy_wq, &css->destroy_work);
4885 }
4886
4887 /**
4888 * kill_css - destroy a css
4889 * @css: css to destroy
4890 *
4891 * This function initiates destruction of @css by removing cgroup interface
4892 * files and putting its base reference. ->css_offline() will be invoked
4893 * asynchronously once css_tryget_online() is guaranteed to fail and when
4894 * the reference count reaches zero, @css will be released.
4895 */
4896 static void kill_css(struct cgroup_subsys_state *css)
4897 {
4898 lockdep_assert_held(&cgroup_mutex);
4899
4900 /*
4901 * This must happen before css is disassociated with its cgroup.
4902 * See seq_css() for details.
4903 */
4904 cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
4905
4906 /*
4907 * Killing would put the base ref, but we need to keep it alive
4908 * until after ->css_offline().
4909 */
4910 css_get(css);
4911
4912 /*
4913 * cgroup core guarantees that, by the time ->css_offline() is
4914 * invoked, no new css reference will be given out via
4915 * css_tryget_online(). We can't simply call percpu_ref_kill() and
4916 * proceed to offlining css's because percpu_ref_kill() doesn't
4917 * guarantee that the ref is seen as killed on all CPUs on return.
4918 *
4919 * Use percpu_ref_kill_and_confirm() to get notifications as each
4920 * css is confirmed to be seen as killed on all CPUs.
4921 */
4922 percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
4923 }
4924
4925 /**
4926 * cgroup_destroy_locked - the first stage of cgroup destruction
4927 * @cgrp: cgroup to be destroyed
4928 *
4929 * css's make use of percpu refcnts whose killing latency shouldn't be
4930 * exposed to userland and which are RCU protected. Also, cgroup core needs to
4931 * guarantee that css_tryget_online() won't succeed by the time
4932 * ->css_offline() is invoked. To satisfy all the requirements,
4933 * destruction is implemented in the following two steps.
4934 *
4935 * s1. Verify @cgrp can be destroyed and mark it dying. Remove all
4936 * userland visible parts and start killing the percpu refcnts of
4937 * css's. Set up so that the next stage will be kicked off once all
4938 * the percpu refcnts are confirmed to be killed.
4939 *
4940 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
4941 * rest of destruction. Once all cgroup references are gone, the
4942 * cgroup is RCU-freed.
4943 *
4944 * This function implements s1. After this step, @cgrp is gone as far as
4945 * the userland is concerned and a new cgroup with the same name may be
4946 * created. As cgroup doesn't care about the names internally, this
4947 * doesn't cause any problem.
4948 */
4949 static int cgroup_destroy_locked(struct cgroup *cgrp)
4950 __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
4951 {
4952 struct cgroup_subsys_state *css;
4953 bool empty;
4954 int ssid;
4955
4956 lockdep_assert_held(&cgroup_mutex);
4957
4958 /*
4959 * css_set_rwsem synchronizes access to ->cset_links and prevents
4960 * @cgrp from being removed while put_css_set() is in progress.
4961 */
4962 down_read(&css_set_rwsem);
4963 empty = list_empty(&cgrp->cset_links);
4964 up_read(&css_set_rwsem);
4965 if (!empty)
4966 return -EBUSY;
4967
4968 /*
4969 * Make sure there's no live children. We can't test emptiness of
4970 * ->self.children as dead children linger on it while being
4971 * drained; otherwise, "rmdir parent/child parent" may fail.
4972 */
4973 if (css_has_online_children(&cgrp->self))
4974 return -EBUSY;
4975
4976 /*
4977 * Mark @cgrp dead. This prevents further task migration and child
4978 * creation by disabling cgroup_lock_live_group().
4979 */
4980 cgrp->self.flags &= ~CSS_ONLINE;
4981
4982 /* initiate massacre of all css's */
4983 for_each_css(css, ssid, cgrp)
4984 kill_css(css);
4985
4986 /*
4987 * Remove @cgrp directory along with the base files. @cgrp has an
4988 * extra ref on its kn.
4989 */
4990 kernfs_remove(cgrp->kn);
4991
4992 check_for_release(cgroup_parent(cgrp));
4993
4994 /* put the base reference */
4995 percpu_ref_kill(&cgrp->self.refcnt);
4996
4997 return 0;
4998 }
4999
5000 static int cgroup_rmdir(struct kernfs_node *kn)
5001 {
5002 struct cgroup *cgrp;
5003 int ret = 0;
5004
5005 cgrp = cgroup_kn_lock_live(kn);
5006 if (!cgrp)
5007 return 0;
5008
5009 ret = cgroup_destroy_locked(cgrp);
5010
5011 cgroup_kn_unlock(kn);
5012 return ret;
5013 }
5014
5015 static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
5016 .remount_fs = cgroup_remount,
5017 .show_options = cgroup_show_options,
5018 .mkdir = cgroup_mkdir,
5019 .rmdir = cgroup_rmdir,
5020 .rename = cgroup_rename,
5021 };
5022
5023 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
5024 {
5025 struct cgroup_subsys_state *css;
5026
5027 printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
5028
5029 mutex_lock(&cgroup_mutex);
5030
5031 idr_init(&ss->css_idr);
5032 INIT_LIST_HEAD(&ss->cfts);
5033
5034 /* Create the root cgroup state for this subsystem */
5035 ss->root = &cgrp_dfl_root;
5036 css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
5037 /* We don't handle early failures gracefully */
5038 BUG_ON(IS_ERR(css));
5039 init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
5040
5041 /*
5042 * Root csses are never destroyed and we can't initialize
5043 * percpu_ref during early init. Disable refcnting.
5044 */
5045 css->flags |= CSS_NO_REF;
5046
5047 if (early) {
5048 /* allocation can't be done safely during early init */
5049 css->id = 1;
5050 } else {
5051 css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
5052 BUG_ON(css->id < 0);
5053 }
5054
5055 /* Update the init_css_set to contain a subsys
5056 * pointer to this state - since the subsystem is
5057 * newly registered, all tasks, and hence the
5058 * init_css_set, are in the subsystem's root cgroup. */
5059 init_css_set.subsys[ss->id] = css;
5060
5061 have_fork_callback |= (bool)ss->fork << ss->id;
5062 have_exit_callback |= (bool)ss->exit << ss->id;
5063 have_canfork_callback |= (bool)ss->can_fork << ss->id;
5064
5065 /* At system boot, before all subsystems have been
5066 * registered, no tasks have been forked, so we don't
5067 * need to invoke fork callbacks here. */
5068 BUG_ON(!list_empty(&init_task.tasks));
5069
5070 BUG_ON(online_css(css));
5071
5072 mutex_unlock(&cgroup_mutex);
5073 }
5074
5075 /**
5076 * cgroup_init_early - cgroup initialization at system boot
5077 *
5078 * Initialize cgroups at system boot, and initialize any
5079 * subsystems that request early init.
5080 */
5081 int __init cgroup_init_early(void)
5082 {
5083 static struct cgroup_sb_opts __initdata opts;
5084 struct cgroup_subsys *ss;
5085 int i;
5086
5087 init_cgroup_root(&cgrp_dfl_root, &opts);
5088 cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;
5089
5090 RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
5091
5092 for_each_subsys(ss, i) {
5093 WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
5094 "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n",
5095 i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
5096 ss->id, ss->name);
5097 WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
5098 "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);
5099
5100 ss->id = i;
5101 ss->name = cgroup_subsys_name[i];
5102 if (!ss->legacy_name)
5103 ss->legacy_name = cgroup_subsys_name[i];
5104
5105 if (ss->early_init)
5106 cgroup_init_subsys(ss, true);
5107 }
5108 return 0;
5109 }
5110
5111 /**
5112 * cgroup_init - cgroup initialization
5113 *
5114 * Register cgroup filesystem and /proc file, and initialize
5115 * any subsystems that didn't request early init.
5116 */
5117 int __init cgroup_init(void)
5118 {
5119 struct cgroup_subsys *ss;
5120 unsigned long key;
5121 int ssid, err;
5122
5123 BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
5124 BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
5125 BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
5126
5127 mutex_lock(&cgroup_mutex);
5128
5129 /* Add init_css_set to the hash table */
5130 key = css_set_hash(init_css_set.subsys);
5131 hash_add(css_set_table, &init_css_set.hlist, key);
5132
5133 BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
5134
5135 mutex_unlock(&cgroup_mutex);
5136
5137 for_each_subsys(ss, ssid) {
5138 if (ss->early_init) {
5139 struct cgroup_subsys_state *css =
5140 init_css_set.subsys[ss->id];
5141
5142 css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
5143 GFP_KERNEL);
5144 BUG_ON(css->id < 0);
5145 } else {
5146 cgroup_init_subsys(ss, false);
5147 }
5148
5149 list_add_tail(&init_css_set.e_cset_node[ssid],
5150 &cgrp_dfl_root.cgrp.e_csets[ssid]);
5151
5152 /*
5153 * Setting dfl_root subsys_mask needs to consider the
5154 * disabled flag and cftype registration needs kmalloc,
5155 * both of which aren't available during early_init.
5156 */
5157 if (!cgroup_ssid_enabled(ssid))
5158 continue;
5159
5160 cgrp_dfl_root.subsys_mask |= 1 << ss->id;
5161
5162 if (cgroup_legacy_files_on_dfl && !ss->dfl_cftypes)
5163 ss->dfl_cftypes = ss->legacy_cftypes;
5164
5165 if (!ss->dfl_cftypes)
5166 cgrp_dfl_root_inhibit_ss_mask |= 1 << ss->id;
5167
5168 if (ss->dfl_cftypes == ss->legacy_cftypes) {
5169 WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
5170 } else {
5171 WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
5172 WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
5173 }
5174
5175 if (ss->bind)
5176 ss->bind(init_css_set.subsys[ssid]);
5177 }
5178
5179 err = sysfs_create_mount_point(fs_kobj, "cgroup");
5180 if (err)
5181 return err;
5182
5183 err = register_filesystem(&cgroup_fs_type);
5184 if (err < 0) {
5185 sysfs_remove_mount_point(fs_kobj, "cgroup");
5186 return err;
5187 }
5188
5189 proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
5190 return 0;
5191 }
5192
5193 static int __init cgroup_wq_init(void)
5194 {
5195 /*
5196 * There isn't much point in executing the destruction path in
5197 * parallel. A good chunk of it is serialized with cgroup_mutex anyway.
5198 * Use 1 for @max_active.
5199 *
5200 * We would prefer to do this in cgroup_init() above, but that
5201 * is called before init_workqueues(): so leave this until after.
5202 */
5203 cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
5204 BUG_ON(!cgroup_destroy_wq);
5205
5206 /*
5207 * Used to destroy pidlists; kept separate so it can serve as an
5208 * independent flush domain. Cap @max_active to 1 too.
5209 */
5210 cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
5211 0, 1);
5212 BUG_ON(!cgroup_pidlist_destroy_wq);
5213
5214 return 0;
5215 }
5216 core_initcall(cgroup_wq_init);
5217
5218 /*
5219 * proc_cgroup_show()
5220 * - Print task's cgroup paths into seq_file, one line for each hierarchy
5221 * - Used for /proc/<pid>/cgroup.
5222 */
5223 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
5224 struct pid *pid, struct task_struct *tsk)
5225 {
5226 char *buf, *path;
5227 int retval;
5228 struct cgroup_root *root;
5229
5230 retval = -ENOMEM;
5231 buf = kmalloc(PATH_MAX, GFP_KERNEL);
5232 if (!buf)
5233 goto out;
5234
5235 mutex_lock(&cgroup_mutex);
5236 down_read(&css_set_rwsem);
5237
5238 for_each_root(root) {
5239 struct cgroup_subsys *ss;
5240 struct cgroup *cgrp;
5241 int ssid, count = 0;
5242
5243 if (root == &cgrp_dfl_root && !cgrp_dfl_root_visible)
5244 continue;
5245
5246 seq_printf(m, "%d:", root->hierarchy_id);
5247 if (root != &cgrp_dfl_root)
5248 for_each_subsys(ss, ssid)
5249 if (root->subsys_mask & (1 << ssid))
5250 seq_printf(m, "%s%s", count++ ? "," : "",
5251 ss->legacy_name);
5252 if (strlen(root->name))
5253 seq_printf(m, "%sname=%s", count ? "," : "",
5254 root->name);
5255 seq_putc(m, ':');
5256 cgrp = task_cgroup_from_root(tsk, root);
5257 path = cgroup_path(cgrp, buf, PATH_MAX);
5258 if (!path) {
5259 retval = -ENAMETOOLONG;
5260 goto out_unlock;
5261 }
5262 seq_puts(m, path);
5263 seq_putc(m, '\n');
5264 }
5265
5266 retval = 0;
5267 out_unlock:
5268 up_read(&css_set_rwsem);
5269 mutex_unlock(&cgroup_mutex);
5270 kfree(buf);
5271 out:
5272 return retval;
5273 }
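/*
 * Example /proc/<pid>/cgroup output (a hypothetical configuration; the
 * actual lines depend on which hierarchies are mounted):
 *
 *   3:cpu,cpuacct:/user
 *   2:memory:/user/app
 *   1:name=systemd:/user.slice/user-1000.slice
 *
 * i.e. hierarchy-id:comma-separated-controllers[,name=...]:path, one
 * line per hierarchy, matching the seq_printf() calls above.
 */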
5274
5275 /* Display information about each subsystem and each hierarchy */
5276 static int proc_cgroupstats_show(struct seq_file *m, void *v)
5277 {
5278 struct cgroup_subsys *ss;
5279 int i;
5280
5281 seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
5282 /*
5283 * ideally we don't want subsystems moving around while we do this.
5284 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
5285 * subsys/hierarchy state.
5286 */
5287 mutex_lock(&cgroup_mutex);
5288
5289 for_each_subsys(ss, i)
5290 seq_printf(m, "%s\t%d\t%d\t%d\n",
5291 ss->legacy_name, ss->root->hierarchy_id,
5292 atomic_read(&ss->root->nr_cgrps),
5293 cgroup_ssid_enabled(i));
5294
5295 mutex_unlock(&cgroup_mutex);
5296 return 0;
5297 }
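/*
 * Example /proc/cgroups output (hypothetical counts):
 *
 *   #subsys_name	hierarchy	num_cgroups	enabled
 *   cpuset	2	4	1
 *   memory	3	17	1
 *
 * One row per subsystem, matching the seq_printf() format above.
 */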
5298
5299 static int cgroupstats_open(struct inode *inode, struct file *file)
5300 {
5301 return single_open(file, proc_cgroupstats_show, NULL);
5302 }
5303
5304 static const struct file_operations proc_cgroupstats_operations = {
5305 .open = cgroupstats_open,
5306 .read = seq_read,
5307 .llseek = seq_lseek,
5308 .release = single_release,
5309 };
5310
5311 static void **subsys_canfork_priv_p(void *ss_priv[CGROUP_CANFORK_COUNT], int i)
5312 {
5313 if (CGROUP_CANFORK_START <= i && i < CGROUP_CANFORK_END)
5314 return &ss_priv[i - CGROUP_CANFORK_START];
5315 return NULL;
5316 }
5317
5318 static void *subsys_canfork_priv(void *ss_priv[CGROUP_CANFORK_COUNT], int i)
5319 {
5320 void **private = subsys_canfork_priv_p(ss_priv, i);
5321 return private ? *private : NULL;
5322 }
5323
5324 /**
5325 * cgroup_fork - initialize cgroup related fields during copy_process()
5326 * @child: pointer to task_struct of the newly forked child process.
5327 *
5328 * A task is associated with the init_css_set until cgroup_post_fork()
5329 * attaches it to the parent's css_set. Empty cg_list indicates that
5330 * @child isn't holding reference to its css_set.
5331 */
5332 void cgroup_fork(struct task_struct *child)
5333 {
5334 RCU_INIT_POINTER(child->cgroups, &init_css_set);
5335 INIT_LIST_HEAD(&child->cg_list);
5336 }
5337
5338 /**
5339 * cgroup_can_fork - called on a new task before the process is exposed
5340 * @child: the task in question.
5341 *
5342 * This calls the subsystem can_fork() callbacks. If the can_fork() callback
5343 * returns an error, the fork aborts with that error code. This allows
5344 * a cgroup subsystem to conditionally allow or deny new forks.
5345 */
5346 int cgroup_can_fork(struct task_struct *child,
5347 void *ss_priv[CGROUP_CANFORK_COUNT])
5348 {
5349 struct cgroup_subsys *ss;
5350 int i, j, ret;
5351
5352 for_each_subsys_which(ss, i, &have_canfork_callback) {
5353 ret = ss->can_fork(child, subsys_canfork_priv_p(ss_priv, i));
5354 if (ret)
5355 goto out_revert;
5356 }
5357
5358 return 0;
5359
5360 out_revert:
5361 for_each_subsys(ss, j) {
5362 if (j >= i)
5363 break;
5364 if (ss->cancel_fork)
5365 ss->cancel_fork(child, subsys_canfork_priv(ss_priv, j));
5366 }
5367
5368 return ret;
5369 }
5370
5371 /**
5372 * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
5373 * @child: the task in question
5374 *
5375 * This calls the cancel_fork() callbacks if a fork failed *after*
5376 * cgroup_can_fork() succeeded.
5377 */
5378 void cgroup_cancel_fork(struct task_struct *child,
5379 void *ss_priv[CGROUP_CANFORK_COUNT])
5380 {
5381 struct cgroup_subsys *ss;
5382 int i;
5383
5384 for_each_subsys(ss, i)
5385 if (ss->cancel_fork)
5386 ss->cancel_fork(child, subsys_canfork_priv(ss_priv, i));
5387 }
5388
5389 /**
5390 * cgroup_post_fork - called on a new task after adding it to the task list
5391 * @child: the task in question
5392 *
5393 * Adds the task to the list running through its css_set if necessary and
5394 * calls the subsystem fork() callbacks. Has to be after the task is
5395 * visible on the task list in case we race with the first call to
5396 * css_task_iter_start() - to guarantee that the new task ends up on its
5397 * list.
5398 */
5399 void cgroup_post_fork(struct task_struct *child,
5400 void *old_ss_priv[CGROUP_CANFORK_COUNT])
5401 {
5402 struct cgroup_subsys *ss;
5403 int i;
5404
5405 /*
5406 * This may race against cgroup_enable_task_cg_lists(). As that
5407 * function sets use_task_css_set_links before grabbing
5408 * tasklist_lock and we just went through tasklist_lock to add
5409 * @child, it's guaranteed that either we see the set
5410 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
5411 * @child during its iteration.
5412 *
5413 * If we won the race, @child is associated with %current's
5414 * css_set. Grabbing css_set_rwsem guarantees both that the
5415 * association is stable, and, on completion of the parent's
5416 * migration, @child is visible in the source of migration or
5417 * already in the destination cgroup. This guarantee is necessary
5418 * when implementing operations which need to migrate all tasks of
5419 * a cgroup to another.
5420 *
5421 * Note that if we lose to cgroup_enable_task_cg_lists(), @child
5422 * will remain in init_css_set. This is safe because all tasks are
5423 * in the init_css_set before cg_links is enabled and there's no
5424 * operation which transfers all tasks out of init_css_set.
5425 */
5426 if (use_task_css_set_links) {
5427 struct css_set *cset;
5428
5429 down_write(&css_set_rwsem);
5430 cset = task_css_set(current);
5431 if (list_empty(&child->cg_list)) {
5432 rcu_assign_pointer(child->cgroups, cset);
5433 list_add(&child->cg_list, &cset->tasks);
5434 get_css_set(cset);
5435 }
5436 up_write(&css_set_rwsem);
5437 }
5438
5439 /*
5440 * Call ss->fork(). This must happen after @child is linked on
5441 * css_set; otherwise, @child might change state between ->fork()
5442 * and addition to css_set.
5443 */
5444 for_each_subsys_which(ss, i, &have_fork_callback)
5445 ss->fork(child, subsys_canfork_priv(old_ss_priv, i));
5446 }
5447
5448 /**
5449 * cgroup_exit - detach cgroup from exiting task
5450 * @tsk: pointer to task_struct of exiting process
5451 *
5452 * Description: Detach cgroup from @tsk and release it.
5453 *
5454 * Note that cgroups marked notify_on_release force every task in
5455 * them to take the global cgroup_mutex mutex when exiting.
5456 * This could impact scaling on very large systems. Be reluctant to
5457 * use notify_on_release cgroups where very high task exit scaling
5458 * is required on large systems.
5459 *
5460 * We set the exiting task's cgroup to the root cgroup (top_cgroup). We
5461 * call cgroup_exit() while the task is still competent to handle
5462 * notify_on_release(), then leave the task attached to the root cgroup in
5463 * each hierarchy for the remainder of its exit. No need to bother with
5464 * init_css_set refcnting. init_css_set never goes away and we can't race
5465 * with migration path - PF_EXITING is visible to migration path.
5466 */
5467 void cgroup_exit(struct task_struct *tsk)
5468 {
5469 struct cgroup_subsys *ss;
5470 struct css_set *cset;
5471 bool put_cset = false;
5472 int i;
5473
5474 /*
5475 * Unlink @tsk from its css_set. As the migration path can't race
5476 * with us, we can check cg_list without grabbing css_set_rwsem.
5477 */
5478 if (!list_empty(&tsk->cg_list)) {
5479 down_write(&css_set_rwsem);
5480 list_del_init(&tsk->cg_list);
5481 up_write(&css_set_rwsem);
5482 put_cset = true;
5483 }
5484
5485 /* Reassign the task to the init_css_set. */
5486 cset = task_css_set(tsk);
5487 RCU_INIT_POINTER(tsk->cgroups, &init_css_set);
5488
5489 /* see cgroup_post_fork() for details */
5490 for_each_subsys_which(ss, i, &have_exit_callback) {
5491 struct cgroup_subsys_state *old_css = cset->subsys[i];
5492 struct cgroup_subsys_state *css = task_css(tsk, i);
5493
5494 ss->exit(css, old_css, tsk);
5495 }
5496
5497 if (put_cset)
5498 put_css_set(cset);
5499 }
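
/*
 * For illustration only - a hedged sketch of an ->exit() callback matching
 * the invocation above: @old_css is the css @tsk was associated with
 * before being reset to the init_css_set, while @css already resolves
 * through the init_css_set. "foo", css_foo() and nr_tasks are
 * hypothetical:
 *
 *	static void foo_exit(struct cgroup_subsys_state *css,
 *			     struct cgroup_subsys_state *old_css,
 *			     struct task_struct *tsk)
 *	{
 *		// uncharge the css the task actually belonged to
 *		atomic_dec(&css_foo(old_css)->nr_tasks);
 *	}
 */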
5500
5501 static void check_for_release(struct cgroup *cgrp)
5502 {
5503 if (notify_on_release(cgrp) && !cgroup_has_tasks(cgrp) &&
5504 !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
5505 schedule_work(&cgrp->release_agent_work);
5506 }
5507
5508 /*
5509 * Notify userspace when a cgroup is released, by running the
5510 * configured release agent with the name of the cgroup (path
5511 * relative to the root of cgroup file system) as the argument.
5512 *
5513 * Most likely, this user command will try to rmdir this cgroup.
5514 *
5515 * This races with the possibility that some other task will be
5516 * attached to this cgroup before it is removed, or that some other
5517 * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
5518 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
5519 * unused, and this cgroup will be reprieved from its death sentence,
5520 * to continue to serve a useful existence. Next time it's released,
5521 * we will get notified again, if it still has 'notify_on_release' set.
5522 *
5523 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
5524 * means only wait until the task is successfully execve()'d. The
5525 * separate release agent task is forked by call_usermodehelper(),
5526 * then control in this thread returns here, without waiting for the
5527 * release agent task. We don't bother to wait because the caller of
5528 * this routine has no use for the exit status of the release agent
5529 * task, so no sense holding our caller up for that.
5530 */
5531 static void cgroup_release_agent(struct work_struct *work)
5532 {
5533 struct cgroup *cgrp =
5534 container_of(work, struct cgroup, release_agent_work);
5535 char *pathbuf = NULL, *agentbuf = NULL, *path;
5536 char *argv[3], *envp[3];
5537
5538 mutex_lock(&cgroup_mutex);
5539
5540 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
5541 agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
5542 if (!pathbuf || !agentbuf)
5543 goto out;
5544
5545 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
5546 if (!path)
5547 goto out;
5548
5549 argv[0] = agentbuf;
5550 argv[1] = path;
5551 argv[2] = NULL;
5552
5553 /* minimal command environment */
5554 envp[0] = "HOME=/";
5555 envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
5556 envp[2] = NULL;
5557
5558 mutex_unlock(&cgroup_mutex);
5559 call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
5560 goto out_free;
5561 out:
5562 mutex_unlock(&cgroup_mutex);
5563 out_free:
5564 kfree(agentbuf);
5565 kfree(pathbuf);
5566 }
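
/*
 * Usage note (illustrative, paths hypothetical): with notify_on_release
 * enabled and "/sbin/my-release-agent" written to the hierarchy's
 * release_agent file, emptying the cgroup /foo/bar results in roughly
 *
 *	/sbin/my-release-agent /foo/bar
 *
 * being execve()'d with the minimal environment constructed above.
 */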
5567
5568 static int __init cgroup_disable(char *str)
5569 {
5570 struct cgroup_subsys *ss;
5571 char *token;
5572 int i;
5573
5574 while ((token = strsep(&str, ",")) != NULL) {
5575 if (!*token)
5576 continue;
5577
5578 for_each_subsys(ss, i) {
5579 if (strcmp(token, ss->name) &&
5580 strcmp(token, ss->legacy_name))
5581 continue;
5582
5583 static_branch_disable(cgroup_subsys_enabled_key[i]);
5584 printk(KERN_INFO "Disabling %s control group subsystem\n",
5585 ss->name);
5586 break;
5587 }
5588 }
5589 return 1;
5590 }
5591 __setup("cgroup_disable=", cgroup_disable);
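 
/*
 * Example (command-line values illustrative): booting with
 *
 *	cgroup_disable=memory,blkio
 *
 * walks the comma-separated list above, matching each token against both
 * the default and the legacy subsystem names; unknown tokens are silently
 * ignored.
 */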
5592
5593 static int __init cgroup_set_legacy_files_on_dfl(char *str)
5594 {
5595 pr_info("using legacy files on the default hierarchy\n");
5596 cgroup_legacy_files_on_dfl = true;
5597 return 1;
5598 }
5599 __setup("cgroup__DEVEL__legacy_files_on_dfl", cgroup_set_legacy_files_on_dfl);
5600
5601 /**
5602 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
5603 * @dentry: directory dentry of interest
5604 * @ss: subsystem of interest
5605 *
5606 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
5607 * to get the corresponding css and return it. If such a css doesn't exist
5608 * or can't be pinned, an ERR_PTR value is returned.
5609 */
5610 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
5611 struct cgroup_subsys *ss)
5612 {
5613 struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
5614 struct cgroup_subsys_state *css = NULL;
5615 struct cgroup *cgrp;
5616
5617 /* is @dentry a cgroup dir? */
5618 if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
5619 kernfs_type(kn) != KERNFS_DIR)
5620 return ERR_PTR(-EBADF);
5621
5622 rcu_read_lock();
5623
5624 /*
5625 * This path doesn't originate from kernfs and @kn could already
5626 * have been or be removed at any point. @kn->priv is RCU
5627 * protected for this access. See css_release_work_fn() for details.
5628 */
5629 cgrp = rcu_dereference(kn->priv);
5630 if (cgrp)
5631 css = cgroup_css(cgrp, ss);
5632
5633 if (!css || !css_tryget_online(css))
5634 css = ERR_PTR(-ENOENT);
5635
5636 rcu_read_unlock();
5637 return css;
5638 }
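
/*
 * A minimal caller sketch (the "foo" subsystem and the surrounding fd
 * handling are assumptions, not code from this file). On success the
 * returned css is pinned and must be dropped with css_put():
 *
 *	css = css_tryget_online_from_dir(dentry, &foo_cgrp_subsys);
 *	if (IS_ERR(css))
 *		return PTR_ERR(css);
 *	// ... use css ...
 *	css_put(css);
 */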
5639
5640 /**
5641 * css_from_id - lookup css by id
5642 * @id: the cgroup id
5643 * @ss: cgroup subsys to be looked into
5644 *
5645 * Returns the css if there's a valid one with @id, otherwise returns NULL.
5646 * Should be called under rcu_read_lock().
5647 */
5648 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
5649 {
5650 WARN_ON_ONCE(!rcu_read_lock_held());
5651 return id > 0 ? idr_find(&ss->css_idr, id) : NULL;
5652 }
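
/*
 * Illustrative use (the "foo" subsystem and the stashed @id are
 * assumptions): the lookup is only valid under rcu_read_lock(), and the
 * result must be pinned before the lock is dropped if it is to be used
 * afterwards:
 *
 *	rcu_read_lock();
 *	css = css_from_id(id, &foo_cgrp_subsys);
 *	if (css && !css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 */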
5653
5654 #ifdef CONFIG_CGROUP_DEBUG
5655 static struct cgroup_subsys_state *
5656 debug_css_alloc(struct cgroup_subsys_state *parent_css)
5657 {
5658 struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
5659
5660 if (!css)
5661 return ERR_PTR(-ENOMEM);
5662
5663 return css;
5664 }
5665
5666 static void debug_css_free(struct cgroup_subsys_state *css)
5667 {
5668 kfree(css);
5669 }
5670
5671 static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
5672 struct cftype *cft)
5673 {
5674 return cgroup_task_count(css->cgroup);
5675 }
5676
5677 static u64 current_css_set_read(struct cgroup_subsys_state *css,
5678 struct cftype *cft)
5679 {
5680 return (u64)(unsigned long)current->cgroups;
5681 }
5682
5683 static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
5684 struct cftype *cft)
5685 {
5686 u64 count;
5687
5688 rcu_read_lock();
5689 count = atomic_read(&task_css_set(current)->refcount);
5690 rcu_read_unlock();
5691 return count;
5692 }
5693
5694 static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
5695 {
5696 struct cgrp_cset_link *link;
5697 struct css_set *cset;
5698 char *name_buf;
5699
5700 name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
5701 if (!name_buf)
5702 return -ENOMEM;
5703
5704 down_read(&css_set_rwsem);
5705 rcu_read_lock();
5706 cset = rcu_dereference(current->cgroups);
5707 list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
5708 struct cgroup *c = link->cgrp;
5709
5710 cgroup_name(c, name_buf, NAME_MAX + 1);
5711 seq_printf(seq, "Root %d group %s\n",
5712 c->root->hierarchy_id, name_buf);
5713 }
5714 rcu_read_unlock();
5715 up_read(&css_set_rwsem);
5716 kfree(name_buf);
5717 return 0;
5718 }
5719
5720 #define MAX_TASKS_SHOWN_PER_CSS 25
5721 static int cgroup_css_links_read(struct seq_file *seq, void *v)
5722 {
5723 struct cgroup_subsys_state *css = seq_css(seq);
5724 struct cgrp_cset_link *link;
5725
5726 down_read(&css_set_rwsem);
5727 list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
5728 struct css_set *cset = link->cset;
5729 struct task_struct *task;
5730 int count = 0;
5731
5732 seq_printf(seq, "css_set %p\n", cset);
5733
5734 list_for_each_entry(task, &cset->tasks, cg_list) {
5735 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
5736 goto overflow;
5737 seq_printf(seq, " task %d\n", task_pid_vnr(task));
5738 }
5739
5740 list_for_each_entry(task, &cset->mg_tasks, cg_list) {
5741 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
5742 goto overflow;
5743 seq_printf(seq, " task %d\n", task_pid_vnr(task));
5744 }
5745 continue;
5746 overflow:
5747 seq_puts(seq, " ...\n");
5748 }
5749 up_read(&css_set_rwsem);
5750 return 0;
5751 }
5752
5753 static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
5754 {
5755 return (!cgroup_has_tasks(css->cgroup) &&
5756 !css_has_online_children(&css->cgroup->self));
5757 }
5758
5759 static struct cftype debug_files[] = {
5760 {
5761 .name = "taskcount",
5762 .read_u64 = debug_taskcount_read,
5763 },
5764
5765 {
5766 .name = "current_css_set",
5767 .read_u64 = current_css_set_read,
5768 },
5769
5770 {
5771 .name = "current_css_set_refcount",
5772 .read_u64 = current_css_set_refcount_read,
5773 },
5774
5775 {
5776 .name = "current_css_set_cg_links",
5777 .seq_show = current_css_set_cg_links_read,
5778 },
5779
5780 {
5781 .name = "cgroup_css_links",
5782 .seq_show = cgroup_css_links_read,
5783 },
5784
5785 {
5786 .name = "releasable",
5787 .read_u64 = releasable_read,
5788 },
5789
5790 { } /* terminate */
5791 };
5792
5793 struct cgroup_subsys debug_cgrp_subsys = {
5794 .css_alloc = debug_css_alloc,
5795 .css_free = debug_css_free,
5796 .legacy_cftypes = debug_files,
5797 };
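
/*
 * Usage note (mount point illustrative): the debug files above are
 * exposed by mounting the debug controller on a legacy hierarchy, e.g.
 *
 *	mount -t cgroup -o debug none /sys/fs/cgroup/debug
 *	cat /sys/fs/cgroup/debug/taskcount
 */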
5798 #endif /* CONFIG_CGROUP_DEBUG */