/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Notifications support
 *  Copyright (C) 2009 Nokia Corporation
 *  Author: Kirill A. Shutemov
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/magic.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/percpu-rwsem.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/cpuset.h>
#include <linux/proc_ns.h>
#include <linux/nsproxy.h>
#include <linux/file.h>
#include <net/sock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem, not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

#define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
					 MAX_CFTYPE_NAME + 2)

/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * css_set_lock protects task->cgroups pointer, the list of css_set
 * objects, and the chain of tasks off each css_set.
 *
 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
 * cgroup.h can use them for lockdep annotations.
 */
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
DEFINE_SPINLOCK(css_set_lock);
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_lock);
#else
static DEFINE_MUTEX(cgroup_mutex);
static DEFINE_SPINLOCK(css_set_lock);
#endif

/*
 * Protects cgroup_idr and css_idr so that IDs can be released without
 * grabbing cgroup_mutex.
 */
static DEFINE_SPINLOCK(cgroup_idr_lock);

/*
 * Protects cgroup_file->kn for !self csses.  It synchronizes notifications
 * against file removal/re-creation across css hiding.
 */
static DEFINE_SPINLOCK(cgroup_file_kn_lock);

/*
 * Protects cgroup_subsys->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

#define cgroup_assert_mutex_or_rcu_locked()				\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			   !lockdep_is_held(&cgroup_mutex),		\
			   "cgroup_mutex or RCU read lock required");
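
/*
 * Illustrative sketch (not in the original file; the helper name is
 * hypothetical): a read-side accessor would satisfy the assertion above
 * either by holding cgroup_mutex or by entering an RCU read-side
 * critical section, as below.
 */
#if 0
static bool example_css_is_online(struct cgroup *cgrp,
				  struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;
	bool online;

	rcu_read_lock();
	cgroup_assert_mutex_or_rcu_locked();	/* satisfied: inside RCU */
	css = rcu_dereference(cgrp->subsys[ss->id]);
	online = css && (css->flags & CSS_ONLINE);
	rcu_read_unlock();
	return online;
}
#endif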

/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
static struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
#define SUBSYS(_x)							\
	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key);	\
	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key);		\
	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key);		\
	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
static struct static_key_true *cgroup_subsys_enabled_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/*
 * The default hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
struct cgroup_root cgrp_dfl_root;
EXPORT_SYMBOL_GPL(cgrp_dfl_root);

/*
 * The default hierarchy always exists but is hidden until mounted for the
 * first time.  This is for backward compatibility.
 */
static bool cgrp_dfl_visible;

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* some controllers are not supported in the default hierarchy */
static u16 cgrp_dfl_inhibit_ss_mask;

/* some controllers are implicitly enabled on the default hierarchy */
static unsigned long cgrp_dfl_implicit_ss_mask;

/* The list of hierarchy roots */

static LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

/*
 * Assign a monotonically increasing serial number to csses.  It guarantees
 * cgroups with bigger numbers are newer than those with smaller numbers.
 * Also, as csses are always appended to the parent's ->children list, it
 * guarantees that sibling csses are always sorted in the ascending serial
 * number order on the list.  Protected by cgroup_mutex.
 */
static u64 css_serial_nr_next = 1;

/*
 * These bitmask flags indicate whether tasks in the fork and exit paths have
 * fork/exit handlers to call.  This avoids us having to do extra work in the
 * fork/exit path to check which subsystems have fork/exit callbacks.
 */
static u16 have_fork_callback __read_mostly;
static u16 have_exit_callback __read_mostly;
static u16 have_free_callback __read_mostly;

/* cgroup namespace for init task */
struct cgroup_namespace init_cgroup_ns = {
	.count		= { .counter = 2, },
	.user_ns	= &init_user_ns,
	.ns.ops		= &cgroupns_operations,
	.ns.inum	= PROC_CGROUP_INIT_INO,
	.root_cset	= &init_css_set,
};

/* Ditto for the can_fork callback. */
static u16 have_canfork_callback __read_mostly;

static struct file_system_type cgroup2_fs_type;
static struct cftype cgroup_dfl_base_files[];
static struct cftype cgroup_legacy_base_files[];

static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
static void cgroup_lock_and_drain_offline(struct cgroup *cgrp);
static int cgroup_apply_control(struct cgroup *cgrp);
static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
static void css_task_iter_advance(struct css_task_iter *it);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
					      struct cgroup_subsys *ss);
static void css_release(struct percpu_ref *ref);
static void kill_css(struct cgroup_subsys_state *css);
static int cgroup_addrm_files(struct cgroup_subsys_state *css,
			      struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);

/**
 * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
 * @ssid: subsys ID of interest
 *
 * cgroup_subsys_enabled() can only be used with literal subsys names, which
 * is fine for individual subsystems but unsuitable for cgroup core.  This
 * is a slower static_key_enabled()-based test indexed by @ssid.
 */
static bool cgroup_ssid_enabled(int ssid)
{
	if (CGROUP_SUBSYS_COUNT == 0)
		return false;

	return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
}

static bool cgroup_ssid_no_v1(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
 * @cgrp: the cgroup of interest
 *
 * The default hierarchy is the v2 interface of cgroup and this function
 * can be used to test whether a cgroup is on the default hierarchy for
 * cases where a subsystem should behave differently depending on the
 * interface version.
 *
 * The set of behaviors which change on the default hierarchy are still
 * being determined and the mount option is prefixed with __DEVEL__.
 *
 * List of changed behaviors:
 *
 * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
 *   and "name" are disallowed.
 *
 * - When mounting an existing superblock, mount options should match.
 *
 * - Remount is disallowed.
 *
 * - rename(2) is disallowed.
 *
 * - "tasks" is removed.  Everything should be at process granularity.  Use
 *   "cgroup.procs" instead.
 *
 * - "cgroup.procs" is not sorted.  pids will be unique unless they got
 *   recycled in between reads.
 *
 * - "release_agent" and "notify_on_release" are removed.  Replacement
 *   notification mechanism will be implemented.
 *
 * - "cgroup.clone_children" is removed.
 *
 * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
 *   and its descendants contain no task; otherwise, 1.  The file also
 *   generates kernfs notification which can be monitored through poll and
 *   [di]notify when the value of the file changes.
 *
 * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
 *   take masks of ancestors with non-empty cpus/mems, instead of being
 *   moved to an ancestor.
 *
 * - cpuset: a task can be moved into an empty cpuset, and again it takes
 *   masks of ancestors.
 *
 * - memcg: use_hierarchy is on by default and the cgroup file for the flag
 *   is not created.
 *
 * - blkcg: blk-throttle becomes properly hierarchical.
 *
 * - debug: disallowed on the default hierarchy.
 */
static bool cgroup_on_dfl(const struct cgroup *cgrp)
{
	return cgrp->root == &cgrp_dfl_root;
}

/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
			    gfp_t gfp_mask)
{
	int ret;

	idr_preload(gfp_mask);
	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
	spin_unlock_bh(&cgroup_idr_lock);
	idr_preload_end();
	return ret;
}

static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
{
	void *ret;

	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_replace(idr, ptr, id);
	spin_unlock_bh(&cgroup_idr_lock);
	return ret;
}

static void cgroup_idr_remove(struct idr *idr, int id)
{
	spin_lock_bh(&cgroup_idr_lock);
	idr_remove(idr, id);
	spin_unlock_bh(&cgroup_idr_lock);
}
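
/*
 * Illustrative sketch (not in the original file; the function name is
 * hypothetical): a typical pairing of the IDR wrappers above.  cgroup.c
 * commonly reserves an ID against a NULL pointer first and installs the
 * real pointer later with cgroup_idr_replace() once the object is fully
 * initialized.
 */
#if 0
static int example_install_obj(struct idr *idr, void *obj)
{
	int id;

	/* reserve an ID; the slot is filled in once @obj is set up */
	id = cgroup_idr_alloc(idr, NULL, 2, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	cgroup_idr_replace(idr, obj, id);
	return id;
}
#endif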

static struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/* subsystems visibly enabled on a cgroup */
static u16 cgroup_control(struct cgroup *cgrp)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	u16 root_ss_mask = cgrp->root->subsys_mask;

	if (parent)
		return parent->subtree_control;

	if (cgroup_on_dfl(cgrp))
		root_ss_mask &= ~(cgrp_dfl_inhibit_ss_mask |
				  cgrp_dfl_implicit_ss_mask);
	return root_ss_mask;
}

/* subsystems enabled on a cgroup */
static u16 cgroup_ss_mask(struct cgroup *cgrp)
{
	struct cgroup *parent = cgroup_parent(cgrp);

	if (parent)
		return parent->subtree_ss_mask;

	return cgrp->root->subsys_mask;
}

/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks.  This function may return
 * %NULL if @cgrp doesn't have @ss enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	if (ss)
		return rcu_dereference_check(cgrp->subsys[ss->id],
					lockdep_is_held(&cgroup_mutex));
	else
		return &cgrp->self;
}

/**
 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Similar to cgroup_css() but returns the effective css, which is defined
 * as the matching css of the nearest ancestor including self which has @ss
 * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
 * function is guaranteed to return non-NULL css.
 */
static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
						struct cgroup_subsys *ss)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!ss)
		return &cgrp->self;

	/*
	 * This function is used while updating css associations and thus
	 * can't test the csses directly.  Test ss_mask.
	 */
	while (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) {
		cgrp = cgroup_parent(cgrp);
		if (!cgrp)
			return NULL;
	}

	return cgroup_css(cgrp, ss);
}

/**
 * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get the effective css of @cgrp for @ss.  The effective css is
 * defined as the matching css of the nearest ancestor including self which
 * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
 * the root css is returned, so this function always returns a valid css.
 * The returned css must be put using css_put().
 */
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
					     struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	do {
		css = cgroup_css(cgrp, ss);

		if (css && css_tryget_online(css))
			goto out_unlock;
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);

	css = init_css_set.subsys[ss->id];
	css_get(css);
out_unlock:
	rcu_read_unlock();
	return css;
}
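
/*
 * Illustrative sketch (not in the original file; the function name is
 * hypothetical): a caller of cgroup_get_e_css() receives a pinned css and,
 * per the comment above, must drop the reference with css_put() when done.
 */
#if 0
static void example_use_e_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	css = cgroup_get_e_css(cgrp, ss);	/* always returns a valid css */

	/* ... @css may be dereferenced safely here, even across blocking ... */

	css_put(css);				/* release the pinned reference */
}
#endif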

/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

static void cgroup_get(struct cgroup *cgrp)
{
	WARN_ON_ONCE(cgroup_is_dead(cgrp));
	css_get(&cgrp->self);
}

static bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of_cft(of);

	/*
	 * This is open and unprotected implementation of cgroup_css().
	 * seq_css() is only called from a kernfs file operation which has
	 * an active reference on the file.  Because all the subsystem
	 * files are drained before a css is disassociated with a cgroup,
	 * the matching css from the cgroup's subsys table is guaranteed to
	 * be and stay valid until the enclosing operation is complete.
	 */
	if (cft->ss)
		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
	else
		return &cgrp->self;
}
EXPORT_SYMBOL_GPL(of_css);

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = rcu_dereference_check(			\
				(cgrp)->subsys[(ssid)],			\
				lockdep_is_held(&cgroup_mutex)))) { }	\
		else

/**
 * for_each_e_css - iterate all effective css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_e_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
			;						\
		else

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)

/**
 * do_each_subsys_mask - filter for_each_subsys with a bitmask
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 * @ss_mask: the bitmask
 *
 * The block will only run for cases where the ssid-th bit (1 << ssid) of
 * @ss_mask is set.
 */
#define do_each_subsys_mask(ss, ssid, ss_mask) do {			\
	unsigned long __ss_mask = (ss_mask);				\
	if (!CGROUP_SUBSYS_COUNT) { /* to avoid spurious gcc warning */	\
		(ssid) = 0;						\
		break;							\
	}								\
	for_each_set_bit(ssid, &__ss_mask, CGROUP_SUBSYS_COUNT) {	\
		(ss) = cgroup_subsys[ssid];				\
		{

#define while_each_subsys_mask()					\
		}							\
	}								\
} while (false)
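
/*
 * Illustrative sketch (not in the original file; the function name is
 * hypothetical): the macro pair above reads like a for_each loop with an
 * explicit terminator, which is how rebind_subsystems() below walks only
 * the subsystems whose bit is set in a mask.
 */
#if 0
static void example_print_subsys(u16 ss_mask)
{
	struct cgroup_subsys *ss;
	int ssid;

	do_each_subsys_mask(ss, ssid, ss_mask) {
		pr_info("subsys %d: %s\n", ssid, ss->name);
	} while_each_subsys_mask();
}
#endif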

/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp)				\
	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       cgroup_is_dead(child); }))			\
			;						\
		else

/* walk live descendants in preorder */
#define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)		\
	css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL))	\
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       (dsct) = (d_css)->cgroup;			\
		       cgroup_is_dead(dsct); }))			\
			;						\
		else

/* walk live descendants in postorder */
#define cgroup_for_each_live_descendant_post(dsct, d_css, cgrp)	\
	css_for_each_descendant_post((d_css), cgroup_css((cgrp), NULL)) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       (dsct) = (d_css)->cgroup;			\
		       cgroup_is_dead(dsct); }))			\
			;						\
		else

static void cgroup_release_agent(struct work_struct *work);
static void check_for_release(struct cgroup *cgrp);

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};

/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted.  It contains a pointer to the root state
 * for each subsystem.  Also used to anchor the list of css_sets.  Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
struct css_set init_css_set = {
	.refcount		= ATOMIC_INIT(1),
	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),
	.task_iters		= LIST_HEAD_INIT(init_css_set.task_iters),
};

static int css_set_count	= 1;	/* 1 for init_css_set */

/**
 * css_set_populated - does a css_set contain any tasks?
 * @cset: target css_set
 */
static bool css_set_populated(struct css_set *cset)
{
	lockdep_assert_held(&css_set_lock);

	return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
}

/**
 * cgroup_update_populated - update populated count of a cgroup
 * @cgrp: the target cgroup
 * @populated: inc or dec populated count
 *
 * One of the css_sets associated with @cgrp is either getting its first
 * task or losing the last.  Update @cgrp->populated_cnt accordingly.  The
 * count is propagated towards root so that a given cgroup's populated_cnt
 * is zero iff the cgroup and all its descendants don't contain any tasks.
 *
 * @cgrp's interface file "cgroup.populated" is zero if
 * @cgrp->populated_cnt is zero and 1 otherwise.  When @cgrp->populated_cnt
 * changes from or to zero, userland is notified that the content of the
 * interface file has changed.  This can be used to detect when @cgrp and
 * its descendants become populated or empty.
 */
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
	lockdep_assert_held(&css_set_lock);

	do {
		bool trigger;

		if (populated)
			trigger = !cgrp->populated_cnt++;
		else
			trigger = !--cgrp->populated_cnt;

		if (!trigger)
			break;

		check_for_release(cgrp);
		cgroup_file_notify(&cgrp->events_file);

		cgrp = cgroup_parent(cgrp);
	} while (cgrp);
}

/**
 * css_set_update_populated - update populated state of a css_set
 * @cset: target css_set
 * @populated: whether @cset is populated or depopulated
 *
 * @cset is either getting the first task or losing the last.  Update the
 * ->populated_cnt of all associated cgroups accordingly.
 */
static void css_set_update_populated(struct css_set *cset, bool populated)
{
	struct cgrp_cset_link *link;

	lockdep_assert_held(&css_set_lock);

	list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
		cgroup_update_populated(link->cgrp, populated);
}

/**
 * css_set_move_task - move a task from one css_set to another
 * @task: task being moved
 * @from_cset: css_set @task currently belongs to (may be NULL)
 * @to_cset: new css_set @task is being moved to (may be NULL)
 * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
 *
 * Move @task from @from_cset to @to_cset.  If @task didn't belong to any
 * css_set, @from_cset can be NULL.  If @task is being disassociated
 * instead of moved, @to_cset can be NULL.
 *
 * This function automatically handles populated_cnt updates and
 * css_task_iter adjustments but the caller is responsible for managing
 * @from_cset and @to_cset's reference counts.
 */
static void css_set_move_task(struct task_struct *task,
			      struct css_set *from_cset, struct css_set *to_cset,
			      bool use_mg_tasks)
{
	lockdep_assert_held(&css_set_lock);

	if (to_cset && !css_set_populated(to_cset))
		css_set_update_populated(to_cset, true);

	if (from_cset) {
		struct css_task_iter *it, *pos;

		WARN_ON_ONCE(list_empty(&task->cg_list));

		/*
		 * @task is leaving, advance task iterators which are
		 * pointing to it so that they can resume at the next
		 * position.  Advancing an iterator might remove it from
		 * the list, use safe walk.  See css_task_iter_advance*()
		 * for details.
		 */
		list_for_each_entry_safe(it, pos, &from_cset->task_iters,
					 iters_node)
			if (it->task_pos == &task->cg_list)
				css_task_iter_advance(it);

		list_del_init(&task->cg_list);
		if (!css_set_populated(from_cset))
			css_set_update_populated(from_cset, false);
	} else {
		WARN_ON_ONCE(!list_empty(&task->cg_list));
	}

	if (to_cset) {
		/*
		 * We are synchronized through cgroup_threadgroup_rwsem
		 * against PF_EXITING setting such that we can't race
		 * against cgroup_exit() changing the css_set to
		 * init_css_set and dropping the old one.
		 */
		WARN_ON_ONCE(task->flags & PF_EXITING);

		rcu_assign_pointer(task->cgroups, to_cset);
		list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
							     &to_cset->tasks);
	}
}

/*
 * hash table for cgroup groups.  This improves the performance of finding
 * an existing css_set.  This hash doesn't (currently) take into
 * account cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}

static void put_css_set_locked(struct css_set *cset)
{
	struct cgrp_cset_link *link, *tmp_link;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&css_set_lock);

	if (!atomic_dec_and_test(&cset->refcount))
		return;

	/* This css_set is dead.  Unlink it and release cgroup and css refs */
	for_each_subsys(ss, ssid) {
		list_del(&cset->e_cset_node[ssid]);
		css_put(cset->subsys[ssid]);
	}
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		if (cgroup_parent(link->cgrp))
			cgroup_put(link->cgrp);
		kfree(link);
	}

	kfree_rcu(cset, rcu_head);
}

static void put_css_set(struct css_set *cset)
{
	unsigned long flags;

	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it.  Similar to atomic_dec_and_lock(), but for a
	 * spinlock.
	 */
	if (atomic_add_unless(&cset->refcount, -1, 1))
		return;

	spin_lock_irqsave(&css_set_lock, flags);
	put_css_set_locked(cset);
	spin_unlock_irqrestore(&css_set_lock, flags);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	atomic_inc(&cset->refcount);
}

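/*
 * Illustrative sketch (not in the original file; the function name is
 * hypothetical): get_css_set() and put_css_set() must be balanced.  A
 * caller pins a css_set before using it outside css_set_lock.
 */
#if 0
static void example_pin_css_set(struct css_set *cset)
{
	get_css_set(cset);	/* take a reference */

	/* ... @cset cannot be freed while the reference is held ... */

	put_css_set(cset);	/* drop it; this may free @cset */
}
#endif
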
/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	/*
	 * On the default hierarchy, there can be csets which are
	 * associated with the same set of cgroups but different csses.
	 * Let's first ensure that csses match.
	 */
	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
		return false;

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies.  As different cgroups may
	 * share the same effective css, this comparison is always
	 * necessary.
	 */
	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}

/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					     struct cgroup *cgrp,
					     struct cgroup_subsys_state *template[])
{
	struct cgroup_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set.  While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->subsys_mask & (1UL << i)) {
			/*
			 * @ss is in this hierarchy, so we want the
			 * effective css from @cgrp.
			 */
			template[i] = cgroup_e_css(cgrp, ss);
		} else {
			/*
			 * @ss is not in this hierarchy, so we don't want
			 * to change the css.
			 */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing cgroup group matched */
	return NULL;
}

static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));

	if (cgroup_on_dfl(cgrp))
		cset->dfl_cgrp = cgrp;

	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;

	/*
	 * Always add links to the tail of the lists so that the lists are
	 * in chronological order.
	 */
	list_move_tail(&link->cset_link, &cgrp->cset_links);
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);

	if (cgroup_parent(cgrp))
		cgroup_get(cgrp);
}

/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/* First see if we already have a cgroup group that matches
	 * the desired set */
	spin_lock_irq(&css_set_lock);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	spin_unlock_irq(&css_set_lock);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	atomic_set(&cset->refcount, 1);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->tasks);
	INIT_LIST_HEAD(&cset->mg_tasks);
	INIT_LIST_HEAD(&cset->mg_preload_node);
	INIT_LIST_HEAD(&cset->mg_node);
	INIT_LIST_HEAD(&cset->task_iters);
	INIT_HLIST_NODE(&cset->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	spin_lock_irq(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add @cset to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	for_each_subsys(ss, ssid) {
		struct cgroup_subsys_state *css = cset->subsys[ssid];

		list_add_tail(&cset->e_cset_node[ssid],
			      &css->cgroup->e_csets[ssid]);
		css_get(css);
	}

	spin_unlock_irq(&css_set_lock);

	return cset;
}

static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
{
	struct cgroup *root_cgrp = kf_root->kn->priv;

	return root_cgrp->root;
}

static int cgroup_init_root_id(struct cgroup_root *root)
{
	int id;

	lockdep_assert_held(&cgroup_mutex);

	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}

static void cgroup_exit_root_id(struct cgroup_root *root)
{
	lockdep_assert_held(&cgroup_mutex);

	idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
}

static void cgroup_free_root(struct cgroup_root *root)
{
	if (root) {
		idr_destroy(&root->cgroup_idr);
		kfree(root);
	}
}

static void cgroup_destroy_root(struct cgroup_root *root)
{
	struct cgroup *cgrp = &root->cgrp;
	struct cgrp_cset_link *link, *tmp_link;

	trace_cgroup_destroy_root(root);

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	BUG_ON(atomic_read(&root->nr_cgrps));
	BUG_ON(!list_empty(&cgrp->self.children));

	/* Rebind all subsystems back to the default hierarchy */
	WARN_ON(rebind_subsystems(&cgrp_dfl_root, root->subsys_mask));

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	spin_lock_irq(&css_set_lock);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}

	spin_unlock_irq(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_mutex);

	kernfs_destroy_root(root->kf_root);
	cgroup_free_root(root);
}

/*
 * look up cgroup associated with current task's cgroup namespace on the
 * specified hierarchy
 */
static struct cgroup *
current_cgns_cgroup_from_root(struct cgroup_root *root)
{
	struct cgroup *res = NULL;
	struct css_set *cset;

	lockdep_assert_held(&css_set_lock);

	rcu_read_lock();

	cset = current->nsproxy->cgroup_ns->root_cset;
	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}
	rcu_read_unlock();

	BUG_ON(!res);
	return res;
}

/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
					    struct cgroup_root *root)
{
	struct cgroup *res = NULL;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_lock);

	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}

	BUG_ON(!res);
	return res;
}

/*
 * Return the cgroup for "task" from the given hierarchy.  Must be
 * called with cgroup_mutex and css_set_lock held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroup_root *root)
{
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	return cset_cgroup_from_root(task_css_set(task), root);
}

/*
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count).  So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero.  Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, root cgroup
 * always has either children cgroups and/or using tasks.  So we don't
 * need a special hack to ensure that root cgroup cannot be deleted.
 *
 * P.S. One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */

static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
static const struct file_operations proc_cgroupstats_operations;

static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	struct cgroup_subsys *ss = cft->ss;

	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
			 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
			 cft->name);
	else
		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
	return buf;
}

/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * S_IRUGO for read, S_IWUSR for write.
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
		mode |= S_IRUGO;

	if (cft->write_u64 || cft->write_s64 || cft->write) {
		if (cft->flags & CFTYPE_WORLD_WRITABLE)
			mode |= S_IWUGO;
		else
			mode |= S_IWUSR;
	}

	return mode;
}

/**
 * cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask
 * @subtree_control: the new subtree_control mask to consider
 * @this_ss_mask: available subsystems
 *
 * On the default hierarchy, a subsystem may request other subsystems to be
 * enabled together through its ->depends_on mask.  In such cases, more
 * subsystems than specified in "cgroup.subtree_control" may be enabled.
 *
 * This function calculates which subsystems need to be enabled if
 * @subtree_control is to be applied while restricted to @this_ss_mask.
 */
static u16 cgroup_calc_subtree_ss_mask(u16 subtree_control, u16 this_ss_mask)
{
	u16 cur_ss_mask = subtree_control;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	cur_ss_mask |= cgrp_dfl_implicit_ss_mask;

	while (true) {
		u16 new_ss_mask = cur_ss_mask;

		do_each_subsys_mask(ss, ssid, cur_ss_mask) {
			new_ss_mask |= ss->depends_on;
		} while_each_subsys_mask();

		/*
		 * Mask out subsystems which aren't available.  This can
		 * happen only if some depended-upon subsystems were bound
		 * to non-default hierarchies.
		 */
		new_ss_mask &= this_ss_mask;

		if (new_ss_mask == cur_ss_mask)
			break;
		cur_ss_mask = new_ss_mask;
	}

	return cur_ss_mask;
}

/**
 * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper undoes cgroup_kn_lock_live() and should be invoked before
 * the method finishes if locking succeeded.  Note that once this function
 * returns the cgroup returned by cgroup_kn_lock_live() may become
 * inaccessible at any time.  If the caller intends to continue to access
 * the cgroup, it should pin it before invoking this function.
 */
static void cgroup_kn_unlock(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	cgroup_put(cgrp);
}

/**
 * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 * @drain_offline: perform offline draining on the cgroup
 *
 * This helper is to be used by a cgroup kernfs method currently servicing
 * @kn.  It breaks the active protection, performs cgroup locking and
 * verifies that the associated cgroup is alive.  Returns the cgroup if
 * alive; otherwise, %NULL.  A successful return should be undone by a
 * matching cgroup_kn_unlock() invocation.  If @drain_offline is %true, the
 * cgroup is drained of offlining csses before return.
 *
 * Any cgroup kernfs method implementation which requires locking the
 * associated cgroup should use this helper.  It avoids nesting cgroup
 * locking under kernfs active protection and allows all kernfs operations
 * including self-removal.
 */
static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn,
					  bool drain_offline)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  cgroup liveness check alone provides enough
	 * protection against removal.  Ensure @cgrp stays accessible and
	 * break the active_ref protection.
	 */
	if (!cgroup_tryget(cgrp))
		return NULL;
	kernfs_break_active_protection(kn);

	if (drain_offline)
		cgroup_lock_and_drain_offline(cgrp);
	else
		mutex_lock(&cgroup_mutex);

	if (!cgroup_is_dead(cgrp))
		return cgrp;

	cgroup_kn_unlock(kn);
	return NULL;
}
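
/*
 * Illustrative sketch (not in the original file; the function name and
 * write-handler shape are hypothetical): the lock/unlock protocol a
 * cgroup kernfs method follows, per the comment above.
 */
#if 0
static ssize_t example_write(struct kernfs_open_file *of, char *buf,
			     size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	/* ... @cgrp is alive and cgroup_mutex is held here ... */

	cgroup_kn_unlock(of->kn);
	return nbytes;
}
#endif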
05ef1d7c 1481
2739d3cc 1482static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
05ef1d7c 1483{
2bd59d48 1484 char name[CGROUP_FILE_NAME_MAX];
05ef1d7c 1485
01f6474c 1486 lockdep_assert_held(&cgroup_mutex);
34c06254
TH
1487
1488 if (cft->file_offset) {
1489 struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
1490 struct cgroup_file *cfile = (void *)css + cft->file_offset;
1491
1492 spin_lock_irq(&cgroup_file_kn_lock);
1493 cfile->kn = NULL;
1494 spin_unlock_irq(&cgroup_file_kn_lock);
1495 }
1496
2bd59d48 1497 kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
05ef1d7c
TH
1498}
1499
13af07df 1500/**
4df8dc90
TH
1501 * css_clear_dir - remove subsys files in a cgroup directory
1502 * @css: taget css
13af07df 1503 */
334c3679 1504static void css_clear_dir(struct cgroup_subsys_state *css)
05ef1d7c 1505{
334c3679 1506 struct cgroup *cgrp = css->cgroup;
4df8dc90 1507 struct cftype *cfts;
05ef1d7c 1508
88cb04b9
TH
1509 if (!(css->flags & CSS_VISIBLE))
1510 return;
1511
1512 css->flags &= ~CSS_VISIBLE;
1513
4df8dc90
TH
1514 list_for_each_entry(cfts, &css->ss->cfts, node)
1515 cgroup_addrm_files(css, cgrp, cfts, false);
ddbcc7e8
PM
1516}
1517
ccdca218 1518/**
4df8dc90
TH
1519 * css_populate_dir - create subsys files in a cgroup directory
1520 * @css: target css
ccdca218
TH
1521 *
1522 * On failure, no file is added.
1523 */
334c3679 1524static int css_populate_dir(struct cgroup_subsys_state *css)
ccdca218 1525{
334c3679 1526 struct cgroup *cgrp = css->cgroup;
4df8dc90
TH
1527 struct cftype *cfts, *failed_cfts;
1528 int ret;
ccdca218 1529
03970d3c 1530 if ((css->flags & CSS_VISIBLE) || !cgrp->kn)
88cb04b9
TH
1531 return 0;
1532
4df8dc90
TH
1533 if (!css->ss) {
1534 if (cgroup_on_dfl(cgrp))
1535 cfts = cgroup_dfl_base_files;
1536 else
1537 cfts = cgroup_legacy_base_files;
ccdca218 1538
4df8dc90
TH
1539 return cgroup_addrm_files(&cgrp->self, cgrp, cfts, true);
1540 }
ccdca218 1541
4df8dc90
TH
1542 list_for_each_entry(cfts, &css->ss->cfts, node) {
1543 ret = cgroup_addrm_files(css, cgrp, cfts, true);
1544 if (ret < 0) {
1545 failed_cfts = cfts;
1546 goto err;
ccdca218
TH
1547 }
1548 }
88cb04b9
TH
1549
1550 css->flags |= CSS_VISIBLE;
1551
ccdca218
TH
1552 return 0;
1553err:
4df8dc90
TH
1554 list_for_each_entry(cfts, &css->ss->cfts, node) {
1555 if (cfts == failed_cfts)
1556 break;
1557 cgroup_addrm_files(css, cgrp, cfts, false);
1558 }
ccdca218
TH
1559 return ret;
1560}

static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
{
	struct cgroup *dcgrp = &dst_root->cgrp;
	struct cgroup_subsys *ss;
	int ssid, i, ret;

	lockdep_assert_held(&cgroup_mutex);

	do_each_subsys_mask(ss, ssid, ss_mask) {
		/*
		 * If @ss has non-root csses attached to it, can't move.
		 * If @ss is an implicit controller, it is exempt from this
		 * rule and can be stolen.
		 */
		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)) &&
		    !ss->implicit_on_dfl)
			return -EBUSY;

		/* can't move between two non-dummy roots either */
		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
			return -EBUSY;
	} while_each_subsys_mask();

	do_each_subsys_mask(ss, ssid, ss_mask) {
		struct cgroup_root *src_root = ss->root;
		struct cgroup *scgrp = &src_root->cgrp;
		struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
		struct css_set *cset;

		WARN_ON(!css || cgroup_css(dcgrp, ss));

		/* disable from the source */
		src_root->subsys_mask &= ~(1 << ssid);
		WARN_ON(cgroup_apply_control(scgrp));
		cgroup_finalize_control(scgrp, 0);

		/* rebind */
		RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
		rcu_assign_pointer(dcgrp->subsys[ssid], css);
		ss->root = dst_root;
		css->cgroup = dcgrp;

		spin_lock_irq(&css_set_lock);
		hash_for_each(css_set_table, i, cset, hlist)
			list_move_tail(&cset->e_cset_node[ss->id],
				       &dcgrp->e_csets[ss->id]);
		spin_unlock_irq(&css_set_lock);

		/* default hierarchy doesn't enable controllers by default */
		dst_root->subsys_mask |= 1 << ssid;
		if (dst_root == &cgrp_dfl_root) {
			static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
		} else {
			dcgrp->subtree_control |= 1 << ssid;
			static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
		}

		ret = cgroup_apply_control(dcgrp);
		if (ret)
			pr_warn("partial failure to rebind %s controller (err=%d)\n",
				ss->name, ret);

		if (ss->bind)
			ss->bind(css);
	} while_each_subsys_mask();

	kernfs_activate(dcgrp->kn);
	return 0;
}
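
/*
 * A minimal usage sketch (not from the original file): callers such as
 * cgroup_remount() and cgroup_setup_root() invoke rebind_subsystems()
 * under cgroup_mutex, e.g. to hand a controller mask back to the
 * default root:
 *
 *	lockdep_assert_held(&cgroup_mutex);
 *	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));
 */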

static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
			    struct kernfs_root *kf_root)
{
	int len = 0;
	char *buf = NULL;
	struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
	struct cgroup *ns_cgroup;

	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock_irq(&css_set_lock);
	ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
	len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
	spin_unlock_irq(&css_set_lock);

	if (len >= PATH_MAX)
		len = -ERANGE;
	else if (len > 0) {
		seq_escape(sf, buf, " \t\n\\");
		len = 0;
	}
	kfree(buf);
	return len;
}

static int cgroup_show_options(struct seq_file *seq,
			       struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	if (root != &cgrp_dfl_root)
		for_each_subsys(ss, ssid)
			if (root->subsys_mask & (1 << ssid))
				seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}
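
/*
 * For illustration (hypothetical mounts, not from the original file),
 * the options emitted above make v1 hierarchies show up in /proc/mounts
 * along the lines of:
 *
 *	cgroup /sys/fs/cgroup/cpuset cgroup rw,cpuset,noprefix,clone_children 0 0
 *	cgroup /custom cgroup rw,release_agent=/sbin/agent,name=mygrp 0 0
 */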

struct cgroup_sb_opts {
	u16 subsys_mask;
	unsigned int flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;
};

static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	u16 mask = U16_MAX;
	struct cgroup_subsys *ss;
	int nr_opts = 0;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		nr_opts++;

		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i))
				continue;
			if (cgroup_ssid_no_v1(i))
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			opts->subsys_mask |= (1 << i);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified, select all the subsystems.
	 * Otherwise, if none of 'none', 'name=' or a subsystem name was
	 * specified, default to 'all'.
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (cgroup_ssid_enabled(i) && !cgroup_ssid_no_v1(i))
				opts->subsys_mask |= (1 << i);

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}
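
/*
 * Worked example (illustrative only): given the mount data string
 * "cpuset,memory,noprefix,release_agent=/sbin/agent",
 * parse_cgroupfs_options() would fill *opts with
 *
 *	subsys_mask	= (1 << cpuset_cgrp_id) | (1 << memory_cgrp_id)
 *	flags		= CGRP_ROOT_NOPREFIX
 *	release_agent	= "/sbin/agent"
 *	name = NULL, none = false
 *
 * and then fail with -EINVAL, because noprefix is only accepted when
 * cpuset is the sole subsystem being mounted.
 */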

static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	u16 added_mask, removed_mask;

	if (root == &cgrp_dfl_root) {
		pr_err("remount is not allowed\n");
		return -EINVAL;
	}

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((opts.flags ^ root->flags) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
		       opts.flags, opts.name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
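
/*
 * Usage sketch (hypothetical paths, for illustration): given the checks
 * above, the only remount that still succeeds is one that keeps flags,
 * name and subsystems intact on a childless hierarchy, e.g. updating
 * the release agent:
 *
 *	# mount -o remount,cpuset,release_agent=/sbin/new_agent \
 *		none /sys/fs/cgroup/cpuset
 */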

/*
 * To reduce the fork() overhead for systems that are not actually using
 * their cgroups capability, we don't maintain the lists running through
 * each css_set to its tasks until we see the list actually used - in other
 * words after the first mount.
 */
static bool use_task_css_set_links __read_mostly;

static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	spin_lock_irq(&css_set_lock);

	if (use_task_css_set_links)
		goto out_unlock;

	use_task_css_set_links = true;

	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread(). Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
			     task_css_set(p) != &init_css_set);

		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 * Do it while holding siglock so that we don't end up
		 * racing against cgroup_exit().
		 *
		 * Interrupts were already disabled while acquiring
		 * the css_set_lock, so we do not need to disable it
		 * again when acquiring the sighand->siglock here.
		 */
		spin_lock(&p->sighand->siglock);
		if (!(p->flags & PF_EXITING)) {
			struct css_set *cset = task_css_set(p);

			if (!css_set_populated(cset))
				css_set_update_populated(cset, true);
			list_add_tail(&p->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		spin_unlock(&p->sighand->siglock);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
out_unlock:
	spin_unlock_irq(&css_set_lock);
}

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	int ssid;

	INIT_LIST_HEAD(&cgrp->self.sibling);
	INIT_LIST_HEAD(&cgrp->self.children);
	INIT_LIST_HEAD(&cgrp->cset_links);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	cgrp->self.cgroup = cgrp;
	cgrp->self.flags |= CSS_ONLINE;

	for_each_subsys(ss, ssid)
		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);

	init_waitqueue_head(&cgrp->offline_waitq);
	INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent);
}

static void init_cgroup_root(struct cgroup_root *root,
			     struct cgroup_sb_opts *opts)
{
	struct cgroup *cgrp = &root->cgrp;

	INIT_LIST_HEAD(&root->root_list);
	atomic_set(&root->nr_cgrps, 1);
	cgrp->root = root;
	init_cgroup_housekeeping(cgrp);
	idr_init(&root->cgroup_idr);

	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}

static int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
{
	LIST_HEAD(tmp_links);
	struct cgroup *root_cgrp = &root->cgrp;
	struct css_set *cset;
	int i, ret;

	lockdep_assert_held(&cgroup_mutex);

	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
	if (ret < 0)
		goto out;
	root_cgrp->id = ret;
	root_cgrp->ancestor_ids[0] = ret;

	ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 0,
			      GFP_KERNEL);
	if (ret)
		goto out;

	/*
	 * We're accessing css_set_count without locking css_set_lock here,
	 * but that's OK - it can only be increased by someone holding
	 * cgroup_lock, and that's us. Later rebinding may disable
	 * controllers on the default hierarchy and thus create new csets,
	 * which can't be more than the existing ones. Allocate 2x.
	 */
	ret = allocate_cgrp_cset_links(2 * css_set_count, &tmp_links);
	if (ret)
		goto cancel_ref;

	ret = cgroup_init_root_id(root);
	if (ret)
		goto cancel_ref;

	root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
					   KERNFS_ROOT_CREATE_DEACTIVATED,
					   root_cgrp);
	if (IS_ERR(root->kf_root)) {
		ret = PTR_ERR(root->kf_root);
		goto exit_root_id;
	}
	root_cgrp->kn = root->kf_root->kn;

	ret = css_populate_dir(&root_cgrp->self);
	if (ret)
		goto destroy_root;

	ret = rebind_subsystems(root, ss_mask);
	if (ret)
		goto destroy_root;

	trace_cgroup_setup_root(root);

	/*
	 * There must be no failure case after here, since rebinding takes
	 * care of subsystems' refcounts, which are explicitly dropped in
	 * the failure exit path.
	 */
	list_add(&root->root_list, &cgroup_roots);
	cgroup_root_count++;

	/*
	 * Link the root cgroup in this hierarchy into all the css_set
	 * objects.
	 */
	spin_lock_irq(&css_set_lock);
	hash_for_each(css_set_table, i, cset, hlist) {
		link_css_set(&tmp_links, cset, root_cgrp);
		if (css_set_populated(cset))
			cgroup_update_populated(root_cgrp, true);
	}
	spin_unlock_irq(&css_set_lock);

	BUG_ON(!list_empty(&root_cgrp->self.children));
	BUG_ON(atomic_read(&root->nr_cgrps) != 1);

	kernfs_activate(root_cgrp->kn);
	ret = 0;
	goto out;

destroy_root:
	kernfs_destroy_root(root->kf_root);
	root->kf_root = NULL;
exit_root_id:
	cgroup_exit_root_id(root);
cancel_ref:
	percpu_ref_exit(&root_cgrp->self.refcnt);
out:
	free_cgrp_cset_links(&tmp_links);
	return ret;
}

static struct dentry *cgroup_mount(struct file_system_type *fs_type,
				   int flags, const char *unused_dev_name,
				   void *data)
{
	bool is_v2 = fs_type == &cgroup2_fs_type;
	struct super_block *pinned_sb = NULL;
	struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
	struct cgroup_subsys *ss;
	struct cgroup_root *root;
	struct cgroup_sb_opts opts;
	struct dentry *dentry;
	int ret;
	int i;
	bool new_sb;

	get_cgroup_ns(ns);

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) {
		put_cgroup_ns(ns);
		return ERR_PTR(-EPERM);
	}

	/*
	 * The first time anyone tries to mount a cgroup, enable the list
	 * linking each css_set to its tasks and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	if (is_v2) {
		if (data) {
			pr_err("cgroup2: unknown option \"%s\"\n", (char *)data);
			put_cgroup_ns(ns);
			return ERR_PTR(-EINVAL);
		}
		cgrp_dfl_visible = true;
		root = &cgrp_dfl_root;
		cgroup_get(&root->cgrp);
		goto out_mount;
	}

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount. Let's drain the
	 * dying subsystems. We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting. Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(opts.subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match. Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if (root->flags ^ opts.flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		/*
		 * We want to reuse @root whose lifetime is governed by its
		 * ->cgrp. Let's check whether @root is alive and keep it
		 * that way. As cgroup_kill_sb() can happen anytime, we
		 * want to block it by pinning the sb so that @root doesn't
		 * get killed before mount is complete.
		 *
		 * With the sb pinned, tryget_live can reliably indicate
		 * whether @root can be reused. If it's being killed,
		 * drain it. We can use wait_queue for the wait but this
		 * path is super cold. Let's just sleep a bit and retry.
		 */
		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
		if (IS_ERR(pinned_sb) ||
		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			if (!IS_ERR_OR_NULL(pinned_sb))
				deactivate_super(pinned_sb);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one. name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ns != &init_cgroup_ns) {
		ret = -EPERM;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret) {
		put_cgroup_ns(ns);
		return ERR_PTR(ret);
	}
out_mount:
	dentry = kernfs_mount(fs_type, flags, root->kf_root,
			      is_v2 ? CGROUP2_SUPER_MAGIC : CGROUP_SUPER_MAGIC,
			      &new_sb);

	/*
	 * In non-init cgroup namespace, instead of root cgroup's
	 * dentry, we return the dentry corresponding to the
	 * cgroupns->root_cgrp.
	 */
	if (!IS_ERR(dentry) && ns != &init_cgroup_ns) {
		struct dentry *nsdentry;
		struct cgroup *cgrp;

		mutex_lock(&cgroup_mutex);
		spin_lock_irq(&css_set_lock);

		cgrp = cset_cgroup_from_root(ns->root_cset, root);

		spin_unlock_irq(&css_set_lock);
		mutex_unlock(&cgroup_mutex);

		nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
		dput(dentry);
		dentry = nsdentry;
	}

	if (IS_ERR(dentry) || !new_sb)
		cgroup_put(&root->cgrp);

	/*
	 * If @pinned_sb, we're reusing an existing root and holding an
	 * extra ref on its sb. Mount is complete. Put the extra ref.
	 */
	if (pinned_sb) {
		WARN_ON(new_sb);
		deactivate_super(pinned_sb);
	}

	put_cgroup_ns(ns);
	return dentry;
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);

	/*
	 * If @root doesn't have any mounts or children, start killing it.
	 * This prevents new mounts by disabling percpu_ref_tryget_live().
	 * cgroup_mount() may wait for @root's release.
	 *
	 * And don't kill the default root.
	 */
	if (!list_empty(&root->cgrp.self.children) ||
	    root == &cgrp_dfl_root)
		cgroup_put(&root->cgrp);
	else
		percpu_ref_kill(&root->cgrp.self.refcnt);

	kernfs_kill_sb(sb);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
	.fs_flags = FS_USERNS_MOUNT,
};

static struct file_system_type cgroup2_fs_type = {
	.name = "cgroup2",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
	.fs_flags = FS_USERNS_MOUNT,
};
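
/*
 * For illustration (hypothetical mount points): the two filesystem
 * types registered above are what the following commands end up
 * driving through cgroup_mount():
 *
 *	# mount -t cgroup -o cpuset none /sys/fs/cgroup/cpuset	(v1)
 *	# mount -t cgroup2 none /sys/fs/cgroup/unified		(v2)
 *
 * cgroup2 accepts no mount options; any data string fails with -EINVAL.
 */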

static int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
				 struct cgroup_namespace *ns)
{
	struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root);

	return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
}

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	spin_lock_irq(&css_set_lock);

	ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);

	spin_unlock_irq(&css_set_lock);
	mutex_unlock(&cgroup_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(cgroup_path_ns);

/**
 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
 * @task: target task
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Determine @task's cgroup on the first (the one with the lowest non-zero
 * hierarchy_id) cgroup hierarchy and copy its path into @buf. This
 * function grabs cgroup_mutex and shouldn't be used inside locks used by
 * cgroup controller callbacks.
 *
 * Return value is the same as kernfs_path().
 */
int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
	struct cgroup_root *root;
	struct cgroup *cgrp;
	int hierarchy_id = 1;
	int ret;

	mutex_lock(&cgroup_mutex);
	spin_lock_irq(&css_set_lock);

	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);

	if (root) {
		cgrp = task_cgroup_from_root(task, root);
		ret = cgroup_path_ns_locked(cgrp, buf, buflen, &init_cgroup_ns);
	} else {
		/* if no hierarchy exists, everyone is in "/" */
		ret = strlcpy(buf, "/", buflen);
	}

	spin_unlock_irq(&css_set_lock);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);
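
/*
 * A minimal in-kernel usage sketch (illustrative only; the buffer name
 * is hypothetical):
 *
 *	char *path = kmalloc(PATH_MAX, GFP_KERNEL);
 *
 *	if (path && task_cgroup_path(current, path, PATH_MAX) >= 0)
 *		pr_info("first-hierarchy cgroup: %s\n", path);
 *	kfree(path);
 *
 * Must not be called from contexts that can't take cgroup_mutex.
 */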
/* used to track tasks and other necessary states during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head src_csets;
	struct list_head dst_csets;

	/* the subsys currently being processed */
	int ssid;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of
	 * the csets on ->dst_csets. ->csets point to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current task position
	 * during iteration.
	 */
	struct list_head *csets;
	struct css_set *cur_cset;
	struct task_struct *cur_task;
};

#define CGROUP_TASKSET_INIT(tset) (struct cgroup_taskset){ \
	.src_csets = LIST_HEAD_INIT(tset.src_csets), \
	.dst_csets = LIST_HEAD_INIT(tset.dst_csets), \
	.csets = &tset.src_csets, \
}

/**
 * cgroup_taskset_add - try to add a migration target task to a taskset
 * @task: target task
 * @tset: target taskset
 *
 * Add @task, which is a migration target, to @tset. This function becomes
 * a noop if @task doesn't need to be migrated. @task's css_set should have
 * been added as a migration source and @task->cg_list will be moved from
 * the css_set's tasks list to the mg_tasks one.
 */
static void cgroup_taskset_add(struct task_struct *task,
			       struct cgroup_taskset *tset)
{
	struct css_set *cset;

	lockdep_assert_held(&css_set_lock);

	/* @task either already exited or can't exit until the end */
	if (task->flags & PF_EXITING)
		return;

	/* leave @task alone if post_fork() hasn't linked it yet */
	if (list_empty(&task->cg_list))
		return;

	cset = task_css_set(task);
	if (!cset->mg_src_cgrp)
		return;

	list_move_tail(&task->cg_list, &cset->mg_tasks);
	if (list_empty(&cset->mg_node))
		list_add_tail(&cset->mg_node, &tset->src_csets);
	if (list_empty(&cset->mg_dst_cset->mg_node))
		list_move_tail(&cset->mg_dst_cset->mg_node,
			       &tset->dst_csets);
}

/**
 * cgroup_taskset_first - reset taskset and return the first task
 * @tset: taskset of interest
 * @dst_cssp: output variable for the destination css
 *
 * @tset iteration is initialized and the first task is returned.
 */
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp)
{
	tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
	tset->cur_task = NULL;

	return cgroup_taskset_next(tset, dst_cssp);
}

/**
 * cgroup_taskset_next - iterate to the next task in taskset
 * @tset: taskset of interest
 * @dst_cssp: output variable for the destination css
 *
 * Return the next task in @tset. Iteration must have been initialized
 * with cgroup_taskset_first().
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp)
{
	struct css_set *cset = tset->cur_cset;
	struct task_struct *task = tset->cur_task;

	while (&cset->mg_node != tset->csets) {
		if (!task)
			task = list_first_entry(&cset->mg_tasks,
						struct task_struct, cg_list);
		else
			task = list_next_entry(task, cg_list);

		if (&task->cg_list != &cset->mg_tasks) {
			tset->cur_cset = cset;
			tset->cur_task = task;

			/*
			 * This function may be called both before and
			 * after cgroup_taskset_migrate(). The two cases
			 * can be distinguished by looking at whether @cset
			 * has its ->mg_dst_cset set.
			 */
			if (cset->mg_dst_cset)
				*dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
			else
				*dst_cssp = cset->subsys[tset->ssid];

			return task;
		}

		cset = list_next_entry(cset, mg_node);
		task = NULL;
	}

	return NULL;
}
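
/*
 * Illustrative sketch (not part of the original file): a controller's
 * ->attach() method typically walks the taskset with the
 * cgroup_taskset_for_each() wrapper around the two iterators above.
 * "demo_attach" is a hypothetical callback name.
 */
#if 0
static void demo_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *css;
	struct task_struct *task;

	cgroup_taskset_for_each(task, css, tset) {
		/* apply the configuration of @css to @task */
	}
}
#endif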
/**
 * cgroup_taskset_migrate - migrate a taskset
 * @tset: target taskset
 * @root: cgroup root the migration is taking place on
 *
 * Migrate tasks in @tset as setup by migration preparation functions.
 * This function fails iff one of the ->can_attach callbacks fails and
 * guarantees that either all or none of the tasks in @tset are migrated.
 * @tset is consumed regardless of success.
 */
static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
				  struct cgroup_root *root)
{
	struct cgroup_subsys *ss;
	struct task_struct *task, *tmp_task;
	struct css_set *cset, *tmp_cset;
	int ssid, failed_ssid, ret;

	/* methods shouldn't be called if no task is actually migrating */
	if (list_empty(&tset->src_csets))
		return 0;

	/* check that we can legitimately attach to the cgroup */
	do_each_subsys_mask(ss, ssid, root->subsys_mask) {
		if (ss->can_attach) {
			tset->ssid = ssid;
			ret = ss->can_attach(tset);
			if (ret) {
				failed_ssid = ssid;
				goto out_cancel_attach;
			}
		}
	} while_each_subsys_mask();

	/*
	 * Now that we're guaranteed success, proceed to move all tasks to
	 * the new cgroup. There are no failure cases after here, so this
	 * is the commit point.
	 */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(cset, &tset->src_csets, mg_node) {
		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
			struct css_set *from_cset = task_css_set(task);
			struct css_set *to_cset = cset->mg_dst_cset;

			get_css_set(to_cset);
			css_set_move_task(task, from_cset, to_cset, true);
			put_css_set_locked(from_cset);
		}
	}
	spin_unlock_irq(&css_set_lock);

	/*
	 * Migration is committed, all target tasks are now on dst_csets.
	 * Nothing is sensitive to fork() after this point. Notify
	 * controllers that migration is complete.
	 */
	tset->csets = &tset->dst_csets;

	do_each_subsys_mask(ss, ssid, root->subsys_mask) {
		if (ss->attach) {
			tset->ssid = ssid;
			ss->attach(tset);
		}
	} while_each_subsys_mask();

	ret = 0;
	goto out_release_tset;

out_cancel_attach:
	do_each_subsys_mask(ss, ssid, root->subsys_mask) {
		if (ssid == failed_ssid)
			break;
		if (ss->cancel_attach) {
			tset->ssid = ssid;
			ss->cancel_attach(tset);
		}
	} while_each_subsys_mask();
out_release_tset:
	spin_lock_irq(&css_set_lock);
	list_splice_init(&tset->dst_csets, &tset->src_csets);
	list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
		list_del_init(&cset->mg_node);
	}
	spin_unlock_irq(&css_set_lock);
	return ret;
}
/**
 * cgroup_may_migrate_to - verify whether a cgroup can be migration destination
 * @dst_cgrp: destination cgroup to test
 *
 * On the default hierarchy, except for the root, subtree_control must be
 * zero for migration destination cgroups with tasks so that child cgroups
 * don't compete against tasks.
 */
static bool cgroup_may_migrate_to(struct cgroup *dst_cgrp)
{
	return !cgroup_on_dfl(dst_cgrp) || !cgroup_parent(dst_cgrp) ||
		!dst_cgrp->subtree_control;
}
/**
 * cgroup_migrate_finish - cleanup after attach
 * @preloaded_csets: list of preloaded css_sets
 *
 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See
 * those functions for details.
 */
static void cgroup_migrate_finish(struct list_head *preloaded_csets)
{
	struct css_set *cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	spin_lock_irq(&css_set_lock);
	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
		cset->mg_src_cgrp = NULL;
		cset->mg_dst_cgrp = NULL;
		cset->mg_dst_cset = NULL;
		list_del_init(&cset->mg_preload_node);
		put_css_set_locked(cset);
	}
	spin_unlock_irq(&css_set_lock);
}
/**
 * cgroup_migrate_add_src - add a migration source css_set
 * @src_cset: the source css_set to add
 * @dst_cgrp: the destination cgroup
 * @preloaded_csets: list of preloaded css_sets
 *
 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin
 * @src_cset and add it to @preloaded_csets, which should later be cleaned
 * up by cgroup_migrate_finish().
 *
 * This function may be called without holding cgroup_threadgroup_rwsem
 * even if the target is a process. Threads may be created and destroyed
 * but as long as cgroup_mutex is not dropped, no new css_set can be put
 * into play and the preloaded css_sets are guaranteed to cover all
 * migrations.
 */
static void cgroup_migrate_add_src(struct css_set *src_cset,
				   struct cgroup *dst_cgrp,
				   struct list_head *preloaded_csets)
{
	struct cgroup *src_cgrp;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_lock);

	/*
	 * If ->dead, @src_cset is associated with one or more dead cgroups
	 * and doesn't contain any migratable tasks. Ignore it early so
	 * that the rest of migration path doesn't get confused by it.
	 */
	if (src_cset->dead)
		return;

	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);

	if (!list_empty(&src_cset->mg_preload_node))
		return;

	WARN_ON(src_cset->mg_src_cgrp);
	WARN_ON(src_cset->mg_dst_cgrp);
	WARN_ON(!list_empty(&src_cset->mg_tasks));
	WARN_ON(!list_empty(&src_cset->mg_node));

	src_cset->mg_src_cgrp = src_cgrp;
	src_cset->mg_dst_cgrp = dst_cgrp;
	get_css_set(src_cset);
	list_add(&src_cset->mg_preload_node, preloaded_csets);
}
/**
 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
 * @preloaded_csets: list of preloaded source css_sets
 *
 * Tasks are about to be moved and all the source css_sets have been
 * preloaded to @preloaded_csets. This function looks up and pins all
 * destination css_sets, links each to its source, and appends them to
 * @preloaded_csets.
 *
 * This function must be called after cgroup_migrate_add_src() has been
 * called on each migration source css_set. After migration is performed
 * using cgroup_migrate(), cgroup_migrate_finish() must be called on
 * @preloaded_csets.
 */
static int cgroup_migrate_prepare_dst(struct list_head *preloaded_csets)
{
	LIST_HEAD(csets);
	struct css_set *src_cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	/* look up the dst cset for each src cset and link it to src */
	list_for_each_entry_safe(src_cset, tmp_cset, preloaded_csets, mg_preload_node) {
		struct css_set *dst_cset;

		dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp);
		if (!dst_cset)
			goto err;

		WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);

		/*
		 * If src cset equals dst, it's a noop. Drop the src.
		 * cgroup_migrate() will skip the cset too. Note that we
		 * can't handle src == dst as some nodes are used by both.
		 */
		if (src_cset == dst_cset) {
			src_cset->mg_src_cgrp = NULL;
			src_cset->mg_dst_cgrp = NULL;
			list_del_init(&src_cset->mg_preload_node);
			put_css_set(src_cset);
			put_css_set(dst_cset);
			continue;
		}

		src_cset->mg_dst_cset = dst_cset;

		if (list_empty(&dst_cset->mg_preload_node))
			list_add(&dst_cset->mg_preload_node, &csets);
		else
			put_css_set(dst_cset);
	}

	list_splice_tail(&csets, preloaded_csets);
	return 0;
err:
	cgroup_migrate_finish(&csets);
	return -ENOMEM;
}
/**
 * cgroup_migrate - migrate a process or task to a cgroup
 * @leader: the leader of the process or the task to migrate
 * @threadgroup: whether @leader points to the whole process or a single task
 * @root: cgroup root migration is taking place on
 *
 * Migrate a process or task denoted by @leader. If migrating a process,
 * the caller must be holding cgroup_threadgroup_rwsem. The caller is also
 * responsible for invoking cgroup_migrate_add_src() and
 * cgroup_migrate_prepare_dst() on the targets before invoking this
 * function and following up with cgroup_migrate_finish().
 *
 * As long as a controller's ->can_attach() doesn't fail, this function is
 * guaranteed to succeed. This means that, excluding ->can_attach()
 * failure, when migrating multiple targets, the success or failure can be
 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
 * actually starting migrating.
 */
static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
			  struct cgroup_root *root)
{
	struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);
	struct task_struct *task;

	/*
	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	task = leader;
	do {
		cgroup_taskset_add(task, &tset);
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	spin_unlock_irq(&css_set_lock);

	return cgroup_taskset_migrate(&tset, root);
}
/**
 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
 * @dst_cgrp: the cgroup to attach to
 * @leader: the task or the leader of the threadgroup to be attached
 * @threadgroup: attach the whole threadgroup?
 *
 * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
 */
static int cgroup_attach_task(struct cgroup *dst_cgrp,
			      struct task_struct *leader, bool threadgroup)
{
	LIST_HEAD(preloaded_csets);
	struct task_struct *task;
	int ret;

	if (!cgroup_may_migrate_to(dst_cgrp))
		return -EBUSY;

	/* look up all src csets */
	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	task = leader;
	do {
		cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
				       &preloaded_csets);
		if (!threadgroup)
			break;
	} while_each_thread(leader, task);
	rcu_read_unlock();
	spin_unlock_irq(&css_set_lock);

	/* prepare dst csets and commit */
	ret = cgroup_migrate_prepare_dst(&preloaded_csets);
	if (!ret)
		ret = cgroup_migrate(leader, threadgroup, dst_cgrp->root);

	cgroup_migrate_finish(&preloaded_csets);

	if (!ret)
		trace_cgroup_attach_task(dst_cgrp, leader, threadgroup);

	return ret;
}
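
/*
 * The canonical calling sequence, as cgroup_attach_task() above shows
 * (sketch for illustration; all under cgroup_mutex, with sources added
 * under css_set_lock):
 *
 *	cgroup_migrate_add_src(cset, dst_cgrp, &preloaded);	// pin sources
 *	cgroup_migrate_prepare_dst(&preloaded);			// pin/link dsts
 *	cgroup_migrate(leader, threadgroup, dst_cgrp->root);	// commit
 *	cgroup_migrate_finish(&preloaded);			// unpin
 */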
static int cgroup_procs_write_permission(struct task_struct *task,
					 struct cgroup *dst_cgrp,
					 struct kernfs_open_file *of)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = get_task_cred(task);
	int ret = 0;

	/*
	 * even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;

	if (!ret && cgroup_on_dfl(dst_cgrp)) {
		struct super_block *sb = of->file->f_path.dentry->d_sb;
		struct cgroup *cgrp;
		struct inode *inode;

		spin_lock_irq(&css_set_lock);
		cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
		spin_unlock_irq(&css_set_lock);

		while (!cgroup_is_descendant(dst_cgrp, cgrp))
			cgrp = cgroup_parent(cgrp);

		ret = -ENOMEM;
		inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
		if (inode) {
			ret = inode_permission(inode, MAY_WRITE);
			iput(inode);
		}
	}

	put_cred(tcred);
	return ret;
}
/*
 * Find the task_struct of the task to attach by vpid and pass it along to the
 * function to attach either it or all tasks in its threadgroup. Will lock
 * cgroup_mutex and threadgroup.
 */
static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
				    size_t nbytes, loff_t off, bool threadgroup)
{
	struct task_struct *tsk;
	struct cgroup_subsys *ss;
	struct cgroup *cgrp;
	pid_t pid;
	int ssid, ret;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	percpu_down_write(&cgroup_threadgroup_rwsem);
	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			ret = -ESRCH;
			goto out_unlock_rcu;
		}
	} else {
		tsk = current;
	}

	if (threadgroup)
		tsk = tsk->group_leader;

	/*
	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
	 * trapped in a cpuset, or RT worker may be born in a cgroup
	 * with no rt_runtime allocated. Just say no.
	 */
	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out_unlock_rcu;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = cgroup_procs_write_permission(tsk, cgrp, of);
	if (!ret)
		ret = cgroup_attach_task(cgrp, tsk, threadgroup);

	put_task_struct(tsk);
	goto out_unlock_threadgroup;

out_unlock_rcu:
	rcu_read_unlock();
out_unlock_threadgroup:
	percpu_up_write(&cgroup_threadgroup_rwsem);
	for_each_subsys(ss, ssid)
		if (ss->post_attach)
			ss->post_attach();
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}
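
/*
 * From userland (hypothetical paths, for illustration), this path is
 * reached by writes such as:
 *
 *	# echo 1234 > /sys/fs/cgroup/memory/mygrp/tasks	       (one thread)
 *	# echo 1234 > /sys/fs/cgroup/memory/mygrp/cgroup.procs (whole group)
 *
 * Writing "0" means the writing task itself.
 */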
/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, true);
}
static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}
static void cgroup_print_ss_mask(struct seq_file *seq, u16 ss_mask)
{
	struct cgroup_subsys *ss;
	bool printed = false;
	int ssid;

	do_each_subsys_mask(ss, ssid, ss_mask) {
		if (printed)
			seq_putc(seq, ' ');
		seq_printf(seq, "%s", ss->name);
		printed = true;
	} while_each_subsys_mask();
	if (printed)
		seq_putc(seq, '\n');
}

/* show controllers which are enabled from the parent */
static int cgroup_controllers_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgroup_control(cgrp));
	return 0;
}

/* show controllers which are enabled for a given cgroup's children */
static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	cgroup_print_ss_mask(seq, cgrp->subtree_control);
	return 0;
}
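
/*
 * Example output (illustrative only; the exact set depends on kernel
 * config): on the v2 hierarchy the two files backed by the handlers
 * above might read
 *
 *	# cat cgroup.controllers
 *	io memory pids
 *	# cat cgroup.subtree_control
 *	memory pids
 */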
/**
 * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
 * @cgrp: root of the subtree to update csses for
 *
 * @cgrp's control masks have changed and its subtree's css associations
 * need to be updated accordingly. This function looks up all css_sets
 * which are attached to the subtree, creates the matching updated css_sets
 * and migrates the tasks to the new ones.
 */
static int cgroup_update_dfl_csses(struct cgroup *cgrp)
{
	LIST_HEAD(preloaded_csets);
	struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);
	struct cgroup_subsys_state *d_css;
	struct cgroup *dsct;
	struct css_set *src_cset;
	int ret;

	lockdep_assert_held(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* look up all csses currently attached to @cgrp's subtree */
	spin_lock_irq(&css_set_lock);
	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &dsct->cset_links, cset_link)
			cgroup_migrate_add_src(link->cset, dsct,
					       &preloaded_csets);
	}
	spin_unlock_irq(&css_set_lock);

	/* NULL dst indicates self on default hierarchy */
	ret = cgroup_migrate_prepare_dst(&preloaded_csets);
	if (ret)
		goto out_finish;

	spin_lock_irq(&css_set_lock);
	list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
		struct task_struct *task, *ntask;

		/* src_csets precede dst_csets, break on the first dst_cset */
		if (!src_cset->mg_src_cgrp)
			break;

		/* all tasks in src_csets need to be migrated */
		list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
			cgroup_taskset_add(task, &tset);
	}
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_taskset_migrate(&tset, cgrp->root);
out_finish:
	cgroup_migrate_finish(&preloaded_csets);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	return ret;
}
/**
 * cgroup_lock_and_drain_offline - lock cgroup_mutex and drain offlined csses
 * @cgrp: root of the target subtree
 *
 * Because css offlining is asynchronous, userland may try to re-enable a
 * controller while the previous css is still around. This function grabs
 * cgroup_mutex and drains the previous css instances of @cgrp's subtree.
 */
static void cgroup_lock_and_drain_offline(struct cgroup *cgrp)
	__acquires(&cgroup_mutex)
{
	struct cgroup *dsct;
	struct cgroup_subsys_state *d_css;
	struct cgroup_subsys *ss;
	int ssid;

restart:
	mutex_lock(&cgroup_mutex);

	cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
		for_each_subsys(ss, ssid) {
			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
			DEFINE_WAIT(wait);

			if (!css || !percpu_ref_is_dying(&css->refcnt))
				continue;

			cgroup_get(dsct);
			prepare_to_wait(&dsct->offline_waitq, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&cgroup_mutex);
			schedule();
			finish_wait(&dsct->offline_waitq, &wait);

			cgroup_put(dsct);
			goto restart;
		}
	}
}
/**
 * cgroup_save_control - save control masks of a subtree
 * @cgrp: root of the target subtree
 *
 * Save ->subtree_control and ->subtree_ss_mask to the respective old_
 * prefixed fields for @cgrp's subtree including @cgrp itself.
 */
static void cgroup_save_control(struct cgroup *cgrp)
{
	struct cgroup *dsct;
	struct cgroup_subsys_state *d_css;

	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
		dsct->old_subtree_control = dsct->subtree_control;
		dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
	}
}

/**
 * cgroup_propagate_control - refresh control masks of a subtree
 * @cgrp: root of the target subtree
 *
 * For @cgrp and its subtree, ensure ->subtree_ss_mask matches
 * ->subtree_control and propagate controller availability through the
 * subtree so that descendants don't have unavailable controllers enabled.
 */
static void cgroup_propagate_control(struct cgroup *cgrp)
{
	struct cgroup *dsct;
	struct cgroup_subsys_state *d_css;

	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
		dsct->subtree_control &= cgroup_control(dsct);
		dsct->subtree_ss_mask =
			cgroup_calc_subtree_ss_mask(dsct->subtree_control,
						    cgroup_ss_mask(dsct));
	}
}

/**
 * cgroup_restore_control - restore control masks of a subtree
 * @cgrp: root of the target subtree
 *
 * Restore ->subtree_control and ->subtree_ss_mask from the respective old_
 * prefixed fields for @cgrp's subtree including @cgrp itself.
 */
static void cgroup_restore_control(struct cgroup *cgrp)
{
	struct cgroup *dsct;
	struct cgroup_subsys_state *d_css;

	cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
		dsct->subtree_control = dsct->old_subtree_control;
		dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
	}
}
static bool css_visible(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;
	struct cgroup *cgrp = css->cgroup;

	if (cgroup_control(cgrp) & (1 << ss->id))
		return true;
	if (!(cgroup_ss_mask(cgrp) & (1 << ss->id)))
		return false;
	return cgroup_on_dfl(cgrp) && ss->implicit_on_dfl;
}

/**
 * cgroup_apply_control_enable - enable or show csses according to control
 * @cgrp: root of the target subtree
 *
 * Walk @cgrp's subtree and create new csses or make the existing ones
 * visible.  A css is created invisible if it's being implicitly enabled
 * through dependency.  An invisible css is made visible when the userland
 * explicitly enables it.
 *
 * Returns 0 on success, -errno on failure.  On failure, csses which have
 * been processed already aren't cleaned up.  The caller is responsible for
 * cleaning up with cgroup_apply_control_disable().
 */
static int cgroup_apply_control_enable(struct cgroup *cgrp)
{
	struct cgroup *dsct;
	struct cgroup_subsys_state *d_css;
	struct cgroup_subsys *ss;
	int ssid, ret;

	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
		for_each_subsys(ss, ssid) {
			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);

			WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt));

			if (!(cgroup_ss_mask(dsct) & (1 << ss->id)))
				continue;

			if (!css) {
				css = css_create(dsct, ss);
				if (IS_ERR(css))
					return PTR_ERR(css);
			}

			if (css_visible(css)) {
				ret = css_populate_dir(css);
				if (ret)
					return ret;
			}
		}
	}

	return 0;
}

/**
 * cgroup_apply_control_disable - kill or hide csses according to control
 * @cgrp: root of the target subtree
 *
 * Walk @cgrp's subtree and kill and hide csses so that they match
 * cgroup_ss_mask() and cgroup_visible_mask().
 *
 * A css is hidden when the userland requests it to be disabled while other
 * subsystems are still depending on it.  The css must not actively control
 * resources and be in the vanilla state if it's made visible again later.
 * Controllers which may be depended upon should provide ->css_reset() for
 * this purpose.
 */
static void cgroup_apply_control_disable(struct cgroup *cgrp)
{
	struct cgroup *dsct;
	struct cgroup_subsys_state *d_css;
	struct cgroup_subsys *ss;
	int ssid;

	cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
		for_each_subsys(ss, ssid) {
			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);

			WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt));

			if (!css)
				continue;

			if (css->parent &&
			    !(cgroup_ss_mask(dsct) & (1 << ss->id))) {
				kill_css(css);
			} else if (!css_visible(css)) {
				css_clear_dir(css);
				if (ss->css_reset)
					ss->css_reset(css);
			}
		}
	}
}

/**
 * cgroup_apply_control - apply control mask updates to the subtree
 * @cgrp: root of the target subtree
 *
 * Subsystems can be enabled and disabled in a subtree using the following
 * steps.
 *
 * 1. Call cgroup_save_control() to stash the current state.
 * 2. Update ->subtree_control masks in the subtree as desired.
 * 3. Call cgroup_apply_control() to apply the changes.
 * 4. Optionally perform other related operations.
 * 5. Call cgroup_finalize_control() to finish up.
 *
 * This function implements step 3 and propagates the mask changes
 * throughout @cgrp's subtree, updates csses accordingly and performs
 * process migrations.
 */
static int cgroup_apply_control(struct cgroup *cgrp)
{
	int ret;

	cgroup_propagate_control(cgrp);

	ret = cgroup_apply_control_enable(cgrp);
	if (ret)
		return ret;

	/*
	 * At this point, cgroup_e_css() results reflect the new csses
	 * making the following cgroup_update_dfl_csses() properly update
	 * css associations of all tasks in the subtree.
	 */
	ret = cgroup_update_dfl_csses(cgrp);
	if (ret)
		return ret;

	return 0;
}

/**
 * cgroup_finalize_control - finalize control mask update
 * @cgrp: root of the target subtree
 * @ret: the result of the update
 *
 * Finalize control mask update.  See cgroup_apply_control() for more info.
 */
static void cgroup_finalize_control(struct cgroup *cgrp, int ret)
{
	if (ret) {
		cgroup_restore_control(cgrp);
		cgroup_propagate_control(cgrp);
	}

	cgroup_apply_control_disable(cgrp);
}
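
/*
 * Illustrative sketch (added, not part of the original file): how a
 * caller drives the five-step protocol documented above.  The function
 * name and mask values are made up; cgroup_subtree_control_write() below
 * is the real in-tree user and follows the same shape.
 */
#if 0
static int example_toggle_controllers(struct cgroup *cgrp, u16 enable,
				      u16 disable)
{
	int ret;

	/* step 1: stash the current masks so a failure can be rolled back */
	cgroup_save_control(cgrp);

	/* step 2: update the desired mask */
	cgrp->subtree_control |= enable;
	cgrp->subtree_control &= ~disable;

	/* steps 3-4: propagate masks, create/show csses, migrate tasks */
	ret = cgroup_apply_control(cgrp);

	/* step 5: roll back on failure, then kill/hide now-unwanted csses */
	cgroup_finalize_control(cgrp, ret);
	return ret;
}
#endif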

/* change the enabled child controllers for a cgroup in the default hierarchy */
static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	u16 enable = 0, disable = 0;
	struct cgroup *cgrp, *child;
	struct cgroup_subsys *ss;
	char *tok;
	int ssid, ret;

	/*
	 * Parse input - space separated list of subsystem names prefixed
	 * with either + or -.
	 */
	buf = strstrip(buf);
	while ((tok = strsep(&buf, " "))) {
		if (tok[0] == '\0')
			continue;
		do_each_subsys_mask(ss, ssid, ~cgrp_dfl_inhibit_ss_mask) {
			if (!cgroup_ssid_enabled(ssid) ||
			    strcmp(tok + 1, ss->name))
				continue;

			if (*tok == '+') {
				enable |= 1 << ssid;
				disable &= ~(1 << ssid);
			} else if (*tok == '-') {
				disable |= 1 << ssid;
				enable &= ~(1 << ssid);
			} else {
				return -EINVAL;
			}
			break;
		} while_each_subsys_mask();
		if (ssid == CGROUP_SUBSYS_COUNT)
			return -EINVAL;
	}

	cgrp = cgroup_kn_lock_live(of->kn, true);
	if (!cgrp)
		return -ENODEV;

	for_each_subsys(ss, ssid) {
		if (enable & (1 << ssid)) {
			if (cgrp->subtree_control & (1 << ssid)) {
				enable &= ~(1 << ssid);
				continue;
			}

			if (!(cgroup_control(cgrp) & (1 << ssid))) {
				ret = -ENOENT;
				goto out_unlock;
			}
		} else if (disable & (1 << ssid)) {
			if (!(cgrp->subtree_control & (1 << ssid))) {
				disable &= ~(1 << ssid);
				continue;
			}

			/* a child has it enabled? */
			cgroup_for_each_live_child(child, cgrp) {
				if (child->subtree_control & (1 << ssid)) {
					ret = -EBUSY;
					goto out_unlock;
				}
			}
		}
	}

	if (!enable && !disable) {
		ret = 0;
		goto out_unlock;
	}

	/*
	 * Except for the root, subtree_control must be zero for a cgroup
	 * with tasks so that child cgroups don't compete against tasks.
	 */
	if (enable && cgroup_parent(cgrp)) {
		struct cgrp_cset_link *link;

		/*
		 * Because namespaces pin csets too, @cgrp->cset_links
		 * might not be empty even when @cgrp is empty.  Walk and
		 * verify each cset.
		 */
		spin_lock_irq(&css_set_lock);

		ret = 0;
		list_for_each_entry(link, &cgrp->cset_links, cset_link) {
			if (css_set_populated(link->cset)) {
				ret = -EBUSY;
				break;
			}
		}

		spin_unlock_irq(&css_set_lock);

		if (ret)
			goto out_unlock;
	}

	/* save and update control masks and prepare csses */
	cgroup_save_control(cgrp);

	cgrp->subtree_control |= enable;
	cgrp->subtree_control &= ~disable;

	ret = cgroup_apply_control(cgrp);

	cgroup_finalize_control(cgrp, ret);

	kernfs_activate(cgrp->kn);
	ret = 0;
out_unlock:
	cgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}
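
/*
 * Added commentary: the accepted input is a space separated list such as
 * "+memory -pids".  Per the parsing above, a token with no +/- prefix or
 * naming an unknown controller fails with -EINVAL, enabling a controller
 * that isn't available in this cgroup fails with -ENOENT, and disabling
 * one still enabled in a child (or enabling one in a populated non-root
 * cgroup) fails with -EBUSY.
 */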

static int cgroup_events_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "populated %d\n",
		   cgroup_is_populated(seq_css(seq)->cgroup));
	return 0;
}

static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
				 size_t nbytes, loff_t off)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of->kn->priv;
	struct cgroup_subsys_state *css;
	int ret;

	if (cft->write)
		return cft->write(of, buf, nbytes, off);

	/*
	 * kernfs guarantees that a file isn't deleted with operations in
	 * flight, which means that the matching css is and stays alive and
	 * doesn't need to be pinned.  The RCU locking is not necessary
	 * either.  It's just for the convenience of using cgroup_css().
	 */
	rcu_read_lock();
	css = cgroup_css(cgrp, cft->ss);
	rcu_read_unlock();

	if (cft->write_u64) {
		unsigned long long v;
		ret = kstrtoull(buf, 0, &v);
		if (!ret)
			ret = cft->write_u64(css, cft, v);
	} else if (cft->write_s64) {
		long long v;
		ret = kstrtoll(buf, 0, &v);
		if (!ret)
			ret = cft->write_s64(css, cft, v);
	} else {
		ret = -EINVAL;
	}

	return ret ?: nbytes;
}

static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
{
	return seq_cft(seq)->seq_start(seq, ppos);
}

static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return seq_cft(seq)->seq_next(seq, v, ppos);
}

static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
{
	seq_cft(seq)->seq_stop(seq, v);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cftype *cft = seq_cft(m);
	struct cgroup_subsys_state *css = seq_css(m);

	if (cft->seq_show)
		return cft->seq_show(m, arg);

	if (cft->read_u64)
		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
	else if (cft->read_s64)
		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
	else
		return -EINVAL;
	return 0;
}

static struct kernfs_ops cgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_show		= cgroup_seqfile_show,
};

static struct kernfs_ops cgroup_kf_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= cgroup_file_write,
	.seq_start		= cgroup_seqfile_start,
	.seq_next		= cgroup_seqfile_next,
	.seq_stop		= cgroup_seqfile_stop,
	.seq_show		= cgroup_seqfile_show,
};
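
/*
 * Illustrative sketch (added, not part of the original file): a minimal
 * cftype whose I/O is dispatched through cgroup_kf_single_ops above.
 * Without ->seq_start, cgroup_init_cftypes() picks the single_ops
 * variant; cgroup_seqfile_show() then falls back to ->read_u64 and
 * cgroup_file_write() to ->write_u64.  All example_* names are made up.
 */
#if 0
static u64 example_limit_read(struct cgroup_subsys_state *css,
			      struct cftype *cft)
{
	return 42;	/* placeholder: report the current limit */
}

static int example_limit_write(struct cgroup_subsys_state *css,
			       struct cftype *cft, u64 val)
{
	return 0;	/* placeholder: apply the new limit */
}

static struct cftype example_files[] = {
	{
		.name = "limit",
		.read_u64 = example_limit_read,
		.write_u64 = example_limit_write,
	},
	{ }	/* terminate */
};
#endif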

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			 const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * This isn't a proper migration and its usefulness is very
	 * limited.  Disallow on the default hierarchy.
	 */
	if (cgroup_on_dfl(cgrp))
		return -EPERM;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		trace_cgroup_rename(cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

/* set uid and gid of cgroup dirs and files to that of the creator */
static int cgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
			       .ia_uid = current_fsuid(),
			       .ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
			   struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];
	struct kernfs_node *kn;
	struct lock_class_key *key = NULL;
	int ret;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	key = &cft->lockdep_key;
#endif
	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
				  cgroup_file_mode(cft), 0, cft->kf_ops, cft,
				  NULL, key);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = cgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	if (cft->file_offset) {
		struct cgroup_file *cfile = (void *)css + cft->file_offset;

		spin_lock_irq(&cgroup_file_kn_lock);
		cfile->kn = kn;
		spin_unlock_irq(&cgroup_file_kn_lock);
	}

	return 0;
}

/**
 * cgroup_addrm_files - add or remove files to a cgroup directory
 * @css: the target css
 * @cgrp: the target cgroup (usually css->cgroup)
 * @cfts: array of cftypes to be added
 * @is_add: whether to add or remove
 *
 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
 * For removals, this function never fails.
 */
static int cgroup_addrm_files(struct cgroup_subsys_state *css,
			      struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add)
{
	struct cftype *cft, *cft_end = NULL;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

restart:
	for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) {
		/* does cft->flags tell us to skip this file on @cgrp? */
		if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
			continue;
		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
			continue;

		if (is_add) {
			ret = cgroup_add_file(css, cgrp, cft);
			if (ret) {
				pr_warn("%s: failed to add %s, err=%d\n",
					__func__, cft->name, ret);
				cft_end = cft;
				is_add = false;
				goto restart;
			}
		} else {
			cgroup_rm_file(cgrp, cft);
		}
	}
	return ret;
}

static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
{
	LIST_HEAD(pending);
	struct cgroup_subsys *ss = cfts[0].ss;
	struct cgroup *root = &ss->root->cgrp;
	struct cgroup_subsys_state *css;
	int ret = 0;

	lockdep_assert_held(&cgroup_mutex);

	/* add/rm files for all cgroups created before */
	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
		struct cgroup *cgrp = css->cgroup;

		if (!(css->flags & CSS_VISIBLE))
			continue;

		ret = cgroup_addrm_files(css, cgrp, cfts, is_add);
		if (ret)
			break;
	}

	if (is_add && !ret)
		kernfs_activate(root->kn);
	return ret;
}

static void cgroup_exit_cftypes(struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* free copy for custom atomic_write_len, see init_cftypes() */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
			kfree(cft->kf_ops);
		cft->kf_ops = NULL;
		cft->ss = NULL;

		/* revert flags set by cgroup core while adding @cfts */
		cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
	}
}

static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		struct kernfs_ops *kf_ops;

		WARN_ON(cft->ss || cft->kf_ops);

		if (cft->seq_start)
			kf_ops = &cgroup_kf_ops;
		else
			kf_ops = &cgroup_kf_single_ops;

		/*
		 * Ugh... if @cft wants a custom max_write_len, we need to
		 * make a copy of kf_ops to set its atomic_write_len.
		 */
		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
			kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
			if (!kf_ops) {
				cgroup_exit_cftypes(cfts);
				return -ENOMEM;
			}
			kf_ops->atomic_write_len = cft->max_write_len;
		}

		cft->kf_ops = kf_ops;
		cft->ss = ss;
	}

	return 0;
}

static int cgroup_rm_cftypes_locked(struct cftype *cfts)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!cfts || !cfts[0].ss)
		return -ENOENT;

	list_del(&cfts->node);
	cgroup_apply_cftypes(cfts, false);
	cgroup_exit_cftypes(cfts);
	return 0;
}

/**
 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Unregister @cfts.  Files described by @cfts are removed from all
 * existing cgroups and all future cgroups won't have them either.  This
 * function can be called anytime whether @cfts' subsys is attached or not.
 *
 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
 * registered.
 */
int cgroup_rm_cftypes(struct cftype *cfts)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = cgroup_rm_cftypes_locked(cfts);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_cftypes - add an array of cftypes to a subsystem
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Register @cfts to @ss.  Files described by @cfts are created for all
 * existing cgroups to which @ss is attached and all future cgroups will
 * have them too.  This function can be called anytime whether @ss is
 * attached or not.
 *
 * Returns 0 on successful registration, -errno on failure.  Note that this
 * function currently returns 0 as long as @cfts registration is successful
 * even if some file creation attempts on existing cgroups fail.
 */
static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	int ret;

	if (!cgroup_ssid_enabled(ss->id))
		return 0;

	if (!cfts || cfts[0].name[0] == '\0')
		return 0;

	ret = cgroup_init_cftypes(ss, cfts);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	list_add_tail(&cfts->node, &ss->cfts);
	ret = cgroup_apply_cftypes(cfts, true);
	if (ret)
		cgroup_rm_cftypes_locked(cfts);

	mutex_unlock(&cgroup_mutex);
	return ret;
}

/**
 * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Similar to cgroup_add_cftypes() but the added files are only used for
 * the default hierarchy.
 */
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
		cft->flags |= __CFTYPE_ONLY_ON_DFL;
	return cgroup_add_cftypes(ss, cfts);
}

/**
 * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Similar to cgroup_add_cftypes() but the added files are only used for
 * the legacy hierarchies.
 */
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype *cft;

	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
		cft->flags |= __CFTYPE_NOT_ON_DFL;
	return cgroup_add_cftypes(ss, cfts);
}
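
/*
 * Illustrative sketch (added, not part of the original file): how a
 * controller would register its interface files, reusing the made-up
 * example_files array from the sketch after the kernfs_ops above;
 * example_cgrp_subsys is equally hypothetical.
 */
#if 0
static int __init example_register_files(void)
{
	/* legacy (v1) hierarchies only; cgroup_add_dfl_cftypes() for v2 */
	return cgroup_add_legacy_cftypes(&example_cgrp_subsys, example_files);
}
#endif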

/**
 * cgroup_file_notify - generate a file modified event for a cgroup_file
 * @cfile: target cgroup_file
 *
 * @cfile must have been obtained by setting cftype->file_offset.
 */
void cgroup_file_notify(struct cgroup_file *cfile)
{
	unsigned long flags;

	spin_lock_irqsave(&cgroup_file_kn_lock, flags);
	if (cfile->kn)
		kernfs_notify(cfile->kn);
	spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
}
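
/*
 * Added commentary: "cgroup.events" below is the in-tree example of this
 * pairing - its cftype sets .file_offset = offsetof(struct cgroup,
 * events_file) so cgroup_add_file() records the kernfs node, and
 * populated-state changes elsewhere in this file are then signalled with
 * cgroup_file_notify(&cgrp->events_file), waking up poll()ers.
 */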

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.  The returned number can be
 * higher than the actual number of tasks due to css_set references from
 * namespace roots and temporary usages.
 */
static int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += atomic_read(&link->cset->refcount);
	spin_unlock_irq(&css_set_lock);
	return count;
}

/**
 * css_next_child - find the next child of a given css
 * @pos: the current position (%NULL to initiate traversal)
 * @parent: css whose children to walk
 *
 * This function returns the next child of @parent and should be called
 * under either cgroup_mutex or RCU read lock.  The only requirement is
 * that @parent and @pos are accessible.  The next sibling is guaranteed to
 * be returned regardless of their states.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/*
	 * @pos could already have been unlinked from the sibling list.
	 * Once a cgroup is removed, its ->sibling.next is no longer
	 * updated when its next sibling changes.  CSS_RELEASED is set when
	 * @pos is taken off list, at which time its next pointer is valid,
	 * and, as releases are serialized, the one pointed to by the next
	 * pointer is guaranteed to not have started release yet.  This
	 * implies that if we observe !CSS_RELEASED on @pos in this RCU
	 * critical section, the one pointed to by its next pointer is
	 * guaranteed to not have finished its RCU grace period even if we
	 * have dropped rcu_read_lock() inbetween iterations.
	 *
	 * If @pos has CSS_RELEASED set, its next pointer can't be
	 * dereferenced; however, as each css is given a monotonically
	 * increasing unique serial number and always appended to the
	 * sibling list, the next one can be found by walking the parent's
	 * children until the first css with higher serial number than
	 * @pos's.  While this path can be slower, it happens iff iteration
	 * races against release and the race window is very small.
	 */
	if (!pos) {
		next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
	} else if (likely(!(pos->flags & CSS_RELEASED))) {
		next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
	} else {
		list_for_each_entry_rcu(next, &parent->children, sibling)
			if (next->serial_nr > pos->serial_nr)
				break;
	}

	/*
	 * @next, if not pointing to the head, can be dereferenced and is
	 * the next sibling.
	 */
	if (&next->sibling != &parent->children)
		return next;
	return NULL;
}

/**
 * css_next_descendant_pre - find the next descendant for pre-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_pre().  Find the next descendant
 * to visit for pre-order traversal of @root's descendants.  @root is
 * included in the iteration and the first node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of @root.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
			struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit @root */
	if (!pos)
		return root;

	/* visit the first child if exists */
	next = css_next_child(NULL, pos);
	if (next)
		return next;

	/* no child, visit my or the closest ancestor's next sibling */
	while (pos != root) {
		next = css_next_child(pos, pos->parent);
		if (next)
			return next;
		pos = pos->parent;
	}

	return NULL;
}
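
/*
 * Illustrative sketch (added, not part of the original file): a typical
 * pre-order subtree walk under RCU using the iterator above via the
 * css_for_each_descendant_pre() wrapper.  example_walk_subtree() is a
 * made-up name.
 */
#if 0
static void example_walk_subtree(struct cgroup_subsys_state *root_css)
{
	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_pre(pos, root_css) {
		/* @root_css itself is visited first */
		pr_debug("visiting css of cgroup %p\n", pos->cgroup);
	}
	rcu_read_unlock();
}
#endif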

/**
 * css_rightmost_descendant - return the rightmost descendant of a css
 * @pos: css of interest
 *
 * Return the rightmost descendant of @pos.  If there's no descendant, @pos
 * is returned.  This can be used during pre-order traversal to skip
 * subtree of @pos.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct rightmost descendant as
 * long as @pos is accessible.
 */
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last, *tmp;

	cgroup_assert_mutex_or_rcu_locked();

	do {
		last = pos;
		/* ->prev isn't RCU safe, walk ->next till the end */
		pos = NULL;
		css_for_each_child(tmp, last)
			pos = tmp;
	} while (pos);

	return last;
}

static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last;

	do {
		last = pos;
		pos = css_next_child(NULL, pos);
	} while (pos);

	return last;
}

/**
 * css_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_post().  Find the next descendant
 * to visit for post-order traversal of @root's descendants.  @root is
 * included in the iteration and the last node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of
 * @root.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit leftmost descendant which may be @root */
	if (!pos)
		return css_leftmost_descendant(root);

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
	next = css_next_child(pos, pos->parent);
	if (next)
		return css_leftmost_descendant(next);

	/* no sibling left, visit parent */
	return pos->parent;
}
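
/*
 * Worked example (added commentary): for a root A with children B and C,
 * where C has a child D, the pre-order walk visits A, B, C, D while the
 * post-order walk visits B, D, C, A - each css only after all of its
 * descendants.
 */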

/**
 * css_has_online_children - does a css have online children
 * @css: the target css
 *
 * Returns %true if @css has any online children; otherwise, %false.  This
 * function can be called from any context but the caller is responsible
 * for synchronizing against on/offlining as necessary.
 */
bool css_has_online_children(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys_state *child;
	bool ret = false;

	rcu_read_lock();
	css_for_each_child(child, css) {
		if (child->flags & CSS_ONLINE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/**
 * css_task_iter_advance_css_set - advance a task iterator to the next css_set
 * @it: the iterator to advance
 *
 * Advance @it to the next css_set to walk.
 */
static void css_task_iter_advance_css_set(struct css_task_iter *it)
{
	struct list_head *l = it->cset_pos;
	struct cgrp_cset_link *link;
	struct css_set *cset;

	lockdep_assert_held(&css_set_lock);

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == it->cset_head) {
			it->cset_pos = NULL;
			it->task_pos = NULL;
			return;
		}

		if (it->ss) {
			cset = container_of(l, struct css_set,
					    e_cset_node[it->ss->id]);
		} else {
			link = list_entry(l, struct cgrp_cset_link, cset_link);
			cset = link->cset;
		}
	} while (!css_set_populated(cset));

	it->cset_pos = l;

	if (!list_empty(&cset->tasks))
		it->task_pos = cset->tasks.next;
	else
		it->task_pos = cset->mg_tasks.next;

	it->tasks_head = &cset->tasks;
	it->mg_tasks_head = &cset->mg_tasks;

	/*
	 * We don't keep css_sets locked across iteration steps and thus
	 * need to take steps to ensure that iteration can be resumed after
	 * the lock is re-acquired.  Iteration is performed at two levels -
	 * css_sets and tasks in them.
	 *
	 * Once created, a css_set never leaves its cgroup lists, so a
	 * pinned css_set is guaranteed to stay put and we can resume
	 * iteration afterwards.
	 *
	 * Tasks may leave @cset across iteration steps.  This is resolved
	 * by registering each iterator with the css_set currently being
	 * walked and making css_set_move_task() advance iterators whose
	 * next task is leaving.
	 */
	if (it->cur_cset) {
		list_del(&it->iters_node);
		put_css_set_locked(it->cur_cset);
	}
	get_css_set(cset);
	it->cur_cset = cset;
	list_add(&it->iters_node, &cset->task_iters);
}

static void css_task_iter_advance(struct css_task_iter *it)
{
	struct list_head *l = it->task_pos;

	lockdep_assert_held(&css_set_lock);
	WARN_ON_ONCE(!l);

	/*
	 * Advance iterator to find next entry.  cset->tasks is consumed
	 * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
	 * next cset.
	 */
	l = l->next;

	if (l == it->tasks_head)
		l = it->mg_tasks_head->next;

	if (l == it->mg_tasks_head)
		css_task_iter_advance_css_set(it);
	else
		it->task_pos = l;
}

/**
 * css_task_iter_start - initiate task iteration
 * @css: the css to walk tasks of
 * @it: the task iterator to use
 *
 * Initiate iteration through the tasks of @css.  The caller can call
 * css_task_iter_next() to walk through the tasks until the function
 * returns NULL.  On completion of iteration, css_task_iter_end() must be
 * called.
 */
void css_task_iter_start(struct cgroup_subsys_state *css,
			 struct css_task_iter *it)
{
	/* no one should try to iterate before mounting cgroups */
	WARN_ON_ONCE(!use_task_css_set_links);

	memset(it, 0, sizeof(*it));

	spin_lock_irq(&css_set_lock);

	it->ss = css->ss;

	if (it->ss)
		it->cset_pos = &css->cgroup->e_csets[css->ss->id];
	else
		it->cset_pos = &css->cgroup->cset_links;

	it->cset_head = it->cset_pos;

	css_task_iter_advance_css_set(it);

	spin_unlock_irq(&css_set_lock);
}

/**
 * css_task_iter_next - return the next task for the iterator
 * @it: the task iterator being iterated
 *
 * The "next" function for task iteration.  @it should have been
 * initialized via css_task_iter_start().  Returns NULL when the iteration
 * reaches the end.
 */
struct task_struct *css_task_iter_next(struct css_task_iter *it)
{
	if (it->cur_task) {
		put_task_struct(it->cur_task);
		it->cur_task = NULL;
	}

	spin_lock_irq(&css_set_lock);

	if (it->task_pos) {
		it->cur_task = list_entry(it->task_pos, struct task_struct,
					  cg_list);
		get_task_struct(it->cur_task);
		css_task_iter_advance(it);
	}

	spin_unlock_irq(&css_set_lock);

	return it->cur_task;
}

/**
 * css_task_iter_end - finish task iteration
 * @it: the task iterator to finish
 *
 * Finish task iteration started by css_task_iter_start().
 */
void css_task_iter_end(struct css_task_iter *it)
{
	if (it->cur_cset) {
		spin_lock_irq(&css_set_lock);
		list_del(&it->iters_node);
		put_css_set_locked(it->cur_cset);
		spin_unlock_irq(&css_set_lock);
	}

	if (it->cur_task)
		put_task_struct(it->cur_task);
}
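
/*
 * Illustrative sketch (added, not part of the original file): the
 * canonical start/next/end pattern, here just counting tasks.
 * cgroup_transfer_tasks() below uses the same pattern to pluck tasks one
 * at a time.
 */
#if 0
static int example_count_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *task;
	int n = 0;

	css_task_iter_start(css, &it);
	while ((task = css_task_iter_next(&it)))
		n++;
	css_task_iter_end(&it);

	return n;
}
#endif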

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	LIST_HEAD(preloaded_csets);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (!cgroup_may_migrate_to(to))
		return -EBUSY;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&preloaded_csets);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, &it);
		task = css_task_iter_next(&it);
		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, to->root);
			if (!ret)
				trace_cgroup_transfer_tasks(to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&preloaded_csets);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks").  We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted.  doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	kvfree(p);
}

/*
 * Used to destroy all pidlists lingering waiting for destroy timer.  None
 * should be left afterwards.
 */
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1.  trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
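
/*
 * Worked example (added commentary): for the sorted input {3, 3, 5, 7, 7}
 * the loop above compacts the array in place to {3, 5, 7, ...} and
 * returns 3; only the first `dest` entries are meaningful afterwards.
 */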

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 *
 * All this extra complexity was caused by the original implementation
 * committing to an entirely unnecessary property.  In the long term, we
 * want to do away with it.  Explicitly scramble sort order if on the
 * default hierarchy so that no such expectation exists in the new
 * interface.
 *
 * Scrambling is done by swapping every two consecutive bits, which is
 * non-identity one-to-one mapping which disturbs sort order sufficiently.
 */
static pid_t pid_fry(pid_t pid)
{
	unsigned a = pid & 0x55555555;
	unsigned b = pid & 0xAAAAAAAA;

	return (a << 1) | (b >> 1);
}

static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
{
	if (cgroup_on_dfl(cgrp))
		return pid_fry(pid);
	else
		return pid;
}

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static int fried_cmppid(const void *a, const void *b)
{
	return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
}
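
/*
 * Worked example (added commentary): pid_fry() swaps each adjacent bit
 * pair, so 1 (0b01) becomes 2 (0b10), 2 becomes 1, 3 (0b11) stays 3 and
 * 4 (0b100) becomes 8 (0b1000).  Applying it twice restores the original
 * pid, and ordering by fried value (fried_cmppid) intentionally differs
 * from plain numeric order.
 */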

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks);
 * the caller must hold @cgrp->pidlist_mutex.  Returns NULL if we're out
 * of memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	if (cgroup_on_dfl(cgrp))
		sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
	else
		sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

/*
 * seq_file methods for the tasks/procs files.  The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start).  Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed inbetween.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
				index = mid;
				break;
			} else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = cgroup_pid_fry(cgrp, *iter);
	return iter;
}
102a775e 4821static void cgroup_pidlist_stop(struct seq_file *s, void *v)
cc31edce 4822{
2bd59d48 4823 struct kernfs_open_file *of = s->private;
5d22444f 4824 struct cgroup_pidlist *l = of->priv;
62236858 4825
5d22444f 4826 if (l)
4827 mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
04502365 4828 CGROUP_PIDLIST_DESTROY_DELAY);
7da11279 4829 mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
cc31edce 4830}
4831
102a775e 4832static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
cc31edce 4833{
2bd59d48 4834 struct kernfs_open_file *of = s->private;
5d22444f 4835 struct cgroup_pidlist *l = of->priv;
102a775e 4836 pid_t *p = v;
4837 pid_t *end = l->list + l->length;
cc31edce 4838 /*
4839 * Advance to the next pid in the array. If this goes off the
4840 * end, we're done
4841 */
4842 p++;
4843 if (p >= end) {
4844 return NULL;
4845 } else {
7da11279 4846 *pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
cc31edce 4847 return p;
4848 }
4849}
4850
102a775e 4851static int cgroup_pidlist_show(struct seq_file *s, void *v)
cc31edce 4852{
94ff212d 4853 seq_printf(s, "%d\n", *(int *)v);
4854
4855 return 0;
cc31edce 4856}
bbcb81d0 4857
182446d0 4858static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
4859 struct cftype *cft)
81a6a5cd 4860{
182446d0 4861 return notify_on_release(css->cgroup);
81a6a5cd 4862}
4863
182446d0 4864static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
4865 struct cftype *cft, u64 val)
6379c106 4866{
6379c106 4867 if (val)
182446d0 4868 set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
6379c106 4869 else
182446d0 4870 clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
6379c106 4871 return 0;
4872}
4873
182446d0 4874static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
4875 struct cftype *cft)
97978e6d 4876{
182446d0 4877 return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
97978e6d 4878}
4879
182446d0 4880static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
4881 struct cftype *cft, u64 val)
97978e6d 4882{
4883 if (val)
182446d0 4884 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
97978e6d 4885 else
182446d0 4886 clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
97978e6d 4887 return 0;
4888}
4889
a14c6874 4890/* cgroup core interface files for the default hierarchy */
4891static struct cftype cgroup_dfl_base_files[] = {
81a6a5cd 4892 {
d5c56ced 4893 .name = "cgroup.procs",
6f60eade 4894 .file_offset = offsetof(struct cgroup, procs_file),
6612f05b 4895 .seq_start = cgroup_pidlist_start,
4896 .seq_next = cgroup_pidlist_next,
4897 .seq_stop = cgroup_pidlist_stop,
4898 .seq_show = cgroup_pidlist_show,
5d22444f 4899 .private = CGROUP_FILE_PROCS,
acbef755 4900 .write = cgroup_procs_write,
102a775e 4901 },
f8f22e53 4902 {
4903 .name = "cgroup.controllers",
f8f22e53
TH
4904 .seq_show = cgroup_controllers_show,
4905 },
4906 {
4907 .name = "cgroup.subtree_control",
f8f22e53 4908 .seq_show = cgroup_subtree_control_show,
451af504 4909 .write = cgroup_subtree_control_write,
f8f22e53 4910 },
842b597e 4911 {
4a07c222 4912 .name = "cgroup.events",
a14c6874 4913 .flags = CFTYPE_NOT_ON_ROOT,
6f60eade 4914 .file_offset = offsetof(struct cgroup, events_file),
4a07c222 4915 .seq_show = cgroup_events_show,
842b597e 4916 },
a14c6874 4917 { } /* terminate */
4918};
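(For orientation: individual controllers describe their own interface files with the same struct cftype fields used in the two arrays above. A minimal sketch, assuming a hypothetical "widget" controller; widget_usage_read and widget_files are illustrative names, not part of this file:)

        static u64 widget_usage_read(struct cgroup_subsys_state *css,
                                     struct cftype *cft)
        {
                /* a real controller would derive this from its css state */
                return 0;
        }

        static struct cftype widget_files[] = {
                {
                        .name = "widget.usage",
                        .read_u64 = widget_usage_read,
                },
                { }     /* terminate */
        };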
d5c56ced 4919
a14c6874 4920/* cgroup core interface files for the legacy hierarchies */
4921static struct cftype cgroup_legacy_base_files[] = {
4922 {
4923 .name = "cgroup.procs",
4924 .seq_start = cgroup_pidlist_start,
4925 .seq_next = cgroup_pidlist_next,
4926 .seq_stop = cgroup_pidlist_stop,
4927 .seq_show = cgroup_pidlist_show,
4928 .private = CGROUP_FILE_PROCS,
4929 .write = cgroup_procs_write,
a14c6874 4930 },
4931 {
4932 .name = "cgroup.clone_children",
4933 .read_u64 = cgroup_clone_children_read,
4934 .write_u64 = cgroup_clone_children_write,
4935 },
4936 {
4937 .name = "cgroup.sane_behavior",
4938 .flags = CFTYPE_ONLY_ON_ROOT,
4939 .seq_show = cgroup_sane_behavior_show,
4940 },
d5c56ced 4941 {
4942 .name = "tasks",
6612f05b 4943 .seq_start = cgroup_pidlist_start,
4944 .seq_next = cgroup_pidlist_next,
4945 .seq_stop = cgroup_pidlist_stop,
4946 .seq_show = cgroup_pidlist_show,
5d22444f 4947 .private = CGROUP_FILE_TASKS,
acbef755 4948 .write = cgroup_tasks_write,
d5c56ced 4949 },
4950 {
4951 .name = "notify_on_release",
d5c56ced 4952 .read_u64 = cgroup_read_notify_on_release,
4953 .write_u64 = cgroup_write_notify_on_release,
4954 },
6e6ff25b 4955 {
4956 .name = "release_agent",
a14c6874 4957 .flags = CFTYPE_ONLY_ON_ROOT,
2da8ca82 4958 .seq_show = cgroup_release_agent_show,
451af504 4959 .write = cgroup_release_agent_write,
5f469907 4960 .max_write_len = PATH_MAX - 1,
6e6ff25b 4961 },
db0416b6 4962 { } /* terminate */
bbcb81d0 4963};
4964
0c21ead1 4965/*
4966 * css destruction is four-stage process.
4967 *
4968 * 1. Destruction starts. Killing of the percpu_ref is initiated.
4969 * Implemented in kill_css().
4970 *
4971 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
ec903c0c 4972 * and thus css_tryget_online() is guaranteed to fail, the css can be
4973 * offlined by invoking offline_css(). After offlining, the base ref is
4974 * put. Implemented in css_killed_work_fn().
0c21ead1 4975 *
4976 * 3. When the percpu_ref reaches zero, the only possible remaining
4977 * accessors are inside RCU read sections. css_release() schedules the
4978 * RCU callback.
4979 *
4980 * 4. After the grace period, the css can be freed. Implemented in
4981 * css_free_work_fn().
4982 *
4983 * It is actually hairier because both steps 2 and 4 require process context
4984 * and thus involve punting to css->destroy_work adding two additional
4985 * steps to the already complex sequence.
4986 */
35ef10da 4987static void css_free_work_fn(struct work_struct *work)
48ddbe19 4988{
4989 struct cgroup_subsys_state *css =
35ef10da 4990 container_of(work, struct cgroup_subsys_state, destroy_work);
01e58659 4991 struct cgroup_subsys *ss = css->ss;
0c21ead1 4992 struct cgroup *cgrp = css->cgroup;
48ddbe19 4993
9a1049da 4994 percpu_ref_exit(&css->refcnt);
4995
01e58659 4996 if (ss) {
9d755d33 4997 /* css free path */
8bb5ef79 4998 struct cgroup_subsys_state *parent = css->parent;
01e58659 4999 int id = css->id;
5000
01e58659 5001 ss->css_free(css);
5002 cgroup_idr_remove(&ss->css_idr, id);
9d755d33 5003 cgroup_put(cgrp);
8bb5ef79 5004
5005 if (parent)
5006 css_put(parent);
9d755d33 5007 } else {
5008 /* cgroup free path */
5009 atomic_dec(&cgrp->root->nr_cgrps);
5010 cgroup_pidlist_destroy_all(cgrp);
971ff493 5011 cancel_work_sync(&cgrp->release_agent_work);
9d755d33 5012
d51f39b0 5013 if (cgroup_parent(cgrp)) {
9d755d33 5014 /*
5015 * We get a ref to the parent, and put the ref when
5016 * this cgroup is being freed, so it's guaranteed
5017 * that the parent won't be destroyed before its
5018 * children.
5019 */
d51f39b0 5020 cgroup_put(cgroup_parent(cgrp));
9d755d33 5021 kernfs_put(cgrp->kn);
5022 kfree(cgrp);
5023 } else {
5024 /*
5025 * This is root cgroup's refcnt reaching zero,
5026 * which indicates that the root should be
5027 * released.
5028 */
5029 cgroup_destroy_root(cgrp->root);
5030 }
5031 }
48ddbe19 5032}
5033
0c21ead1 5034static void css_free_rcu_fn(struct rcu_head *rcu_head)
d3daf28d 5035{
5036 struct cgroup_subsys_state *css =
0c21ead1 5037 container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
d3daf28d 5038
35ef10da 5039 INIT_WORK(&css->destroy_work, css_free_work_fn);
e5fca243 5040 queue_work(cgroup_destroy_wq, &css->destroy_work);
48ddbe19 5041}
5042
25e15d83 5043static void css_release_work_fn(struct work_struct *work)
d3daf28d 5044{
5045 struct cgroup_subsys_state *css =
25e15d83 5046 container_of(work, struct cgroup_subsys_state, destroy_work);
15a4c835 5047 struct cgroup_subsys *ss = css->ss;
9d755d33 5048 struct cgroup *cgrp = css->cgroup;
15a4c835 5049
1fed1b2e 5050 mutex_lock(&cgroup_mutex);
5051
de3f0341 5052 css->flags |= CSS_RELEASED;
1fed1b2e 5053 list_del_rcu(&css->sibling);
5054
9d755d33 5055 if (ss) {
5056 /* css release path */
01e58659 5057 cgroup_idr_replace(&ss->css_idr, NULL, css->id);
7d172cc8 5058 if (ss->css_released)
5059 ss->css_released(css);
9d755d33 5060 } else {
5061 /* cgroup release path */
ed1777de 5062 trace_cgroup_release(cgrp);
5063
9d755d33 5064 cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
5065 cgrp->id = -1;
a4189487 5066
5067 /*
5068 * There are two control paths which try to determine
5069 * cgroup from dentry without going through kernfs -
5070 * cgroupstats_build() and css_tryget_online_from_dir().
5071 * Those are supported by RCU protecting clearing of
5072 * cgrp->kn->priv backpointer.
5073 */
6cd0f5bb 5074 if (cgrp->kn)
5075 RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
5076 NULL);
30070984 5077
5078 cgroup_bpf_put(cgrp);
9d755d33 5079 }
d3daf28d 5080
1fed1b2e 5081 mutex_unlock(&cgroup_mutex);
5082
0c21ead1 5083 call_rcu(&css->rcu_head, css_free_rcu_fn);
d3daf28d 5084}
5085
d3daf28d 5086static void css_release(struct percpu_ref *ref)
5087{
5088 struct cgroup_subsys_state *css =
5089 container_of(ref, struct cgroup_subsys_state, refcnt);
5090
25e15d83 5091 INIT_WORK(&css->destroy_work, css_release_work_fn);
5092 queue_work(cgroup_destroy_wq, &css->destroy_work);
d3daf28d 5093}
5094
ddfcadab 5095static void init_and_link_css(struct cgroup_subsys_state *css,
5096 struct cgroup_subsys *ss, struct cgroup *cgrp)
ddbcc7e8 5097{
0cb51d71 5098 lockdep_assert_held(&cgroup_mutex);
5099
ddfcadab 5100 cgroup_get(cgrp);
5101
d5c419b6 5102 memset(css, 0, sizeof(*css));
bd89aabc 5103 css->cgroup = cgrp;
72c97e54 5104 css->ss = ss;
8fa3b8d6 5105 css->id = -1;
d5c419b6 5106 INIT_LIST_HEAD(&css->sibling);
5107 INIT_LIST_HEAD(&css->children);
0cb51d71 5108 css->serial_nr = css_serial_nr_next++;
aa226ff4 5109 atomic_set(&css->online_cnt, 0);
0ae78e0b 5110
d51f39b0 5111 if (cgroup_parent(cgrp)) {
5112 css->parent = cgroup_css(cgroup_parent(cgrp), ss);
ddfcadab 5113 css_get(css->parent);
ddfcadab 5114 }
48ddbe19 5115
ca8bdcaf 5116 BUG_ON(cgroup_css(cgrp, ss));
ddbcc7e8 5117}
5118
2a4ac633 5119/* invoke ->css_online() on a new CSS and mark it online if successful */
623f926b 5120static int online_css(struct cgroup_subsys_state *css)
a31f2d3f 5121{
623f926b 5122 struct cgroup_subsys *ss = css->ss;
b1929db4 5123 int ret = 0;
5124
a31f2d3f 5125 lockdep_assert_held(&cgroup_mutex);
5126
92fb9748 5127 if (ss->css_online)
eb95419b 5128 ret = ss->css_online(css);
ae7f164a 5129 if (!ret) {
eb95419b 5130 css->flags |= CSS_ONLINE;
aec25020 5131 rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
aa226ff4 5132
5133 atomic_inc(&css->online_cnt);
5134 if (css->parent)
5135 atomic_inc(&css->parent->online_cnt);
ae7f164a 5136 }
b1929db4 5137 return ret;
a31f2d3f 5138}
5139
2a4ac633 5140/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
623f926b 5141static void offline_css(struct cgroup_subsys_state *css)
a31f2d3f 5142{
623f926b 5143 struct cgroup_subsys *ss = css->ss;
a31f2d3f 5144
5145 lockdep_assert_held(&cgroup_mutex);
5146
5147 if (!(css->flags & CSS_ONLINE))
5148 return;
5149
fa06235b 5150 if (ss->css_reset)
5151 ss->css_reset(css);
5152
d7eeac19 5153 if (ss->css_offline)
eb95419b 5154 ss->css_offline(css);
a31f2d3f 5155
eb95419b 5156 css->flags &= ~CSS_ONLINE;
e3297803 5157 RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
f8f22e53 5158
5159 wake_up_all(&css->cgroup->offline_waitq);
a31f2d3f 5160}
5161
c81c925a 5162/**
6cd0f5bb 5163 * css_create - create a cgroup_subsys_state
c81c925a 5164 * @cgrp: the cgroup new css will be associated with
5165 * @ss: the subsys of new css
5166 *
5167 * Create a new css associated with @cgrp - @ss pair. On success, the new
6cd0f5bb 5168 * css is online and installed in @cgrp. This function doesn't create the
5169 * interface files. Returns 0 on success, -errno on failure.
c81c925a 5170 */
6cd0f5bb 5171static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
5172 struct cgroup_subsys *ss)
c81c925a 5173{
d51f39b0 5174 struct cgroup *parent = cgroup_parent(cgrp);
1fed1b2e 5175 struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
c81c925a 5176 struct cgroup_subsys_state *css;
5177 int err;
5178
c81c925a 5179 lockdep_assert_held(&cgroup_mutex);
5180
1fed1b2e 5181 css = ss->css_alloc(parent_css);
e7e15b87 5182 if (!css)
5183 css = ERR_PTR(-ENOMEM);
c81c925a 5184 if (IS_ERR(css))
6cd0f5bb 5185 return css;
c81c925a 5186
ddfcadab 5187 init_and_link_css(css, ss, cgrp);
a2bed820 5188
2aad2a86 5189 err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
c81c925a 5190 if (err)
3eb59ec6 5191 goto err_free_css;
c81c925a 5192
cf780b7d 5193 err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
15a4c835 5194 if (err < 0)
b00c52da 5195 goto err_free_css;
15a4c835 5196 css->id = err;
c81c925a 5197
15a4c835 5198 /* @css is ready to be brought online now, make it visible */
1fed1b2e 5199 list_add_tail_rcu(&css->sibling, &parent_css->children);
15a4c835 5200 cgroup_idr_replace(&ss->css_idr, css, css->id);
c81c925a 5201
5202 err = online_css(css);
5203 if (err)
1fed1b2e 5204 goto err_list_del;
94419627 5205
c81c925a 5206 if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
d51f39b0 5207 cgroup_parent(parent)) {
ed3d261b 5208 pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
a2a1f9ea 5209 current->comm, current->pid, ss->name);
c81c925a 5210 if (!strcmp(ss->name, "memory"))
ed3d261b 5211 pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
c81c925a 5212 ss->warned_broken_hierarchy = true;
5213 }
5214
6cd0f5bb 5215 return css;
c81c925a 5216
1fed1b2e 5217err_list_del:
5218 list_del_rcu(&css->sibling);
3eb59ec6 5219err_free_css:
a2bed820 5220 call_rcu(&css->rcu_head, css_free_rcu_fn);
6cd0f5bb 5221 return ERR_PTR(err);
c81c925a 5222}
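(Callers of css_create() must follow the ERR_PTR convention it returns rather than checking for NULL; a minimal caller-side sketch, assuming cgroup_mutex is held as the function requires:)

        struct cgroup_subsys_state *css = css_create(cgrp, ss);

        if (IS_ERR(css))
                return PTR_ERR(css);
        /* on success the css is already online and installed in cgrp */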
5223
07cd1294 5224/*
5225 * The returned cgroup is fully initialized including its control mask, but
5226 * it isn't associated with its kernfs_node and doesn't have the control
5227 * mask applied.
5228 */
a5bca215 5229static struct cgroup *cgroup_create(struct cgroup *parent)
ddbcc7e8 5230{
a5bca215 5231 struct cgroup_root *root = parent->root;
a5bca215 5232 struct cgroup *cgrp, *tcgrp;
5233 int level = parent->level + 1;
03970d3c 5234 int ret;
ddbcc7e8 5235
0a950f65 5236 /* allocate the cgroup and its ID, 0 is reserved for the root */
b11cfb58 5237 cgrp = kzalloc(sizeof(*cgrp) +
5238 sizeof(cgrp->ancestor_ids[0]) * (level + 1), GFP_KERNEL);
a5bca215 5239 if (!cgrp)
5240 return ERR_PTR(-ENOMEM);
0ab02ca8 5241
2aad2a86 5242 ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
9d755d33 5243 if (ret)
5244 goto out_free_cgrp;
5245
0ab02ca8 5246 /*
5247 * Temporarily set the pointer to NULL, so idr_find() won't return
5248 * a half-baked cgroup.
5249 */
cf780b7d 5250 cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
0ab02ca8 5251 if (cgrp->id < 0) {
ba0f4d76 5252 ret = -ENOMEM;
9d755d33 5253 goto out_cancel_ref;
976c06bc 5254 }
5255
cc31edce 5256 init_cgroup_housekeeping(cgrp);
ddbcc7e8 5257
9d800df1 5258 cgrp->self.parent = &parent->self;
ba0f4d76 5259 cgrp->root = root;
b11cfb58 5260 cgrp->level = level;
5261
5262 for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp))
5263 cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;
ddbcc7e8 5264
b6abdb0e 5265 if (notify_on_release(parent))
5266 set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
5267
2260e7fc 5268 if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
5269 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
97978e6d 5270
0cb51d71 5271 cgrp->self.serial_nr = css_serial_nr_next++;
53fa5261 5272
4e139afc 5273 /* allocation complete, commit to creation */
d5c419b6 5274 list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
3c9c825b 5275 atomic_inc(&root->nr_cgrps);
59f5296b 5276 cgroup_get(parent);
415cf07a 5277
0d80255e 5278 /*
5279 * @cgrp is now fully operational. If something fails after this
5280 * point, it'll be released via the normal destruction path.
5281 */
6fa4918d 5282 cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
4e96ee8e 5283
bd53d617 5284 /*
5285 * On the default hierarchy, a child doesn't automatically inherit
667c2491 5286 * subtree_control from the parent. Each is configured manually.
bd53d617 5287 */
03970d3c 5288 if (!cgroup_on_dfl(cgrp))
5531dc91 5289 cgrp->subtree_control = cgroup_control(cgrp);
03970d3c 5290
30070984 5291 if (parent)
5292 cgroup_bpf_inherit(cgrp, parent);
5293
03970d3c 5294 cgroup_propagate_control(cgrp);
5295
a5bca215 5296 return cgrp;
5297
5298out_cancel_ref:
5299 percpu_ref_exit(&cgrp->self.refcnt);
5300out_free_cgrp:
5301 kfree(cgrp);
5302 return ERR_PTR(ret);
a5bca215 5303}
5304
5305static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
5306 umode_t mode)
5307{
5308 struct cgroup *parent, *cgrp;
a5bca215 5309 struct kernfs_node *kn;
03970d3c 5310 int ret;
a5bca215 5311
5312 /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
5313 if (strchr(name, '\n'))
5314 return -EINVAL;
5315
945ba199 5316 parent = cgroup_kn_lock_live(parent_kn, false);
a5bca215 5317 if (!parent)
5318 return -ENODEV;
5319
5320 cgrp = cgroup_create(parent);
5321 if (IS_ERR(cgrp)) {
5322 ret = PTR_ERR(cgrp);
5323 goto out_unlock;
5324 }
5325
195e9b6c 5326 /* create the directory */
5327 kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
5328 if (IS_ERR(kn)) {
5329 ret = PTR_ERR(kn);
5330 goto out_destroy;
5331 }
5332 cgrp->kn = kn;
5333
5334 /*
5335 * This extra ref will be put in cgroup_free_fn() and guarantees
5336 * that @cgrp->kn is always accessible.
5337 */
5338 kernfs_get(kn);
5339
5340 ret = cgroup_kn_set_ugid(kn);
5341 if (ret)
5342 goto out_destroy;
5343
334c3679 5344 ret = css_populate_dir(&cgrp->self);
195e9b6c
TH
5345 if (ret)
5346 goto out_destroy;
5347
03970d3c
TH
5348 ret = cgroup_apply_control_enable(cgrp);
5349 if (ret)
5350 goto out_destroy;
195e9b6c 5351
ed1777de 5352 trace_cgroup_mkdir(cgrp);
5353
195e9b6c 5354 /* let's create and online css's */
2bd59d48 5355 kernfs_activate(kn);
ddbcc7e8 5356
ba0f4d76 5357 ret = 0;
5358 goto out_unlock;
ddbcc7e8 5359
a5bca215 5360out_destroy:
5361 cgroup_destroy_locked(cgrp);
ba0f4d76 5362out_unlock:
a9746d8d 5363 cgroup_kn_unlock(parent_kn);
ba0f4d76 5364 return ret;
ddbcc7e8 5365}
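(cgroup_mkdir() is what ultimately services mkdir(2) on a cgroupfs directory, via the kernfs_syscall_ops table below. An illustrative userspace counterpart, assuming the hierarchy is mounted at the hypothetical path /sys/fs/cgroup/unified:)

        #include <stdio.h>
        #include <sys/stat.h>

        int main(void)
        {
                /* reaches cgroup_mkdir() through kernfs */
                if (mkdir("/sys/fs/cgroup/unified/mygroup", 0755) == -1) {
                        perror("mkdir");
                        return 1;
                }
                return 0;
        }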
5366
223dbc38 5367/*
5368 * This is called when the refcnt of a css is confirmed to be killed.
249f3468 5369 * css_tryget_online() is now guaranteed to fail. Tell the subsystem to
5370 * initiate destruction and put the css ref from kill_css().
223dbc38 5371 */
5372static void css_killed_work_fn(struct work_struct *work)
d3daf28d 5373{
223dbc38 5374 struct cgroup_subsys_state *css =
5375 container_of(work, struct cgroup_subsys_state, destroy_work);
d3daf28d 5376
f20104de 5377 mutex_lock(&cgroup_mutex);
09a503ea 5378
aa226ff4 5379 do {
5380 offline_css(css);
5381 css_put(css);
5382 /* @css can't go away while we're holding cgroup_mutex */
5383 css = css->parent;
5384 } while (css && atomic_dec_and_test(&css->online_cnt));
5385
5386 mutex_unlock(&cgroup_mutex);
d3daf28d 5387}
5388
223dbc38 5389/* css kill confirmation processing requires process context, bounce */
5390static void css_killed_ref_fn(struct percpu_ref *ref)
d3daf28d 5391{
5392 struct cgroup_subsys_state *css =
5393 container_of(ref, struct cgroup_subsys_state, refcnt);
5394
aa226ff4 5395 if (atomic_dec_and_test(&css->online_cnt)) {
5396 INIT_WORK(&css->destroy_work, css_killed_work_fn);
5397 queue_work(cgroup_destroy_wq, &css->destroy_work);
5398 }
d3daf28d 5399}
5400
f392e51c 5401/**
5402 * kill_css - destroy a css
5403 * @css: css to destroy
5404 *
5405 * This function initiates destruction of @css by removing cgroup interface
5406 * files and putting its base reference. ->css_offline() will be invoked
ec903c0c 5407 * asynchronously once css_tryget_online() is guaranteed to fail and when
5408 * the reference count reaches zero, @css will be released.
f392e51c 5409 */
5410static void kill_css(struct cgroup_subsys_state *css)
edae0c33 5411{
01f6474c 5412 lockdep_assert_held(&cgroup_mutex);
94419627 5413
2bd59d48 5414 /*
5415 * This must happen before css is disassociated with its cgroup.
5416 * See seq_css() for details.
5417 */
334c3679 5418 css_clear_dir(css);
3c14f8b4 5419
edae0c33 5420 /*
5421 * Killing would put the base ref, but we need to keep it alive
5422 * until after ->css_offline().
5423 */
5424 css_get(css);
5425
5426 /*
5427 * cgroup core guarantees that, by the time ->css_offline() is
5428 * invoked, no new css reference will be given out via
ec903c0c 5429 * css_tryget_online(). We can't simply call percpu_ref_kill() and
edae0c33 5430 * proceed to offlining css's because percpu_ref_kill() doesn't
5431 * guarantee that the ref is seen as killed on all CPUs on return.
5432 *
5433 * Use percpu_ref_kill_and_confirm() to get notifications as each
5434 * css is confirmed to be seen as killed on all CPUs.
5435 */
5436 percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
d3daf28d 5437}
5438
5439/**
5440 * cgroup_destroy_locked - the first stage of cgroup destruction
5441 * @cgrp: cgroup to be destroyed
5442 *
5443 * css's make use of percpu refcnts whose killing latency shouldn't be
5444 * exposed to userland and are RCU protected. Also, cgroup core needs to
ec903c0c 5445 * guarantee that css_tryget_online() won't succeed by the time
5446 * ->css_offline() is invoked. To satisfy all the requirements,
5447 * destruction is implemented in the following two steps.
d3daf28d 5448 *
5449 * s1. Verify @cgrp can be destroyed and mark it dying. Remove all
5450 * userland visible parts and start killing the percpu refcnts of
5451 * css's. Set up so that the next stage will be kicked off once all
5452 * the percpu refcnts are confirmed to be killed.
5453 *
5454 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
5455 * rest of destruction. Once all cgroup references are gone, the
5456 * cgroup is RCU-freed.
5457 *
5458 * This function implements s1. After this step, @cgrp is gone as far as
5459 * the userland is concerned and a new cgroup with the same name may be
5460 * created. As cgroup doesn't care about the names internally, this
5461 * doesn't cause any problem.
5462 */
42809dd4 5463static int cgroup_destroy_locked(struct cgroup *cgrp)
5464 __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
ddbcc7e8 5465{
2bd59d48 5466 struct cgroup_subsys_state *css;
2b021cbf 5467 struct cgrp_cset_link *link;
1c6727af 5468 int ssid;
ddbcc7e8 5469
42809dd4 5470 lockdep_assert_held(&cgroup_mutex);
5471
91486f61 5472 /*
5473 * Only migration can raise populated from zero and we're already
5474 * holding cgroup_mutex.
5475 */
5476 if (cgroup_is_populated(cgrp))
ddbcc7e8 5477 return -EBUSY;
a043e3b2 5478
bb78a92f 5479 /*
d5c419b6 5480 * Make sure there are no live children. We can't test emptiness of
5481 * ->self.children as dead children linger on it while being
5482 * drained; otherwise, "rmdir parent/child parent" may fail.
bb78a92f 5483 */
f3d46500 5484 if (css_has_online_children(&cgrp->self))
bb78a92f 5485 return -EBUSY;
5486
455050d2 5487 /*
2b021cbf 5488 * Mark @cgrp and the associated csets dead. The former prevents
5489 * further task migration and child creation by disabling
5490 * cgroup_lock_live_group(). The latter makes the csets ignored by
5491 * the migration path.
455050d2 5492 */
184faf32 5493 cgrp->self.flags &= ~CSS_ONLINE;
ddbcc7e8 5494
82d6489d 5495 spin_lock_irq(&css_set_lock);
2b021cbf 5496 list_for_each_entry(link, &cgrp->cset_links, cset_link)
5497 link->cset->dead = true;
82d6489d 5498 spin_unlock_irq(&css_set_lock);
2b021cbf 5499
249f3468 5500 /* initiate massacre of all css's */
1c6727af 5501 for_each_css(css, ssid, cgrp)
5502 kill_css(css);
455050d2 5503
455050d2 5504 /*
01f6474c 5505 * Remove @cgrp directory along with the base files. @cgrp has an
5506 * extra ref on its kn.
f20104de 5507 */
01f6474c 5508 kernfs_remove(cgrp->kn);
f20104de 5509
d51f39b0 5510 check_for_release(cgroup_parent(cgrp));
2bd59d48 5511
249f3468 5512 /* put the base reference */
9d755d33 5513 percpu_ref_kill(&cgrp->self.refcnt);
455050d2 5514
ea15f8cc 5515 return 0;
5516};
5517
2bd59d48 5518static int cgroup_rmdir(struct kernfs_node *kn)
42809dd4 5519{
a9746d8d 5520 struct cgroup *cgrp;
2bd59d48 5521 int ret = 0;
42809dd4 5522
945ba199 5523 cgrp = cgroup_kn_lock_live(kn, false);
a9746d8d 5524 if (!cgrp)
5525 return 0;
42809dd4 5526
a9746d8d 5527 ret = cgroup_destroy_locked(cgrp);
2bb566cb 5528
ed1777de 5529 if (!ret)
5530 trace_cgroup_rmdir(cgrp);
5531
a9746d8d 5532 cgroup_kn_unlock(kn);
42809dd4 5533 return ret;
8e3f6541 5534}
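(Conversely, rmdir(2) on a cgroup directory lands in cgroup_rmdir(); per cgroup_destroy_locked() above it fails with EBUSY while the cgroup is still populated or has live children. An illustrative userspace sketch; the path is hypothetical:)

        #include <errno.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                if (rmdir("/sys/fs/cgroup/unified/mygroup") == -1 &&
                    errno == EBUSY)
                        fprintf(stderr, "cgroup still populated or has children\n");
                return 0;
        }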
5535
2bd59d48 5536static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
5537 .remount_fs = cgroup_remount,
5538 .show_options = cgroup_show_options,
5539 .mkdir = cgroup_mkdir,
5540 .rmdir = cgroup_rmdir,
5541 .rename = cgroup_rename,
4f41fc59 5542 .show_path = cgroup_show_path,
2bd59d48 5543};
5544
15a4c835 5545static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
ddbcc7e8 5546{
ddbcc7e8 5547 struct cgroup_subsys_state *css;
cfe36bde 5548
a5ae9899 5549 pr_debug("Initializing cgroup subsys %s\n", ss->name);
ddbcc7e8 5550
648bb56d 5551 mutex_lock(&cgroup_mutex);
5552
15a4c835 5553 idr_init(&ss->css_idr);
0adb0704 5554 INIT_LIST_HEAD(&ss->cfts);
8e3f6541 5555
3dd06ffa 5556 /* Create the root cgroup state for this subsystem */
5557 ss->root = &cgrp_dfl_root;
5558 css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
ddbcc7e8 5559 /* We don't handle early failures gracefully */
5560 BUG_ON(IS_ERR(css));
ddfcadab 5561 init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
3b514d24 5562
5563 /*
5564 * Root csses are never destroyed and we can't initialize
5565 * percpu_ref during early init. Disable refcnting.
5566 */
5567 css->flags |= CSS_NO_REF;
5568
15a4c835 5569 if (early) {
9395a450 5570 /* allocation can't be done safely during early init */
15a4c835 5571 css->id = 1;
5572 } else {
5573 css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
5574 BUG_ON(css->id < 0);
5575 }
ddbcc7e8 5576
e8d55fde 5577 /* Update the init_css_set to contain a subsys
817929ec 5578 * pointer to this state - since the subsystem is
e8d55fde 5579 * newly registered, all tasks and hence the
3dd06ffa 5580 * init_css_set is in the subsystem's root cgroup. */
aec25020 5581 init_css_set.subsys[ss->id] = css;
ddbcc7e8 5582
cb4a3167 5583 have_fork_callback |= (bool)ss->fork << ss->id;
5584 have_exit_callback |= (bool)ss->exit << ss->id;
afcf6c8b 5585 have_free_callback |= (bool)ss->free << ss->id;
7e47682e 5586 have_canfork_callback |= (bool)ss->can_fork << ss->id;
ddbcc7e8 5587
e8d55fde 5588 /* At system boot, before all subsystems have been
5589 * registered, no tasks have been forked, so we don't
5590 * need to invoke fork callbacks here. */
5591 BUG_ON(!list_empty(&init_task.tasks));
5592
ae7f164a 5593 BUG_ON(online_css(css));
a8638030 5594
cf5d5941 5595 mutex_unlock(&cgroup_mutex);
5596}
cf5d5941 5597
ddbcc7e8 5598/**
a043e3b2 5599 * cgroup_init_early - cgroup initialization at system boot
5600 *
5601 * Initialize cgroups at system boot, and initialize any
5602 * subsystems that request early init.
ddbcc7e8 5603 */
5604int __init cgroup_init_early(void)
5605{
7b9a6ba5 5606 static struct cgroup_sb_opts __initdata opts;
30159ec7 5607 struct cgroup_subsys *ss;
ddbcc7e8 5608 int i;
30159ec7 5609
3dd06ffa 5610 init_cgroup_root(&cgrp_dfl_root, &opts);
3b514d24 5611 cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;
5612
a4ea1cc9 5613 RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
817929ec 5614
3ed80a62 5615 for_each_subsys(ss, i) {
aec25020 5616 WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
63253ad8 5617 "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p id:name=%d:%s\n",
073219e9 5618 i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
aec25020 5619 ss->id, ss->name);
073219e9 5620 WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
5621 "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);
5622
aec25020 5623 ss->id = i;
073219e9 5624 ss->name = cgroup_subsys_name[i];
3e1d2eed 5625 if (!ss->legacy_name)
5626 ss->legacy_name = cgroup_subsys_name[i];
ddbcc7e8 5627
5628 if (ss->early_init)
15a4c835 5629 cgroup_init_subsys(ss, true);
ddbcc7e8 5630 }
5631 return 0;
5632}
5633
6e5c8307 5634static u16 cgroup_disable_mask __initdata;
a3e72739 5635
ddbcc7e8 5636/**
a043e3b2 5637 * cgroup_init - cgroup initialization
5638 *
5639 * Register cgroup filesystem and /proc file, and initialize
5640 * any subsystems that didn't request early init.
ddbcc7e8 5641 */
5642int __init cgroup_init(void)
5643{
30159ec7 5644 struct cgroup_subsys *ss;
035f4f51 5645 int ssid;
ddbcc7e8 5646
6e5c8307 5647 BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 16);
1ed13287 5648 BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
a14c6874 5649 BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
5650 BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
ddbcc7e8 5651
3942a9bd 5652 /*
5653 * The latency of the synchronize_sched() is too high for cgroups,
5654 * avoid it at the cost of forcing all readers into the slow path.
5655 */
5656 rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);
5657
a79a908f 5658 get_user_ns(init_cgroup_ns.user_ns);
5659
54e7b4eb 5660 mutex_lock(&cgroup_mutex);
54e7b4eb 5661
2378d8b8 5662 /*
5663 * Add init_css_set to the hash table so that dfl_root can link to
5664 * it during init.
5665 */
5666 hash_add(css_set_table, &init_css_set.hlist,
5667 css_set_hash(init_css_set.subsys));
82fe9b0d 5668
3dd06ffa 5669 BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
4e96ee8e 5670
54e7b4eb 5671 mutex_unlock(&cgroup_mutex);
5672
172a2c06 5673 for_each_subsys(ss, ssid) {
15a4c835 5674 if (ss->early_init) {
5675 struct cgroup_subsys_state *css =
5676 init_css_set.subsys[ss->id];
5677
5678 css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
5679 GFP_KERNEL);
5680 BUG_ON(css->id < 0);
5681 } else {
5682 cgroup_init_subsys(ss, false);
5683 }
172a2c06 5684
2d8f243a 5685 list_add_tail(&init_css_set.e_cset_node[ssid],
5686 &cgrp_dfl_root.cgrp.e_csets[ssid]);
172a2c06 5687
5688 /*
c731ae1d 5689 * Setting dfl_root subsys_mask needs to consider the
5690 * disabled flag and cftype registration needs kmalloc,
5691 * both of which aren't available during early_init.
172a2c06 5692 */
a3e72739 5693 if (cgroup_disable_mask & (1 << ssid)) {
5694 static_branch_disable(cgroup_subsys_enabled_key[ssid]);
5695 printk(KERN_INFO "Disabling %s control group subsystem\n",
5696 ss->name);
a8ddc821 5697 continue;
a3e72739 5698 }
a8ddc821 5699
223ffb29 5700 if (cgroup_ssid_no_v1(ssid))
5701 printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n",
5702 ss->name);
5703
a8ddc821 5704 cgrp_dfl_root.subsys_mask |= 1 << ss->id;
5705
f6d635ad 5706 if (ss->implicit_on_dfl)
5707 cgrp_dfl_implicit_ss_mask |= 1 << ss->id;
5708 else if (!ss->dfl_cftypes)
a7165264 5709 cgrp_dfl_inhibit_ss_mask |= 1 << ss->id;
5de4fa13 5710
a8ddc821 5711 if (ss->dfl_cftypes == ss->legacy_cftypes) {
5712 WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
5713 } else {
5714 WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
5715 WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
c731ae1d 5716 }
295458e6 5717
5718 if (ss->bind)
5719 ss->bind(init_css_set.subsys[ssid]);
676db4af 5720 }
5721
2378d8b8 5722 /* init_css_set.subsys[] has been updated, re-hash */
5723 hash_del(&init_css_set.hlist);
5724 hash_add(css_set_table, &init_css_set.hlist,
5725 css_set_hash(init_css_set.subsys));
5726
035f4f51 5727 WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
5728 WARN_ON(register_filesystem(&cgroup_fs_type));
67e9c74b 5729 WARN_ON(register_filesystem(&cgroup2_fs_type));
035f4f51 5730 WARN_ON(!proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations));
ddbcc7e8 5731
2bd59d48 5732 return 0;
ddbcc7e8 5733}
b4f48b63 5734
e5fca243 5735static int __init cgroup_wq_init(void)
5736{
5737 /*
5738 * There isn't much point in executing destruction path in
5739 * parallel. Good chunk is serialized with cgroup_mutex anyway.
1a11533f 5740 * Use 1 for @max_active.
e5fca243 5741 *
5742 * We would prefer to do this in cgroup_init() above, but that
5743 * is called before init_workqueues(): so leave this until after.
5744 */
1a11533f 5745 cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
e5fca243 5746 BUG_ON(!cgroup_destroy_wq);
b1a21367 5747
5748 /*
5749 * Used to destroy pidlists and separate to serve as flush domain.
5750 * Cap @max_active to 1 too.
5751 */
5752 cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
5753 0, 1);
5754 BUG_ON(!cgroup_pidlist_destroy_wq);
5755
e5fca243 5756 return 0;
5757}
5758core_initcall(cgroup_wq_init);
5759
a424316c 5760/*
5761 * proc_cgroup_show()
5762 * - Print task's cgroup paths into seq_file, one line for each hierarchy
5763 * - Used for /proc/<pid>/cgroup.
a424316c 5764 */
006f4ac4 5765int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
5766 struct pid *pid, struct task_struct *tsk)
a424316c 5767{
4c737b41 5768 char *buf;
a424316c 5769 int retval;
3dd06ffa 5770 struct cgroup_root *root;
a424316c 5771
5772 retval = -ENOMEM;
e61734c5 5773 buf = kmalloc(PATH_MAX, GFP_KERNEL);
a424316c 5774 if (!buf)
5775 goto out;
5776
a424316c 5777 mutex_lock(&cgroup_mutex);
82d6489d 5778 spin_lock_irq(&css_set_lock);
a424316c 5779
985ed670 5780 for_each_root(root) {
a424316c 5781 struct cgroup_subsys *ss;
bd89aabc 5782 struct cgroup *cgrp;
b85d2040 5783 int ssid, count = 0;
a424316c 5784
a7165264 5785 if (root == &cgrp_dfl_root && !cgrp_dfl_visible)
985ed670 5786 continue;
5787
2c6ab6d2 5788 seq_printf(m, "%d:", root->hierarchy_id);
d98817d4 5789 if (root != &cgrp_dfl_root)
5790 for_each_subsys(ss, ssid)
5791 if (root->subsys_mask & (1 << ssid))
5792 seq_printf(m, "%s%s", count++ ? "," : "",
3e1d2eed 5793 ss->legacy_name);
c6d57f33 5794 if (strlen(root->name))
5795 seq_printf(m, "%sname=%s", count ? "," : "",
5796 root->name);
a424316c 5797 seq_putc(m, ':');
2e91fa7f 5798
7717f7ba 5799 cgrp = task_cgroup_from_root(tsk, root);
2e91fa7f 5800
5801 /*
5802 * On traditional hierarchies, all zombie tasks show up as
5803 * belonging to the root cgroup. On the default hierarchy,
5804 * while a zombie doesn't show up in "cgroup.procs" and
5805 * thus can't be migrated, its /proc/PID/cgroup keeps
5806 * reporting the cgroup it belonged to before exiting. If
5807 * the cgroup is removed before the zombie is reaped,
5808 * " (deleted)" is appended to the cgroup path.
5809 */
5810 if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
4c737b41 5811 retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
a79a908f 5812 current->nsproxy->cgroup_ns);
e0223003 5813 if (retval >= PATH_MAX)
2e91fa7f 5814 retval = -ENAMETOOLONG;
e0223003 5815 if (retval < 0)
2e91fa7f 5816 goto out_unlock;
4c737b41 5817
5818 seq_puts(m, buf);
2e91fa7f 5819 } else {
4c737b41 5820 seq_puts(m, "/");
e61734c5 5821 }
2e91fa7f 5822
2e91fa7f 5823 if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
5824 seq_puts(m, " (deleted)\n");
5825 else
5826 seq_putc(m, '\n');
a424316c 5827 }
5828
006f4ac4 5829 retval = 0;
a424316c 5830out_unlock:
82d6489d 5831 spin_unlock_irq(&css_set_lock);
a424316c 5832 mutex_unlock(&cgroup_mutex);
a424316c 5833 kfree(buf);
5834out:
5835 return retval;
5836}
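(Each line emitted above has the form "hierarchy-id:controller-list:path"; the controller list is empty for the default hierarchy, and " (deleted)" is appended for a zombie whose cgroup is gone. An illustrative /proc/self/cgroup sample; the exact ids, controllers, and paths vary by system:)

        5:cpu,cpuacct:/user.slice
        2:name=systemd:/user.slice/session-1.scope
        0::/init.scope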
5837
a424316c 5838/* Display information about each subsystem and each hierarchy */
5839static int proc_cgroupstats_show(struct seq_file *m, void *v)
5840{
30159ec7 5841 struct cgroup_subsys *ss;
a424316c 5842 int i;
a424316c 5843
8bab8dde 5844 seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
aae8aab4 5845 /*
5846 * ideally we don't want subsystems moving around while we do this.
5847 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
5848 * subsys/hierarchy state.
5849 */
a424316c 5850 mutex_lock(&cgroup_mutex);
30159ec7 5851
5852 for_each_subsys(ss, i)
2c6ab6d2 5853 seq_printf(m, "%s\t%d\t%d\t%d\n",
3e1d2eed 5854 ss->legacy_name, ss->root->hierarchy_id,
fc5ed1e9 5855 atomic_read(&ss->root->nr_cgrps),
5856 cgroup_ssid_enabled(i));
30159ec7 5857
a424316c 5858 mutex_unlock(&cgroup_mutex);
5859 return 0;
5860}
5861
5862static int cgroupstats_open(struct inode *inode, struct file *file)
5863{
9dce07f1 5864 return single_open(file, proc_cgroupstats_show, NULL);
a424316c 5865}
5866
828c0950 5867static const struct file_operations proc_cgroupstats_operations = {
a424316c 5868 .open = cgroupstats_open,
5869 .read = seq_read,
5870 .llseek = seq_lseek,
5871 .release = single_release,
5872};
5873
b4f48b63 5874/**
eaf797ab 5875 * cgroup_fork - initialize cgroup related fields during copy_process()
a043e3b2 5876 * @child: pointer to task_struct of the child being forked.
b4f48b63 5877 *
eaf797ab 5878 * A task is associated with the init_css_set until cgroup_post_fork()
5879 * attaches it to the parent's css_set. Empty cg_list indicates that
5880 * @child isn't holding a reference to its css_set.
b4f48b63 5881 */
5882void cgroup_fork(struct task_struct *child)
5883{
eaf797ab 5884 RCU_INIT_POINTER(child->cgroups, &init_css_set);
817929ec 5885 INIT_LIST_HEAD(&child->cg_list);
b4f48b63 5886}
5887
7e47682e 5888/**
5889 * cgroup_can_fork - called on a new task before the process is exposed
5890 * @child: the task in question.
5891 *
5892 * This calls the subsystem can_fork() callbacks. If the can_fork() callback
5893 * returns an error, the fork aborts with that error code. This allows for
5894 * a cgroup subsystem to conditionally allow or deny new forks.
5895 */
b53202e6 5896int cgroup_can_fork(struct task_struct *child)
7e47682e 5897{
5898 struct cgroup_subsys *ss;
5899 int i, j, ret;
5900
b4e0eeaf 5901 do_each_subsys_mask(ss, i, have_canfork_callback) {
b53202e6 5902 ret = ss->can_fork(child);
7e47682e 5903 if (ret)
5904 goto out_revert;
b4e0eeaf 5905 } while_each_subsys_mask();
7e47682e 5906
5907 return 0;
5908
5909out_revert:
5910 for_each_subsys(ss, j) {
5911 if (j >= i)
5912 break;
5913 if (ss->cancel_fork)
b53202e6 5914 ss->cancel_fork(child);
7e47682e 5915 }
5916
5917 return ret;
5918}
5919
5920/**
5921 * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
5922 * @child: the task in question
5923 *
5924 * This calls the cancel_fork() callbacks if a fork failed *after*
5925 * cgroup_can_fork() succeeded.
5926 */
b53202e6 5927void cgroup_cancel_fork(struct task_struct *child)
7e47682e 5928{
5929 struct cgroup_subsys *ss;
5930 int i;
5931
5932 for_each_subsys(ss, i)
5933 if (ss->cancel_fork)
b53202e6 5934 ss->cancel_fork(child);
7e47682e 5935}
5936
817929ec 5937/**
a043e3b2 5938 * cgroup_post_fork - called on a new task after adding it to the task list
5939 * @child: the task in question
5940 *
5edee61e 5941 * Adds the task to the list running through its css_set if necessary and
5942 * call the subsystem fork() callbacks. Has to be after the task is
5943 * visible on the task list in case we race with the first call to
0942eeee 5944 * css_task_iter_start() - to guarantee that the new task ends up on its
5edee61e 5945 * list.
a043e3b2 5946 */
b53202e6 5947void cgroup_post_fork(struct task_struct *child)
817929ec 5948{
30159ec7 5949 struct cgroup_subsys *ss;
5edee61e 5950 int i;
5951
3ce3230a 5952 /*
251f8c03 5953 * This may race against cgroup_enable_task_cg_lists(). As that
eaf797ab 5954 * function sets use_task_css_set_links before grabbing
5955 * tasklist_lock and we just went through tasklist_lock to add
5956 * @child, it's guaranteed that either we see the set
5957 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
5958 * @child during its iteration.
5959 *
5960 * If we won the race, @child is associated with %current's
f0d9a5f1 5961 * css_set. Grabbing css_set_lock guarantees both that the
eaf797ab
TH
5962 * association is stable, and, on completion of the parent's
5963 * migration, @child is visible in the source of migration or
5964 * already in the destination cgroup. This guarantee is necessary
5965 * when implementing operations which need to migrate all tasks of
5966 * a cgroup to another.
5967 *
251f8c03 5968 * Note that if we lose to cgroup_enable_task_cg_lists(), @child
eaf797ab 5969 * will remain in init_css_set. This is safe because all tasks are
5970 * in the init_css_set before cg_links is enabled and there's no
5971 * operation which transfers all tasks out of init_css_set.
3ce3230a 5972 */
817929ec 5973 if (use_task_css_set_links) {
eaf797ab 5974 struct css_set *cset;
5975
82d6489d 5976 spin_lock_irq(&css_set_lock);
0e1d768f 5977 cset = task_css_set(current);
eaf797ab 5978 if (list_empty(&child->cg_list)) {
eaf797ab 5979 get_css_set(cset);
f6d7d049 5980 css_set_move_task(child, NULL, cset, false);
eaf797ab 5981 }
82d6489d 5982 spin_unlock_irq(&css_set_lock);
817929ec 5983 }
5edee61e 5984
5985 /*
5986 * Call ss->fork(). This must happen after @child is linked on
5987 * css_set; otherwise, @child might change state between ->fork()
5988 * and addition to css_set.
5989 */
b4e0eeaf 5990 do_each_subsys_mask(ss, i, have_fork_callback) {
b53202e6 5991 ss->fork(child);
b4e0eeaf 5992 } while_each_subsys_mask();
817929ec 5993}
5edee61e 5994
b4f48b63 5995/**
5996 * cgroup_exit - detach cgroup from exiting task
5997 * @tsk: pointer to task_struct of exiting process
5998 *
5999 * Description: Detach cgroup from @tsk and release it.
6000 *
6001 * Note that cgroups marked notify_on_release force every task in
6002 * them to take the global cgroup_mutex mutex when exiting.
6003 * This could impact scaling on very large systems. Be reluctant to
6004 * use notify_on_release cgroups where very high task exit scaling
6005 * is required on large systems.
6006 *
0e1d768f 6007 * We set the exiting task's cgroup to the root cgroup (top_cgroup). We
6008 * call cgroup_exit() while the task is still competent to handle
6009 * notify_on_release(), then leave the task attached to the root cgroup in
6010 * each hierarchy for the remainder of its exit. No need to bother with
6011 * init_css_set refcnting. init_css_set never goes away and we can't race
e8604cb4 6012 * with migration path - PF_EXITING is visible to migration path.
b4f48b63 6013 */
1ec41830 6014void cgroup_exit(struct task_struct *tsk)
b4f48b63 6015{
30159ec7 6016 struct cgroup_subsys *ss;
5abb8855 6017 struct css_set *cset;
d41d5a01 6018 int i;
817929ec 6019
6020 /*
0e1d768f 6021 * Unlink from @tsk from its css_set. As migration path can't race
0de0942d 6022 * with us, we can check css_set and cg_list without synchronization.
817929ec 6023 */
0de0942d 6024 cset = task_css_set(tsk);
6025
817929ec 6026 if (!list_empty(&tsk->cg_list)) {
82d6489d 6027 spin_lock_irq(&css_set_lock);
f6d7d049 6028 css_set_move_task(tsk, cset, NULL, false);
82d6489d 6029 spin_unlock_irq(&css_set_lock);
2e91fa7f 6030 } else {
6031 get_css_set(cset);
817929ec 6032 }
6033
cb4a3167 6034 /* see cgroup_post_fork() for details */
b4e0eeaf 6035 do_each_subsys_mask(ss, i, have_exit_callback) {
2e91fa7f 6036 ss->exit(tsk);
b4e0eeaf 6037 } while_each_subsys_mask();
2e91fa7f 6038}
30159ec7 6039
2e91fa7f 6040void cgroup_free(struct task_struct *task)
6041{
6042 struct css_set *cset = task_css_set(task);
afcf6c8b 6043 struct cgroup_subsys *ss;
6044 int ssid;
6045
b4e0eeaf 6046 do_each_subsys_mask(ss, ssid, have_free_callback) {
afcf6c8b 6047 ss->free(task);
b4e0eeaf 6048 } while_each_subsys_mask();
d41d5a01 6049
2e91fa7f 6050 put_css_set(cset);
b4f48b63 6051}
697f4161 6052
bd89aabc 6053static void check_for_release(struct cgroup *cgrp)
81a6a5cd 6054{
27bd4dbb 6055 if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
971ff493 6056 !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
6057 schedule_work(&cgrp->release_agent_work);
81a6a5cd 6058}
6059
81a6a5cd 6060/*
6061 * Notify userspace when a cgroup is released, by running the
6062 * configured release agent with the name of the cgroup (path
6063 * relative to the root of cgroup file system) as the argument.
6064 *
6065 * Most likely, this user command will try to rmdir this cgroup.
6066 *
6067 * This races with the possibility that some other task will be
6068 * attached to this cgroup before it is removed, or that some other
6069 * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
6070 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
6071 * unused, and this cgroup will be reprieved from its death sentence,
6072 * to continue to serve a useful existence. Next time it's released,
6073 * we will get notified again, if it still has 'notify_on_release' set.
6074 *
6075 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
6076 * means only wait until the task is successfully execve()'d. The
6077 * separate release agent task is forked by call_usermodehelper(),
6078 * then control in this thread returns here, without waiting for the
6079 * release agent task. We don't bother to wait because the caller of
6080 * this routine has no use for the exit status of the release agent
6081 * task, so no sense holding our caller up for that.
81a6a5cd 6082 */
81a6a5cd 6083static void cgroup_release_agent(struct work_struct *work)
6084{
971ff493 6085 struct cgroup *cgrp =
6086 container_of(work, struct cgroup, release_agent_work);
4c737b41 6087 char *pathbuf = NULL, *agentbuf = NULL;
971ff493 6088 char *argv[3], *envp[3];
4c737b41 6089 int ret;
971ff493 6090
81a6a5cd 6091 mutex_lock(&cgroup_mutex);
971ff493 6092
6093 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
6094 agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
6095 if (!pathbuf || !agentbuf)
6096 goto out;
6097
82d6489d 6098 spin_lock_irq(&css_set_lock);
4c737b41 6099 ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
82d6489d 6100 spin_unlock_irq(&css_set_lock);
e0223003 6101 if (ret < 0 || ret >= PATH_MAX)
971ff493 6102 goto out;
6103
6104 argv[0] = agentbuf;
4c737b41 6105 argv[1] = pathbuf;
971ff493 6106 argv[2] = NULL;
6107
6108 /* minimal command environment */
6109 envp[0] = "HOME=/";
6110 envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
6111 envp[2] = NULL;
6112
81a6a5cd 6113 mutex_unlock(&cgroup_mutex);
971ff493 6114 call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
3e2cd91a 6115 goto out_free;
971ff493 6116out:
81a6a5cd 6117 mutex_unlock(&cgroup_mutex);
3e2cd91a 6118out_free:
971ff493 6119 kfree(agentbuf);
6120 kfree(pathbuf);
81a6a5cd 6121}
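(Putting the argv/envp setup above together: if release_agent were the hypothetical helper /sbin/cgroup-release and the emptied cgroup were /jobs/job1, the agent would be spawned roughly as:)

        argv: { "/sbin/cgroup-release", "/jobs/job1", NULL }
        envp: { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL }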
8bab8dde 6122
6123static int __init cgroup_disable(char *str)
6124{
30159ec7 6125 struct cgroup_subsys *ss;
8bab8dde 6126 char *token;
30159ec7 6127 int i;
8bab8dde 6128
6129 while ((token = strsep(&str, ",")) != NULL) {
6130 if (!*token)
6131 continue;
be45c900 6132
3ed80a62 6133 for_each_subsys(ss, i) {
3e1d2eed 6134 if (strcmp(token, ss->name) &&
6135 strcmp(token, ss->legacy_name))
6136 continue;
a3e72739 6137 cgroup_disable_mask |= 1 << i;
8bab8dde 6138 }
6139 }
6140 return 1;
6141}
6142__setup("cgroup_disable=", cgroup_disable);
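(Usage example for the boot parameter parsed above; multiple subsystem names are comma-separated, and per the strcmp() pair in the loop either ss->name or ss->legacy_name is accepted:)

        cgroup_disable=memory,blkio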
38460b48 6143
223ffb29 6144static int __init cgroup_no_v1(char *str)
6145{
6146 struct cgroup_subsys *ss;
6147 char *token;
6148 int i;
6149
6150 while ((token = strsep(&str, ",")) != NULL) {
6151 if (!*token)
6152 continue;
6153
6154 if (!strcmp(token, "all")) {
6e5c8307 6155 cgroup_no_v1_mask = U16_MAX;
223ffb29 6156 break;
6157 }
6158
6159 for_each_subsys(ss, i) {
6160 if (strcmp(token, ss->name) &&
6161 strcmp(token, ss->legacy_name))
6162 continue;
6163
6164 cgroup_no_v1_mask |= 1 << i;
6165 }
6166 }
6167 return 1;
6168}
6169__setup("cgroup_no_v1=", cgroup_no_v1);
6170
b77d7b60 6171/**
ec903c0c 6172 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
35cf0836 6173 * @dentry: directory dentry of interest
6174 * @ss: subsystem of interest
b77d7b60 6175 *
5a17f543 6176 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
6177 * to get the corresponding css and return it. If such css doesn't exist
6178 * or can't be pinned, an ERR_PTR value is returned.
e5d1367f 6179 */
ec903c0c 6180struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
6181 struct cgroup_subsys *ss)
e5d1367f 6182{
2bd59d48 6183 struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
f17fc25f 6184 struct file_system_type *s_type = dentry->d_sb->s_type;
2bd59d48 6185 struct cgroup_subsys_state *css = NULL;
e5d1367f 6186 struct cgroup *cgrp;
e5d1367f 6187
35cf0836 6188 /* is @dentry a cgroup dir? */
f17fc25f 6189 if ((s_type != &cgroup_fs_type && s_type != &cgroup2_fs_type) ||
6190 !kn || kernfs_type(kn) != KERNFS_DIR)
e5d1367f 6191 return ERR_PTR(-EBADF);
6192
5a17f543 6193 rcu_read_lock();
6194
2bd59d48 6195 /*
6196 * This path doesn't originate from kernfs and @kn could already
6197 * have been or be removed at any point. @kn->priv is RCU
a4189487 6198 * protected for this access. See css_release_work_fn() for details.
2bd59d48 6199 */
6200 cgrp = rcu_dereference(kn->priv);
6201 if (cgrp)
6202 css = cgroup_css(cgrp, ss);
5a17f543 6203
ec903c0c 6204 if (!css || !css_tryget_online(css))
5a17f543 6205 css = ERR_PTR(-ENOENT);
6206
6207 rcu_read_unlock();
6208 return css;
e5d1367f 6209}
e5d1367f 6210
1cb650b9 6211/**
6212 * css_from_id - lookup css by id
6213 * @id: the cgroup id
6214 * @ss: cgroup subsys to be looked into
6215 *
6216 * Returns the css if there's valid one with @id, otherwise returns NULL.
6217 * Should be called under rcu_read_lock().
6218 */
6219struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
6220{
6fa4918d 6221 WARN_ON_ONCE(!rcu_read_lock_held());
d6ccc55e 6222 return idr_find(&ss->css_idr, id);
e5d1367f 6223}
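(A minimal caller-side sketch for css_from_id(); the rcu_read_lock() requirement comes from the function's documentation above, and the css_tryget_online() step is one illustrative way to take a reference before leaving the RCU section:)

        struct cgroup_subsys_state *css;

        rcu_read_lock();
        css = css_from_id(id, ss);
        if (css && !css_tryget_online(css))
                css = NULL;     /* found but already going offline */
        rcu_read_unlock();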
6224
16af4396 6225/**
6226 * cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path
6227 * @path: path on the default hierarchy
6228 *
6229 * Find the cgroup at @path on the default hierarchy, increment its
6230 * reference count and return it. Returns pointer to the found cgroup on
6231 * success, ERR_PTR(-ENOENT) if @path doesn't exist and ERR_PTR(-ENOTDIR)
6232 * if @path points to a non-directory.
6233 */
6234struct cgroup *cgroup_get_from_path(const char *path)
6235{
6236 struct kernfs_node *kn;
6237 struct cgroup *cgrp;
6238
6239 mutex_lock(&cgroup_mutex);
6240
6241 kn = kernfs_walk_and_get(cgrp_dfl_root.cgrp.kn, path);
6242 if (kn) {
6243 if (kernfs_type(kn) == KERNFS_DIR) {
6244 cgrp = kn->priv;
6245 cgroup_get(cgrp);
6246 } else {
6247 cgrp = ERR_PTR(-ENOTDIR);
6248 }
6249 kernfs_put(kn);
6250 } else {
6251 cgrp = ERR_PTR(-ENOENT);
6252 }
6253
6254 mutex_unlock(&cgroup_mutex);
6255 return cgrp;
6256}
6257EXPORT_SYMBOL_GPL(cgroup_get_from_path);
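(A minimal caller-side sketch for cgroup_get_from_path(); the path is relative to the default hierarchy's root and is hypothetical here. The reference taken on success is dropped with cgroup_put():)

        struct cgroup *cgrp = cgroup_get_from_path("/my/group");

        if (IS_ERR(cgrp))
                return PTR_ERR(cgrp);
        /* ... use cgrp ... */
        cgroup_put(cgrp);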
6258
1f3fe7eb 6259/**
6260 * cgroup_get_from_fd - get a cgroup pointer from a fd
6261 * @fd: fd obtained by open(cgroup2_dir)
6262 *
6263 * Find the cgroup from a fd which should be obtained
6264 * by opening a cgroup directory. Returns a pointer to the
6265 * cgroup on success. ERR_PTR is returned if the cgroup
6266 * cannot be found.
6267 */
6268struct cgroup *cgroup_get_from_fd(int fd)
6269{
6270 struct cgroup_subsys_state *css;
6271 struct cgroup *cgrp;
6272 struct file *f;
6273
6274 f = fget_raw(fd);
6275 if (!f)
6276 return ERR_PTR(-EBADF);
6277
6278 css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
6279 fput(f);
6280 if (IS_ERR(css))
6281 return ERR_CAST(css);
6282
6283 cgrp = css->cgroup;
6284 if (!cgroup_on_dfl(cgrp)) {
6285 cgroup_put(cgrp);
6286 return ERR_PTR(-EBADF);
6287 }
6288
6289 return cgrp;
6290}
6291EXPORT_SYMBOL_GPL(cgroup_get_from_fd);
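(A minimal caller-side sketch for cgroup_get_from_fd(); the fd is assumed to have been handed in from userspace, where it was obtained by open(2) on a cgroup2 directory. As above, the reference is dropped with cgroup_put():)

        struct cgroup *cgrp = cgroup_get_from_fd(fd);

        if (IS_ERR(cgrp))
                return PTR_ERR(cgrp);
        /* ... use cgrp ... */
        cgroup_put(cgrp);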
6292
bd1060a1 6293/*
6294 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
6295 * definition in cgroup-defs.h.
6296 */
6297#ifdef CONFIG_SOCK_CGROUP_DATA
6298
6299#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
6300
3fa4cc9c 6301DEFINE_SPINLOCK(cgroup_sk_update_lock);
bd1060a1 6302static bool cgroup_sk_alloc_disabled __read_mostly;
6303
6304void cgroup_sk_alloc_disable(void)
6305{
6306 if (cgroup_sk_alloc_disabled)
6307 return;
6308 pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
6309 cgroup_sk_alloc_disabled = true;
6310}
6311
6312#else
6313
6314#define cgroup_sk_alloc_disabled false
6315
6316#endif
6317
6318void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
6319{
6320 if (cgroup_sk_alloc_disabled)
6321 return;
6322
d979a39d 6323 /* Socket clone path */
6324 if (skcd->val) {
6325 cgroup_get(sock_cgroup_ptr(skcd));
6326 return;
6327 }
6328
bd1060a1 6329 rcu_read_lock();
6330
6331 while (true) {
6332 struct css_set *cset;
6333
6334 cset = task_css_set(current);
6335 if (likely(cgroup_tryget(cset->dfl_cgrp))) {
6336 skcd->val = (unsigned long)cset->dfl_cgrp;
6337 break;
6338 }
6339 cpu_relax();
6340 }
6341
6342 rcu_read_unlock();
6343}
6344
6345void cgroup_sk_free(struct sock_cgroup_data *skcd)
6346{
6347 cgroup_put(sock_cgroup_ptr(skcd));
6348}
6349
6350#endif /* CONFIG_SOCK_CGROUP_DATA */
6351
a79a908f 6352/* cgroup namespaces */
6353
d08311dd 6354static struct ucounts *inc_cgroup_namespaces(struct user_namespace *ns)
6355{
6356 return inc_ucount(ns, current_euid(), UCOUNT_CGROUP_NAMESPACES);
6357}
6358
6359static void dec_cgroup_namespaces(struct ucounts *ucounts)
6360{
6361 dec_ucount(ucounts, UCOUNT_CGROUP_NAMESPACES);
6362}
6363
a79a908f 6364static struct cgroup_namespace *alloc_cgroup_ns(void)
6365{
6366 struct cgroup_namespace *new_ns;
6367 int ret;
6368
6369 new_ns = kzalloc(sizeof(struct cgroup_namespace), GFP_KERNEL);
6370 if (!new_ns)
6371 return ERR_PTR(-ENOMEM);
6372 ret = ns_alloc_inum(&new_ns->ns);
6373 if (ret) {
6374 kfree(new_ns);
6375 return ERR_PTR(ret);
6376 }
6377 atomic_set(&new_ns->count, 1);
6378 new_ns->ns.ops = &cgroupns_operations;
6379 return new_ns;
6380}
6381
6382void free_cgroup_ns(struct cgroup_namespace *ns)
6383{
6384 put_css_set(ns->root_cset);
d08311dd 6385 dec_cgroup_namespaces(ns->ucounts);
a79a908f 6386 put_user_ns(ns->user_ns);
6387 ns_free_inum(&ns->ns);
6388 kfree(ns);
6389}
6390EXPORT_SYMBOL(free_cgroup_ns);
6391
6392struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
6393 struct user_namespace *user_ns,
6394 struct cgroup_namespace *old_ns)
6395{
fa5ff8a1 6396 struct cgroup_namespace *new_ns;
d08311dd 6397 struct ucounts *ucounts;
fa5ff8a1 6398 struct css_set *cset;
a79a908f 6399
6400 BUG_ON(!old_ns);
6401
6402 if (!(flags & CLONE_NEWCGROUP)) {
6403 get_cgroup_ns(old_ns);
6404 return old_ns;
6405 }
6406
6407 /* Allow only sysadmin to create cgroup namespace. */
a79a908f 6408 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
fa5ff8a1 6409 return ERR_PTR(-EPERM);
a79a908f 6410
d08311dd 6411 ucounts = inc_cgroup_namespaces(user_ns);
6412 if (!ucounts)
df75e774 6413 return ERR_PTR(-ENOSPC);
d08311dd 6414
7bd88308 6415 /* It is not safe to take cgroup_mutex here */
82d6489d 6416 spin_lock_irq(&css_set_lock);
a79a908f 6417 cset = task_css_set(current);
6418 get_css_set(cset);
82d6489d 6419 spin_unlock_irq(&css_set_lock);
a79a908f 6420
a79a908f 6421 new_ns = alloc_cgroup_ns();
d2202557 6422 if (IS_ERR(new_ns)) {
fa5ff8a1 6423 put_css_set(cset);
d08311dd 6424 dec_cgroup_namespaces(ucounts);
fa5ff8a1 6425 return new_ns;
d2202557 6426 }
a79a908f 6427
6428 new_ns->user_ns = get_user_ns(user_ns);
d08311dd 6429 new_ns->ucounts = ucounts;
a79a908f 6430 new_ns->root_cset = cset;
6431
6432 return new_ns;
a79a908f 6433}
6434
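(From userspace, copy_cgroup_ns() is reached with CLONE_NEWCGROUP via clone(2) or unshare(2); a minimal illustrative sketch, which needs CAP_SYS_ADMIN per the ns_capable() check above:)

        #define _GNU_SOURCE
        #include <sched.h>
        #include <stdio.h>

        int main(void)
        {
                if (unshare(CLONE_NEWCGROUP) == -1) {
                        perror("unshare(CLONE_NEWCGROUP)");
                        return 1;
                }
                /* /proc/self/cgroup paths are now relative to root_cset */
                return 0;
        }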
6435static inline struct cgroup_namespace *to_cg_ns(struct ns_common *ns)
6436{
6437 return container_of(ns, struct cgroup_namespace, ns);
6438}
6439
a0530e08 6440static int cgroupns_install(struct nsproxy *nsproxy, struct ns_common *ns)
a79a908f 6441{
a0530e08 6442 struct cgroup_namespace *cgroup_ns = to_cg_ns(ns);
6443
6444 if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN) ||
6445 !ns_capable(cgroup_ns->user_ns, CAP_SYS_ADMIN))
6446 return -EPERM;
6447
6448 /* Don't need to do anything if we are attaching to our own cgroupns. */
6449 if (cgroup_ns == nsproxy->cgroup_ns)
6450 return 0;
6451
6452 get_cgroup_ns(cgroup_ns);
6453 put_cgroup_ns(nsproxy->cgroup_ns);
6454 nsproxy->cgroup_ns = cgroup_ns;
6455
6456 return 0;
a79a908f
AK
6457}
6458
6459static struct ns_common *cgroupns_get(struct task_struct *task)
6460{
6461 struct cgroup_namespace *ns = NULL;
6462 struct nsproxy *nsproxy;
6463
6464 task_lock(task);
6465 nsproxy = task->nsproxy;
6466 if (nsproxy) {
6467 ns = nsproxy->cgroup_ns;
6468 get_cgroup_ns(ns);
6469 }
6470 task_unlock(task);
6471
6472 return ns ? &ns->ns : NULL;
6473}
6474
6475static void cgroupns_put(struct ns_common *ns)
6476{
6477 put_cgroup_ns(to_cg_ns(ns));
6478}
6479
bcac25a5
AV
6480static struct user_namespace *cgroupns_owner(struct ns_common *ns)
6481{
6482 return to_cg_ns(ns)->user_ns;
6483}
6484
a79a908f
AK
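/*
 * Hooks wiring cgroup namespaces into the generic nsfs machinery that
 * backs the /proc/<pid>/ns/cgroup file.
 */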
6485const struct proc_ns_operations cgroupns_operations = {
6486 .name = "cgroup",
6487 .type = CLONE_NEWCGROUP,
6488 .get = cgroupns_get,
6489 .put = cgroupns_put,
6490 .install = cgroupns_install,
bcac25a5 6491 .owner = cgroupns_owner,
a79a908f
AK
6492};
6493
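/* Placeholder initcall; cgroup namespaces need no boot-time setup of their own. */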
6494static __init int cgroup_namespaces_init(void)
6495{
6496 return 0;
6497}
6498subsys_initcall(cgroup_namespaces_init);
6499
30070984
DM
6500#ifdef CONFIG_CGROUP_BPF
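/*
 * Attach or detach @prog at attach point @type, holding cgroup_mutex to
 * serialize against concurrent hierarchy changes. The parent cgroup is
 * passed so that __cgroup_bpf_update() can fall back to the parent's
 * effective program when @prog is detached.
 */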
6501void cgroup_bpf_update(struct cgroup *cgrp,
6502 struct bpf_prog *prog,
6503 enum bpf_attach_type type)
6504{
6505 struct cgroup *parent = cgroup_parent(cgrp);
6506
6507 mutex_lock(&cgroup_mutex);
6508 __cgroup_bpf_update(cgrp, parent, prog, type);
6509 mutex_unlock(&cgroup_mutex);
6510}
6511#endif /* CONFIG_CGROUP_BPF */
6512
fe693435 6513#ifdef CONFIG_CGROUP_DEBUG
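/*
 * The "debug" controller exposes internal cgroup state (css_set
 * pointers, refcounts, task lists) through control files. It is built
 * only with CONFIG_CGROUP_DEBUG and is intended for kernel development,
 * not production use.
 */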
eb95419b
TH
6514static struct cgroup_subsys_state *
6515debug_css_alloc(struct cgroup_subsys_state *parent_css)
fe693435
PM
6516{
6517 struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
6518
6519 if (!css)
6520 return ERR_PTR(-ENOMEM);
6521
6522 return css;
6523}
6524
eb95419b 6525static void debug_css_free(struct cgroup_subsys_state *css)
fe693435 6526{
eb95419b 6527 kfree(css);
fe693435
PM
6528}
6529
182446d0
TH
6530static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
6531 struct cftype *cft)
fe693435 6532{
182446d0 6533 return cgroup_task_count(css->cgroup);
fe693435
PM
6534}
6535
182446d0
TH
6536static u64 current_css_set_read(struct cgroup_subsys_state *css,
6537 struct cftype *cft)
fe693435
PM
6538{
6539 return (u64)(unsigned long)current->cgroups;
6540}
6541
182446d0 6542static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
03c78cbe 6543 struct cftype *cft)
fe693435
PM
6544{
6545 u64 count;
6546
6547 rcu_read_lock();
a8ad805c 6548 count = atomic_read(&task_css_set(current)->refcount);
fe693435
PM
6549 rcu_read_unlock();
6550 return count;
6551}
6552
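/*
 * Print one line per hierarchy the current task's css_set is linked
 * into: the hierarchy id and the name of the cgroup the task occupies
 * there.
 */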
2da8ca82 6553static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
7717f7ba 6554{
69d0206c 6555 struct cgrp_cset_link *link;
5abb8855 6556 struct css_set *cset;
e61734c5
TH
6557 char *name_buf;
6558
6559 name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
6560 if (!name_buf)
6561 return -ENOMEM;
7717f7ba 6562
82d6489d 6563 spin_lock_irq(&css_set_lock);
7717f7ba 6564 rcu_read_lock();
5abb8855 6565 cset = rcu_dereference(current->cgroups);
69d0206c 6566 list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
7717f7ba 6567 struct cgroup *c = link->cgrp;
7717f7ba 6568
a2dd4247 6569 cgroup_name(c, name_buf, NAME_MAX + 1);
2c6ab6d2 6570 seq_printf(seq, "Root %d group %s\n",
a2dd4247 6571 c->root->hierarchy_id, name_buf);
7717f7ba
PM
6572 }
6573 rcu_read_unlock();
82d6489d 6574 spin_unlock_irq(&css_set_lock);
e61734c5 6575 kfree(name_buf);
7717f7ba
PM
6576 return 0;
6577}
6578
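/*
 * Cap the per-css_set task listing so an enormous cgroup cannot flood
 * the seq_file; truncation is marked with "...".
 */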
6579#define MAX_TASKS_SHOWN_PER_CSS 25
2da8ca82 6580static int cgroup_css_links_read(struct seq_file *seq, void *v)
7717f7ba 6581{
2da8ca82 6582 struct cgroup_subsys_state *css = seq_css(seq);
69d0206c 6583 struct cgrp_cset_link *link;
7717f7ba 6584
82d6489d 6585 spin_lock_irq(&css_set_lock);
182446d0 6586 list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
69d0206c 6587 struct css_set *cset = link->cset;
7717f7ba
PM
6588 struct task_struct *task;
6589 int count = 0;
c7561128 6590
5abb8855 6591 seq_printf(seq, "css_set %p\n", cset);
c7561128 6592
5abb8855 6593 list_for_each_entry(task, &cset->tasks, cg_list) {
c7561128
TH
6594 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
6595 goto overflow;
6596 seq_printf(seq, " task %d\n", task_pid_vnr(task));
6597 }
6598
6599 list_for_each_entry(task, &cset->mg_tasks, cg_list) {
6600 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
6601 goto overflow;
6602 seq_printf(seq, " task %d\n", task_pid_vnr(task));
7717f7ba 6603 }
c7561128
TH
6604 continue;
6605 overflow:
6606 seq_puts(seq, " ...\n");
7717f7ba 6607 }
82d6489d 6608 spin_unlock_irq(&css_set_lock);
7717f7ba
PM
6609 return 0;
6610}
6611
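/* "releasable": the cgroup is not populated and has no online children. */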
182446d0 6612static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
fe693435 6613{
27bd4dbb 6614 return (!cgroup_is_populated(css->cgroup) &&
a25eb52e 6615 !css_has_online_children(&css->cgroup->self));
fe693435
PM
6616}
6617
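/* Control files exported by the debug controller; the empty record terminates the array. */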
6618static struct cftype debug_files[] = {
fe693435
PM
6619 {
6620 .name = "taskcount",
6621 .read_u64 = debug_taskcount_read,
6622 },
6623
6624 {
6625 .name = "current_css_set",
6626 .read_u64 = current_css_set_read,
6627 },
6628
6629 {
6630 .name = "current_css_set_refcount",
6631 .read_u64 = current_css_set_refcount_read,
6632 },
6633
7717f7ba
PM
6634 {
6635 .name = "current_css_set_cg_links",
2da8ca82 6636 .seq_show = current_css_set_cg_links_read,
7717f7ba
PM
6637 },
6638
6639 {
6640 .name = "cgroup_css_links",
2da8ca82 6641 .seq_show = cgroup_css_links_read,
7717f7ba
PM
6642 },
6643
fe693435
PM
6644 {
6645 .name = "releasable",
6646 .read_u64 = releasable_read,
6647 },
fe693435 6648
4baf6e33
TH
6649 { } /* terminate */
6650};
fe693435 6651
073219e9 6652struct cgroup_subsys debug_cgrp_subsys = {
92fb9748
TH
6653 .css_alloc = debug_css_alloc,
6654 .css_free = debug_css_free,
5577964e 6655 .legacy_cftypes = debug_files,
fe693435
PM
6656};
6657#endif /* CONFIG_CGROUP_DEBUG */