/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Notifications support
 *  Copyright (C) 2009 Nokia Corporation
 *  Author: Kirill A. Shutemov
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */
#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/flex_array.h> /* used in cgroup_attach_task */
#include <linux/kthread.h>

#include <linux/atomic.h>
/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls
 * Expiring in the middle is a performance problem not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ
#define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
					 MAX_CFTYPE_NAME + 2)
/*
 * cgroup_tree_mutex nests above cgroup_mutex and protects cftypes, file
 * creation/removal and hierarchy changing operations including cgroup
 * creation, removal, css association and controller rebinding.  This outer
 * lock is needed mainly to resolve the circular dependency between kernfs
 * active ref and cgroup_mutex.  cgroup_tree_mutex nests above both.
 */
static DEFINE_MUTEX(cgroup_tree_mutex);
/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 */
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
EXPORT_SYMBOL_GPL(cgroup_mutex);	/* only for lockdep */
#else
static DEFINE_MUTEX(cgroup_mutex);
#endif
/*
 * Protects cgroup_subsys->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);
#define cgroup_assert_mutexes_or_rcu_locked()				\
	rcu_lockdep_assert(rcu_read_lock_held() ||			\
			   lockdep_is_held(&cgroup_tree_mutex) ||	\
			   lockdep_is_held(&cgroup_mutex),		\
			   "cgroup_[tree_]mutex or RCU read lock required");
/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;
/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
static struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS
/*
 * The dummy hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
static struct cgroupfs_root cgroup_dummy_root;

/* dummy_top is a shorthand for the dummy hierarchy's top cgroup */
static struct cgroup * const cgroup_dummy_top = &cgroup_dummy_root.top_cgroup;

/* The list of hierarchy roots */

static LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

static struct cgroup_name root_cgroup_name = { .name = "/" };
/*
 * Assign a monotonically increasing serial number to cgroups.  It
 * guarantees cgroups with bigger numbers are newer than those with smaller
 * numbers.  Also, as cgroups are always appended to the parent's
 * ->children list, it guarantees that sibling cgroups are always sorted in
 * the ascending serial number order on the list.  Protected by
 * cgroup_mutex.
 */
static u64 cgroup_serial_nr_next = 1;
/*
 * This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call. This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback __read_mostly;

static struct cftype cgroup_base_files[];
static void cgroup_destroy_css_killed(struct cgroup *cgrp);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);
static int cgroup_file_release(struct inode *inode, struct file *file);
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp);
/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns the dummy_css)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks.  This function may return
 * %NULL if @cgrp doesn't have @subsys_id enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	if (ss)
		return rcu_dereference_check(cgrp->subsys[ss->id],
					lockdep_is_held(&cgroup_tree_mutex) ||
					lockdep_is_held(&cgroup_mutex));
	else
		return &cgrp->dummy_css;
}
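
/*
 * Illustrative sketch, not part of the original file: a typical reader
 * pins the returned css before dropping the RCU read lock.  The helper
 * name is hypothetical; css_tryget() is the pinning primitive assumed here.
 */
static struct cgroup_subsys_state * __maybe_unused
example_pin_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = cgroup_css(cgrp, ss);
	if (css && !css_tryget(css))	/* raced with css destruction */
		css = NULL;
	rcu_read_unlock();
	return css;			/* caller must css_put() when done */
}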
/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return test_bit(CGRP_DEAD, &cgrp->flags);
}
/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
{
	while (cgrp) {
		if (cgrp == ancestor)
			return true;
		cgrp = cgrp->parent;
	}
	return false;
}
EXPORT_SYMBOL_GPL(cgroup_is_descendant);
static int cgroup_is_releasable(const struct cgroup *cgrp)
{
	const int bits =
		(1 << CGRP_RELEASABLE) |
		(1 << CGRP_NOTIFY_ON_RELEASE);
	return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}
/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_mutex.
 */
#define for_each_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = rcu_dereference_check(			\
				(cgrp)->subsys[(ssid)],			\
				lockdep_is_held(&cgroup_tree_mutex) ||	\
				lockdep_is_held(&cgroup_mutex)))) { }	\
		else
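
/*
 * Illustrative sketch, not part of the original file: under cgroup_mutex,
 * visiting every css attached to @cgrp looks like
 *
 *	struct cgroup_subsys_state *css;
 *	int ssid;
 *
 *	for_each_css(css, ssid, cgrp)
 *		do_something(css);
 *
 * The trailing else in the macro guarantees @css is non-NULL inside the
 * body; subsystems without a css on @cgrp are simply skipped.
 */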
/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)

/* iterate across the active hierarchies */
#define for_each_active_root(root)					\
	list_for_each_entry((root), &cgroup_roots, root_list)
static inline struct cgroup *__d_cgrp(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cfent *__d_cfe(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return __d_cfe(dentry)->type;
}
/**
 * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
 * @cgrp: the cgroup to be checked for liveness
 *
 * On success, returns true; the mutex should be later unlocked.  On
 * failure returns false with no lock held.
 */
static bool cgroup_lock_live_group(struct cgroup *cgrp)
{
	mutex_lock(&cgroup_mutex);
	if (cgroup_is_dead(cgrp)) {
		mutex_unlock(&cgroup_mutex);
		return false;
	}
	return true;
}
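
/*
 * Illustrative sketch, not part of the original file: the canonical
 * caller pattern is
 *
 *	if (!cgroup_lock_live_group(cgrp))
 *		return -ENODEV;
 *	... operate on the still-live cgroup ...
 *	mutex_unlock(&cgroup_mutex);
 *
 * which is how cgroup_release_agent_write() below uses it.
 */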
/*
 * the list of cgroups eligible for automatic release. Protected by
 * release_list_lock
 */
static LIST_HEAD(release_list);
static DEFINE_RAW_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);
/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};

/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
static struct css_set init_css_set;
static struct cgrp_cset_link init_cgrp_cset_link;
/*
 * css_set_lock protects the list of css_set objects, and the chain of
 * tasks off each css_set.  Nests outside task->alloc_lock due to
 * css_task_iter_start().
 */
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;

/*
 * hash table for cgroup groups. This improves the performance to find
 * an existing css_set. This hash doesn't (currently) take into
 * account cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}
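
/*
 * Illustrative sketch, not part of the original file: the computed key
 * feeds the hashtable helpers, as find_existing_css_set() does below:
 *
 *	key = css_set_hash(template);
 *	hash_for_each_possible(css_set_table, cset, hlist, key)
 *		if (compare_css_sets(cset, old_cset, cgrp, template))
 *			return cset;
 *
 * Folding the high bits into the low ones ((key >> 16) ^ key) matters
 * because the table only consumes CSS_SET_HASH_BITS low-order bits.
 */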
/*
 * We don't maintain the lists running through each css_set to its task
 * until after the first call to css_task_iter_start().  This reduces the
 * fork()/exit() overhead for people who have cgroups compiled into their
 * kernel but not actually in use.
 */
static int use_task_css_set_links __read_mostly;
static void __put_css_set(struct css_set *cset, int taskexit)
{
	struct cgrp_cset_link *link, *tmp_link;

	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwlock
	 */
	if (atomic_add_unless(&cset->refcount, -1, 1))
		return;
	write_lock(&css_set_lock);
	if (!atomic_dec_and_test(&cset->refcount)) {
		write_unlock(&css_set_lock);
		return;
	}

	/* This css_set is dead. unlink it and release cgroup refcounts */
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *cgrp = link->cgrp;

		list_del(&link->cset_link);
		list_del(&link->cgrp_link);

		/* @cgrp can't go away while we're holding css_set_lock */
		if (list_empty(&cgrp->cset_links) && notify_on_release(cgrp)) {
			if (taskexit)
				set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}

		kfree(link);
	}

	write_unlock(&css_set_lock);
	kfree_rcu(cset, rcu_head);
}
/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	atomic_inc(&cset->refcount);
}

static inline void put_css_set(struct css_set *cset)
{
	__put_css_set(cset, 0);
}

static inline void put_css_set_taskexit(struct css_set *cset)
{
	__put_css_set(cset, 1);
}
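
/*
 * Illustrative sketch, not part of the original file: code that wants to
 * keep a css_set beyond the locks it was found under brackets the use
 * with an explicit reference:
 *
 *	get_css_set(cset);
 *	... use *cset ...
 *	put_css_set(cset);	(the final drop frees it via kfree_rcu())
 *
 * put_css_set_taskexit() differs only in that the final drop also marks
 * now-empty notify_on_release cgroups as releasable.
 */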
/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	if (memcmp(template, cset->subsys, sizeof(cset->subsys))) {
		/* Not all subsystems matched */
		return false;
	}

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies with no subsystems. We
	 * could get by with just this check alone (and skip the
	 * memcmp above) but on most setups the memcmp check will
	 * avoid the need for this more expensive check on almost all
	 * candidates.
	 */
	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}
/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					struct cgroup *cgrp,
					struct cgroup_subsys_state *template[])
{
	struct cgroupfs_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set. while subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->subsys_mask & (1UL << i)) {
			/* Subsystem is in this hierarchy. So we want
			 * the subsystem state from the new
			 * cgroup */
			template[i] = cgroup_css(cgrp, ss);
		} else {
			/* Subsystem is not in this hierarchy, so we
			 * don't want to change the subsystem state */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing cgroup group matched */
	return NULL;
}
static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}
/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}
/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));
	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;
	list_move(&link->cset_link, &cgrp->cset_links);
	/*
	 * Always add links to the tail of the list so that the list
	 * is sorted by order of hierarchy creation
	 */
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);
}
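
/*
 * Illustrative sketch, not part of the original file, of the two-phase
 * pattern these helpers enable: allocate outside css_set_lock, consume
 * under it so the linking step itself cannot fail:
 *
 *	LIST_HEAD(tmp_links);
 *
 *	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0)
 *		return -ENOMEM;
 *	write_lock(&css_set_lock);
 *	link_css_set(&tmp_links, cset, cgrp);
 *	write_unlock(&css_set_lock);
 */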
/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	unsigned long key;

	lockdep_assert_held(&cgroup_mutex);

	/* First see if we already have a cgroup group that matches
	 * the desired set */
	read_lock(&css_set_lock);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	read_unlock(&css_set_lock);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	atomic_set(&cset->refcount, 1);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->tasks);
	INIT_HLIST_NODE(&cset->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	write_lock(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add this cgroup group to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	write_unlock(&css_set_lock);

	return cset;
}
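
/*
 * Illustrative sketch, not part of the original file: task migration
 * trades the task's current css_set for the one matching the destination
 * cgroup, mirroring steps 2-3 of cgroup_attach_task() below:
 *
 *	old_cset = task_css_set(tsk);
 *	new_cset = find_css_set(old_cset, dst_cgrp);	(takes a reference)
 *	if (!new_cset)
 *		return -ENOMEM;
 *	cgroup_task_migrate(old_cgrp, tsk, new_cset);
 */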
/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroupfs_root *root)
{
	struct css_set *cset;
	struct cgroup *res = NULL;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));
	read_lock(&css_set_lock);
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	cset = task_css_set(task);
	if (cset == &init_css_set) {
		res = &root->top_cgroup;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}
	read_unlock(&css_set_lock);
	BUG_ON(!res);
	return res;
}
/*
 * There is one global cgroup mutex. We also require taking
 * task_lock() when dereferencing a task's cgroup subsys pointers.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count).  So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero.  Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits.  Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call made
 * to the release agent with the name of the cgroup (path relative to
 * the root of cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cgroup
 * always has either children cgroups and/or using tasks.  So we don't
 * need a special hack to ensure that top_cgroup cannot be deleted.
 *
 * The task_lock() exception
 *
 * The need for this exception arises from the action of
 * cgroup_attach_task(), which overwrites one task's cgroup pointer with
 * another.  It does so using cgroup_mutex, however there are
 * several performance critical places that need to reference
 * task->cgroup without the expense of grabbing a system global
 * mutex.  Therefore except as noted below, when dereferencing or, as
 * in cgroup_attach_task(), modifying a task's cgroup pointer we use
 * task_lock(), which acts on a spinlock (task->alloc_lock) already in
 * the task_struct routinely used for such matters.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */
/*
 * A couple of forward declarations required, due to cyclic reference loop:
 * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
 * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
 * -> cgroup_mkdir.
 */

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
static const struct inode_operations cgroup_dir_inode_operations;
static const struct file_operations proc_cgroupstats_operations;
static struct backing_dev_info cgroup_backing_dev_info = {
	.name		= "cgroup",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
	}
	return inode;
}
static struct cgroup_name *cgroup_alloc_name(const char *name_str)
{
	struct cgroup_name *name;

	name = kmalloc(sizeof(*name) + strlen(name_str) + 1, GFP_KERNEL);
	if (!name)
		return NULL;
	strcpy(name->name, name_str);
	return name;
}
static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
			 cft->ss->name, cft->name);
	else
		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
	return buf;
}
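
/*
 * Illustrative note, not part of the original file: with the rules above,
 * the cpu controller's "shares" file is named "cpu.shares" on a normal
 * mount but plain "shares" on a hierarchy mounted with noprefix, and
 * cgroup-core files (cft->ss == NULL) such as "tasks" are never prefixed.
 */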
static void cgroup_free_fn(struct work_struct *work)
{
	struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);

	mutex_lock(&cgroup_mutex);
	cgrp->root->number_of_cgroups--;
	mutex_unlock(&cgroup_mutex);

	/*
	 * We get a ref to the parent's dentry, and put the ref when
	 * this cgroup is being freed, so it's guaranteed that the
	 * parent won't be destroyed before its children.
	 */
	dput(cgrp->parent->dentry);

	/*
	 * Drop the active superblock reference that we took when we
	 * created the cgroup. This will free cgrp->root, if we are
	 * holding the last reference to @sb.
	 */
	deactivate_super(cgrp->root->sb);

	cgroup_pidlist_destroy_all(cgrp);

	simple_xattrs_free(&cgrp->xattrs);

	kfree(rcu_dereference_raw(cgrp->name));
	kfree(cgrp);
}
static void cgroup_free_rcu(struct rcu_head *head)
{
	struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);

	INIT_WORK(&cgrp->destroy_work, cgroup_free_fn);
	queue_work(cgroup_destroy_wq, &cgrp->destroy_work);
}
static void cgroup_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory ? if so, kfree() associated cgroup */
	if (S_ISDIR(inode->i_mode)) {
		struct cgroup *cgrp = dentry->d_fsdata;

		BUG_ON(!(cgroup_is_dead(cgrp)));

		/*
		 * XXX: cgrp->id is only used to look up css's.  As cgroup
		 * and css's lifetimes will be decoupled, it should be made
		 * per-subsystem and moved to css->id so that lookups are
		 * successful until the target css is released.
		 */
		mutex_lock(&cgroup_mutex);
		idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
		mutex_unlock(&cgroup_mutex);

		call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
	} else {
		struct cfent *cfe = __d_cfe(dentry);
		struct cgroup *cgrp = dentry->d_parent->d_fsdata;

		WARN_ONCE(!list_empty(&cfe->node) &&
			  cgrp != &cgrp->root->top_cgroup,
			  "cfe still linked for %s\n", cfe->type->name);
		simple_xattrs_free(&cfe->xattrs);
		kfree(cfe);
	}
	iput(inode);
}
static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}
static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	struct cfent *cfe;

	lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_tree_mutex);

	/*
	 * If we're doing cleanup due to failure of cgroup_create(),
	 * the corresponding @cfe may not exist.
	 */
	list_for_each_entry(cfe, &cgrp->files, node) {
		struct dentry *d = cfe->dentry;

		if (cft && cfe->type != cft)
			continue;

		dget(d);
		simple_unlink(cgrp->dentry->d_inode, d);
		list_del_init(&cfe->node);
		dput(d);

		break;
	}
}
/**
 * cgroup_clear_dir - remove subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be removed
 */
static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i) {
		struct cftype_set *set;

		if (!test_bit(i, &subsys_mask))
			continue;
		list_for_each_entry(set, &ss->cftsets, node)
			cgroup_addrm_files(cgrp, set->cfts, false);
	}
}
/*
 * NOTE : the dentry must have been dget()'ed
 */
static void cgroup_d_remove_dir(struct dentry *dentry)
{
	struct dentry *parent;

	parent = dentry->d_parent;
	spin_lock(&parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	list_del_init(&dentry->d_u.d_child);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);
	remove_dir(dentry);
}
static int rebind_subsystems(struct cgroupfs_root *root,
			     unsigned long added_mask, unsigned removed_mask)
{
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_subsys *ss;
	int i, ret;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	/* Check that any added subsystems are currently free */
	for_each_subsys(ss, i)
		if ((added_mask & (1 << i)) && ss->root != &cgroup_dummy_root)
			return -EBUSY;

	ret = cgroup_populate_dir(cgrp, added_mask);
	if (ret)
		return ret;

	/*
	 * Nothing can fail from this point on.  Remove files for the
	 * removed subsystems and rebind each subsystem.
	 */
	mutex_unlock(&cgroup_mutex);
	cgroup_clear_dir(cgrp, removed_mask);
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i) {
		unsigned long bit = 1UL << i;

		if (bit & added_mask) {
			/* We're binding this subsystem to this hierarchy */
			BUG_ON(cgroup_css(cgrp, ss));
			BUG_ON(!cgroup_css(cgroup_dummy_top, ss));
			BUG_ON(cgroup_css(cgroup_dummy_top, ss)->cgroup != cgroup_dummy_top);

			rcu_assign_pointer(cgrp->subsys[i],
					   cgroup_css(cgroup_dummy_top, ss));
			cgroup_css(cgrp, ss)->cgroup = cgrp;

			ss->root = root;
			if (ss->bind)
				ss->bind(cgroup_css(cgrp, ss));

			/* refcount was already taken, and we're keeping it */
			root->subsys_mask |= bit;
		} else if (bit & removed_mask) {
			/* We're removing this subsystem */
			BUG_ON(cgroup_css(cgrp, ss) != cgroup_css(cgroup_dummy_top, ss));
			BUG_ON(cgroup_css(cgrp, ss)->cgroup != cgrp);

			if (ss->bind)
				ss->bind(cgroup_css(cgroup_dummy_top, ss));

			cgroup_css(cgroup_dummy_top, ss)->cgroup = cgroup_dummy_top;
			RCU_INIT_POINTER(cgrp->subsys[i], NULL);

			cgroup_subsys[i]->root = &cgroup_dummy_root;
			root->subsys_mask &= ~bit;
		}
	}

	/*
	 * Mark @root has finished binding subsystems.  @root->subsys_mask
	 * now matches the bound subsystems.
	 */
	root->flags |= CGRP_ROOT_SUBSYS_BOUND;

	return 0;
}
static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
{
	struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_printf(seq, ",%s", ss->name);
	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR)
		seq_puts(seq, ",sane_behavior");
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_printf(seq, ",name=%s", root->name);
	return 0;
}
struct cgroup_sb_opts {
	unsigned long subsys_mask;
	unsigned long flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;

	struct cgroupfs_root *new_root;
};
/*
 * Convert a hierarchy specifier into a bitmask of subsystems and
 * flags. Call with cgroup_mutex held to protect the cgroup_subsys[]
 * array. This function takes refcounts on subsystems to be used, unless it
 * returns error, in which case no refcounts are taken.
 */
static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned long mask = (unsigned long)-1;
	struct cgroup_subsys *ss;
	int i;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));

#ifdef CONFIG_CPUSETS
	mask = ~(1UL << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "__DEVEL__sane_behavior")) {
			opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			set_bit(i, &opts->subsys_mask);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified select all the subsystems,
	 * otherwise if 'none', 'name=' and a subsystem name options
	 * were not specified, let's default to 'all'
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (!ss->disabled)
				set_bit(i, &opts->subsys_mask);

	/* Consistency checks */

	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_warning("cgroup: sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");

		if (opts->flags & CGRP_ROOT_NOPREFIX) {
			pr_err("cgroup: sane_behavior: noprefix is not allowed\n");
			return -EINVAL;
		}

		if (opts->cpuset_clone_children) {
			pr_err("cgroup: sane_behavior: clone_children is not allowed\n");
			return -EINVAL;
		}
	}

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	return 0;
}
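
/*
 * Illustrative note, not part of the original file: for example,
 *
 *	mount -t cgroup -o cpu,cpuacct,name=mygrp none /mnt
 *
 * parses to a subsys_mask with the cpu and cpuacct bits set plus
 * opts->name == "mygrp", while "-o none,name=mygrp" leaves subsys_mask
 * empty with opts->none set, which passes the final check because a
 * name was supplied.
 */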
static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
	int ret;
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_sb_opts opts;
	unsigned long added_mask, removed_mask;

	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_err("cgroup: sane_behavior: remount is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n",
			   task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("cgroup: option or name mismatch, new: 0x%lx \"%s\", old: 0x%lx \"%s\"\n",
		       opts.flags & CGRP_ROOT_OPTION_MASK, opts.name ?: "",
		       root->flags & CGRP_ROOT_OPTION_MASK, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (root->number_of_cgroups > 1) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask, removed_mask);
	if (ret)
		goto out_unlock;

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
	return ret;
}
static const struct super_operations cgroup_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= cgroup_show_options,
	.remount_fs	= cgroup_remount,
};
static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->files);
	INIT_LIST_HEAD(&cgrp->cset_links);
	INIT_LIST_HEAD(&cgrp->release_list);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	cgrp->dummy_css.cgroup = cgrp;
	simple_xattrs_init(&cgrp->xattrs);
}
static void init_cgroup_root(struct cgroupfs_root *root)
{
	struct cgroup *cgrp = &root->top_cgroup;

	INIT_LIST_HEAD(&root->root_list);
	root->number_of_cgroups = 1;
	cgrp->root = root;
	RCU_INIT_POINTER(cgrp->name, &root_cgroup_name);
	init_cgroup_housekeeping(cgrp);
	idr_init(&root->cgroup_idr);
}
static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end)
{
	int id;

	lockdep_assert_held(&cgroup_mutex);

	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, start, end,
			      GFP_KERNEL);
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}
static void cgroup_exit_root_id(struct cgroupfs_root *root)
{
	lockdep_assert_held(&cgroup_mutex);

	if (root->hierarchy_id) {
		idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
		root->hierarchy_id = 0;
	}
}
static int cgroup_test_super(struct super_block *sb, void *data)
{
	struct cgroup_sb_opts *opts = data;
	struct cgroupfs_root *root = sb->s_fs_info;

	/* If we asked for a name then it must match */
	if (opts->name && strcmp(opts->name, root->name))
		return 0;

	/*
	 * If we asked for subsystems (or explicitly for no
	 * subsystems) then they must match
	 */
	if ((opts->subsys_mask || opts->none)
	    && (opts->subsys_mask != root->subsys_mask))
		return 0;

	return 1;
}
static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
{
	struct cgroupfs_root *root;

	if (!opts->subsys_mask && !opts->none)
		return NULL;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	init_cgroup_root(root);

	/*
	 * We need to set @root->subsys_mask now so that @root can be
	 * matched by cgroup_test_super() before it finishes
	 * initialization; otherwise, competing mounts with the same
	 * options may try to bind the same subsystems instead of waiting
	 * for the first one leading to unexpected mount errors.
	 * SUBSYS_BOUND will be set once actual binding is complete.
	 */
	root->subsys_mask = opts->subsys_mask;
	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags);
	return root;
}
static void cgroup_free_root(struct cgroupfs_root *root)
{
	if (root) {
		/* hierarchy ID should already have been released */
		WARN_ON_ONCE(root->hierarchy_id);

		idr_destroy(&root->cgroup_idr);
		kfree(root);
	}
}
static int cgroup_set_super(struct super_block *sb, void *data)
{
	int ret;
	struct cgroup_sb_opts *opts = data;

	/* If we don't have a new root, we can't set up a new sb */
	if (!opts->new_root)
		return -EINVAL;

	BUG_ON(!opts->subsys_mask && !opts->none);

	ret = set_anon_super(sb, NULL);
	if (ret)
		return ret;

	sb->s_fs_info = opts->new_root;
	opts->new_root->sb = sb;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CGROUP_SUPER_MAGIC;
	sb->s_op = &cgroup_ops;

	return 0;
}
static int cgroup_get_rootdir(struct super_block *sb)
{
	static const struct dentry_operations cgroup_dops = {
		.d_iput = cgroup_diput,
		.d_delete = always_delete_dentry,
	};

	struct inode *inode =
		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);

	if (!inode)
		return -ENOMEM;

	inode->i_fop = &simple_dir_operations;
	inode->i_op = &cgroup_dir_inode_operations;
	/* directories start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	/* for everything else we want ->d_op set */
	sb->s_d_op = &cgroup_dops;
	return 0;
}
static int cgroup_setup_root(struct cgroupfs_root *root)
{
	LIST_HEAD(tmp_links);
	struct super_block *sb = root->sb;
	struct cgroup *root_cgrp = &root->top_cgroup;
	struct cgroupfs_root *existing_root;
	struct css_set *cset;
	struct inode *inode;
	const struct cred *cred;
	int i, ret;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);
	BUG_ON(sb->s_root != NULL);

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);

	ret = cgroup_get_rootdir(sb);
	if (ret) {
		mutex_lock(&cgroup_tree_mutex);
		mutex_lock(&cgroup_mutex);
		return ret;
	}
	inode = sb->s_root->d_inode;

	mutex_lock(&inode->i_mutex);
	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
	if (ret < 0)
		goto out_unlock;
	root_cgrp->id = ret;

	/* check for name clashes with existing mounts */
	ret = -EBUSY;
	if (strlen(root->name))
		for_each_active_root(existing_root)
			if (!strcmp(existing_root->name, root->name))
				goto out_unlock;

	/*
	 * We're accessing css_set_count without locking css_set_lock here,
	 * but that's OK - it can only be increased by someone holding
	 * cgroup_lock, and that's us. The worst that can happen is that we
	 * have some link structures left over
	 */
	ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
	if (ret)
		goto out_unlock;

	/* ID 0 is reserved for dummy root, 1 for unified hierarchy */
	ret = cgroup_init_root_id(root, 2, 0);
	if (ret)
		goto out_unlock;

	sb->s_root->d_fsdata = root_cgrp;
	root_cgrp->dentry = sb->s_root;

	/*
	 * We're inside get_sb() and will call lookup_one_len() to create
	 * the root files, which doesn't work if SELinux is in use.  The
	 * following cred dancing somehow works around it.  See 2ce9738ba
	 * ("cgroupfs: use init_cred when populating new cgroupfs mount")
	 * for more details.
	 */
	cred = override_creds(&init_cred);

	ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true);
	if (ret)
		goto rm_base_files;

	ret = rebind_subsystems(root, root->subsys_mask, 0);
	if (ret)
		goto rm_base_files;

	revert_creds(cred);

	/*
	 * There must be no failure case after here, since rebinding takes
	 * care of subsystems' refcounts, which are explicitly dropped in
	 * the failure exit path.
	 */
	list_add(&root->root_list, &cgroup_roots);
	cgroup_root_count++;

	/*
	 * Link the top cgroup in this hierarchy into all the css_set
	 * objects.
	 */
	write_lock(&css_set_lock);
	hash_for_each(css_set_table, i, cset, hlist)
		link_css_set(&tmp_links, cset, root_cgrp);
	write_unlock(&css_set_lock);

	BUG_ON(!list_empty(&root_cgrp->children));
	BUG_ON(root->number_of_cgroups != 1);

	ret = 0;
	goto out_unlock;

rm_base_files:
	cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false);
	revert_creds(cred);
	cgroup_exit_root_id(root);
out_unlock:
	mutex_unlock(&inode->i_mutex);
	free_cgrp_cset_links(&tmp_links);
	return ret;
}
*cgroup_mount(struct file_system_type
*fs_type
,
1586 int flags
, const char *unused_dev_name
,
1589 struct super_block
*sb
= NULL
;
1590 struct cgroupfs_root
*root
= NULL
;
1591 struct cgroup_sb_opts opts
;
1592 struct cgroupfs_root
*new_root
;
1595 mutex_lock(&cgroup_tree_mutex
);
1596 mutex_lock(&cgroup_mutex
);
1598 /* First find the desired set of subsystems */
1599 ret
= parse_cgroupfs_options(data
, &opts
);
1604 * Allocate a new cgroup root. We may not need it if we're
1605 * reusing an existing hierarchy.
1607 new_root
= cgroup_root_from_opts(&opts
);
1608 if (IS_ERR(new_root
)) {
1609 ret
= PTR_ERR(new_root
);
1612 opts
.new_root
= new_root
;
1614 /* Locate an existing or new sb for this hierarchy */
1615 mutex_unlock(&cgroup_mutex
);
1616 mutex_unlock(&cgroup_tree_mutex
);
1617 sb
= sget(fs_type
, cgroup_test_super
, cgroup_set_super
, 0, &opts
);
1618 mutex_lock(&cgroup_tree_mutex
);
1619 mutex_lock(&cgroup_mutex
);
1622 cgroup_free_root(opts
.new_root
);
1626 root
= sb
->s_fs_info
;
1628 if (root
== opts
.new_root
) {
1629 ret
= cgroup_setup_root(root
);
1634 * We re-used an existing hierarchy - the new root (if
1635 * any) is not needed
1637 cgroup_free_root(opts
.new_root
);
1639 if ((root
->flags
^ opts
.flags
) & CGRP_ROOT_OPTION_MASK
) {
1640 if ((root
->flags
| opts
.flags
) & CGRP_ROOT_SANE_BEHAVIOR
) {
1641 pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
1645 pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
1652 mutex_unlock(&cgroup_mutex
);
1653 mutex_unlock(&cgroup_tree_mutex
);
1655 if (ret
&& !IS_ERR_OR_NULL(sb
))
1656 deactivate_locked_super(sb
);
1658 kfree(opts
.release_agent
);
1662 return dget(sb
->s_root
);
1664 return ERR_PTR(ret
);
static void cgroup_kill_sb(struct super_block *sb)
{
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgrp_cset_link *link, *tmp_link;
	int ret;

	BUG_ON(!root);

	BUG_ON(root->number_of_cgroups != 1);
	BUG_ON(!list_empty(&cgrp->children));

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	/* Rebind all subsystems back to the default hierarchy */
	if (root->flags & CGRP_ROOT_SUBSYS_BOUND) {
		ret = rebind_subsystems(root, 0, root->subsys_mask);
		/* Shouldn't be able to fail ... */
		BUG_ON(ret);
	}

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	write_lock(&css_set_lock);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}
	write_unlock(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);

	simple_xattrs_free(&cgrp->xattrs);

	kill_litter_super(sb);
	cgroup_free_root(root);
}
static struct file_system_type cgroup_fs_type = {
	.name		= "cgroup",
	.mount		= cgroup_mount,
	.kill_sb	= cgroup_kill_sb,
};

static struct kobject *cgroup_kobj;
/**
 * cgroup_path - generate the path of a cgroup
 * @cgrp: the cgroup in question
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Writes path of cgroup into buf.  Returns 0 on success, -errno on error.
 *
 * We can't generate cgroup path using dentry->d_name, as accessing
 * dentry->name must be protected by irq-unsafe dentry->d_lock or parent
 * inode's i_mutex, while on the other hand cgroup_path() can be called
 * with some irq-safe spinlocks held.
 */
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
{
	int ret = -ENAMETOOLONG;
	char *start;

	if (!cgrp->parent) {
		if (strlcpy(buf, "/", buflen) >= buflen)
			return -ENAMETOOLONG;
		return 0;
	}

	start = buf + buflen - 1;
	*start = '\0';

	rcu_read_lock();
	do {
		const char *name = cgroup_name(cgrp);
		int len;

		len = strlen(name);
		if ((start -= len) < buf)
			goto out;
		memcpy(start, name, len);

		if (--start < buf)
			goto out;
		*start = '/';

		cgrp = cgrp->parent;
	} while (cgrp->parent);
	ret = 0;
	memmove(buf, start, buf + buflen - start);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cgroup_path);
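
/*
 * Illustrative sketch, not part of the original file:
 *
 *	char buf[PATH_MAX];
 *
 *	if (!cgroup_path(cgrp, buf, sizeof(buf)))
 *		pr_info("cgroup path: %s\n", buf);
 *
 * Callers must be prepared for -ENAMETOOLONG when the path does not fit.
 */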
/**
 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
 * @task: target task
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Determine @task's cgroup on the first (the one with the lowest non-zero
 * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
 * function grabs cgroup_mutex and shouldn't be used inside locks used by
 * cgroup controller callbacks.
 *
 * Returns 0 on success, fails with -%ENAMETOOLONG if @buflen is too short.
 */
int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
	struct cgroupfs_root *root;
	struct cgroup *cgrp;
	int hierarchy_id = 1, ret = 0;

	if (buflen < 2)
		return -ENAMETOOLONG;

	mutex_lock(&cgroup_mutex);

	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);

	if (root) {
		cgrp = task_cgroup_from_root(task, root);
		ret = cgroup_path(cgrp, buf, buflen);
	} else {
		/* if no hierarchy exists, everyone is in "/" */
		memcpy(buf, "/", 2);
	}

	mutex_unlock(&cgroup_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);
/*
 * Control Group taskset
 */
struct task_and_cgroup {
	struct task_struct	*task;
	struct cgroup		*cgrp;
	struct css_set		*cset;
};

struct cgroup_taskset {
	struct task_and_cgroup	single;
	struct flex_array	*tc_array;
	int			tc_array_len;
	int			idx;
	struct cgroup		*cur_cgrp;
};
/**
 * cgroup_taskset_first - reset taskset and return the first task
 * @tset: taskset of interest
 *
 * @tset iteration is initialized and the first task is returned.
 */
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
{
	if (tset->tc_array) {
		tset->idx = 0;
		return cgroup_taskset_next(tset);
	} else {
		tset->cur_cgrp = tset->single.cgrp;
		return tset->single.task;
	}
}
EXPORT_SYMBOL_GPL(cgroup_taskset_first);
/**
 * cgroup_taskset_next - iterate to the next task in taskset
 * @tset: taskset of interest
 *
 * Return the next task in @tset.  Iteration must have been initialized
 * with cgroup_taskset_first().
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
{
	struct task_and_cgroup *tc;

	if (!tset->tc_array || tset->idx >= tset->tc_array_len)
		return NULL;

	tc = flex_array_get(tset->tc_array, tset->idx++);
	tset->cur_cgrp = tc->cgrp;
	return tc->task;
}
EXPORT_SYMBOL_GPL(cgroup_taskset_next);
/**
 * cgroup_taskset_cur_css - return the matching css for the current task
 * @tset: taskset of interest
 * @subsys_id: the ID of the target subsystem
 *
 * Return the css for the current (last returned) task of @tset for
 * subsystem specified by @subsys_id.  This function must be preceded by
 * either cgroup_taskset_first() or cgroup_taskset_next().
 */
struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset,
						   int subsys_id)
{
	return cgroup_css(tset->cur_cgrp, cgroup_subsys[subsys_id]);
}
EXPORT_SYMBOL_GPL(cgroup_taskset_cur_css);
/**
 * cgroup_taskset_size - return the number of tasks in taskset
 * @tset: taskset of interest
 */
int cgroup_taskset_size(struct cgroup_taskset *tset)
{
	return tset->tc_array ? tset->tc_array_len : 1;
}
EXPORT_SYMBOL_GPL(cgroup_taskset_size);
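
/*
 * Illustrative sketch, not part of the original file: how a controller's
 * ->can_attach() might walk a taskset with the iterators above.  The
 * PF_KTHREAD check is an arbitrary stand-in for a real policy decision.
 */
static int __maybe_unused example_can_attach(struct cgroup_subsys_state *css,
					     struct cgroup_taskset *tset)
{
	struct task_struct *task;

	for (task = cgroup_taskset_first(tset); task;
	     task = cgroup_taskset_next(tset))
		if (task->flags & PF_KTHREAD)	/* e.g. refuse kernel threads */
			return -EINVAL;
	return 0;
}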
/*
 * cgroup_task_migrate - move a task from one cgroup to another.
 *
 * Must be called with cgroup_mutex and threadgroup locked.
 */
static void cgroup_task_migrate(struct cgroup *old_cgrp,
				struct task_struct *tsk,
				struct css_set *new_cset)
{
	struct css_set *old_cset;

	/*
	 * We are synchronized through threadgroup_lock() against PF_EXITING
	 * setting such that we can't race against cgroup_exit() changing the
	 * css_set to init_css_set and dropping the old one.
	 */
	WARN_ON_ONCE(tsk->flags & PF_EXITING);
	old_cset = task_css_set(tsk);

	task_lock(tsk);
	rcu_assign_pointer(tsk->cgroups, new_cset);
	task_unlock(tsk);

	/* Update the css_set linked lists if we're using them */
	write_lock(&css_set_lock);
	if (!list_empty(&tsk->cg_list))
		list_move(&tsk->cg_list, &new_cset->tasks);
	write_unlock(&css_set_lock);

	/*
	 * We just gained a reference on old_cset by taking it from the
	 * task. As trading it for new_cset is protected by cgroup_mutex,
	 * we're safe to drop it here; it will be freed under RCU.
	 */
	set_bit(CGRP_RELEASABLE, &old_cgrp->flags);
	put_css_set(old_cset);
}
/**
 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
 * @cgrp: the cgroup to attach to
 * @tsk: the task or the leader of the threadgroup to be attached
 * @threadgroup: attach the whole threadgroup?
 *
 * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
 * task_lock of @tsk or each thread in the threadgroup individually in turn.
 */
static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
			      bool threadgroup)
{
	int retval, i, group_size;
	struct cgroupfs_root *root = cgrp->root;
	struct cgroup_subsys_state *css, *failed_css = NULL;
	/* threadgroup list cursor and array */
	struct task_struct *leader = tsk;
	struct task_and_cgroup *tc;
	struct flex_array *group;
	struct cgroup_taskset tset = { };

	/*
	 * step 0: in order to do expensive, possibly blocking operations for
	 * every thread, we cannot iterate the thread group list, since it needs
	 * rcu or tasklist locked. instead, build an array of all threads in the
	 * group - group_rwsem prevents new threads from appearing, and if
	 * threads exit, this will just be an over-estimate.
	 */
	if (threadgroup)
		group_size = get_nr_threads(tsk);
	else
		group_size = 1;
	/* flex_array supports very large thread-groups better than kmalloc. */
	group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL);
	if (!group)
		return -ENOMEM;
	/* pre-allocate to guarantee space while iterating in rcu read-side. */
	retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL);
	if (retval)
		goto out_free_group_list;

	i = 0;
	/*
	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 */
	rcu_read_lock();
	do {
		struct task_and_cgroup ent;

		/* @tsk either already exited or can't exit until the end */
		if (tsk->flags & PF_EXITING)
			goto next;

		/* as per above, nr_threads may decrease, but not increase. */
		BUG_ON(i >= group_size);
		ent.task = tsk;
		ent.cgrp = task_cgroup_from_root(tsk, root);
		/* nothing to do if this task is already in the cgroup */
		if (ent.cgrp == cgrp)
			goto next;
		/*
		 * saying GFP_ATOMIC has no effect here because we did prealloc
		 * earlier, but it's good form to communicate our expectations.
		 */
		retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
		BUG_ON(retval != 0);
		i++;
	next:
		if (!threadgroup)
			break;
	} while_each_thread(leader, tsk);
	rcu_read_unlock();
	/* remember the number of threads in the array for later. */
	group_size = i;
	tset.tc_array = group;
	tset.tc_array_len = group_size;

	/* methods shouldn't be called if no task is actually migrating */
	retval = 0;
	if (!group_size)
		goto out_free_group_list;

	/*
	 * step 1: check that we can legitimately attach to the cgroup.
	 */
	for_each_css(css, i, cgrp) {
		if (css->ss->can_attach) {
			retval = css->ss->can_attach(css, &tset);
			if (retval) {
				failed_css = css;
				goto out_cancel_attach;
			}
		}
	}

	/*
	 * step 2: make sure css_sets exist for all threads to be migrated.
	 * we use find_css_set, which allocates a new one if necessary.
	 */
	for (i = 0; i < group_size; i++) {
		struct css_set *old_cset;

		tc = flex_array_get(group, i);
		old_cset = task_css_set(tc->task);
		tc->cset = find_css_set(old_cset, cgrp);
		if (!tc->cset) {
			retval = -ENOMEM;
			goto out_put_css_set_refs;
		}
	}

	/*
	 * step 3: now that we're guaranteed success wrt the css_sets,
	 * proceed to move all tasks to the new cgroup.  There are no
	 * failure cases after here, so this is the commit point.
	 */
	for (i = 0; i < group_size; i++) {
		tc = flex_array_get(group, i);
		cgroup_task_migrate(tc->cgrp, tc->task, tc->cset);
	}
	/* nothing is sensitive to fork() after this point. */

	/*
	 * step 4: do subsystem attach callbacks.
	 */
	for_each_css(css, i, cgrp)
		if (css->ss->attach)
			css->ss->attach(css, &tset);

	/*
	 * step 5: success! and cleanup
	 */
	retval = 0;
out_put_css_set_refs:
	if (retval) {
		for (i = 0; i < group_size; i++) {
			tc = flex_array_get(group, i);
			if (!tc->cset)
				break;
			put_css_set(tc->cset);
		}
	}
out_cancel_attach:
	if (retval) {
		for_each_css(css, i, cgrp) {
			if (css == failed_css)
				break;
			if (css->ss->cancel_attach)
				css->ss->cancel_attach(css, &tset);
		}
	}
out_free_group_list:
	flex_array_free(group);
	return retval;
}
/*
 * Find the task_struct of the task to attach by vpid and pass it along to the
 * function to attach either it or all tasks in its threadgroup. Will lock
 * cgroup_mutex and threadgroup; may take task_lock of task.
 */
static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
{
	struct task_struct *tsk;
	const struct cred *cred = current_cred(), *tcred;
	int ret;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;

retry_find_task:
	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			ret = -ESRCH;
			goto out_unlock_cgroup;
		}
		/*
		 * even if we're attaching all tasks in the thread group, we
		 * only need to check permissions on one of them.
		 */
		tcred = __task_cred(tsk);
		if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
		    !uid_eq(cred->euid, tcred->uid) &&
		    !uid_eq(cred->euid, tcred->suid)) {
			rcu_read_unlock();
			ret = -EACCES;
			goto out_unlock_cgroup;
		}
	} else {
		tsk = current;
	}

	if (threadgroup)
		tsk = tsk->group_leader;

	/*
	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
	 * trapped in a cpuset, or RT worker may be born in a cgroup
	 * with no rt_runtime allocated.  Just say no.
	 */
	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		rcu_read_unlock();
		goto out_unlock_cgroup;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	threadgroup_lock(tsk);
	if (threadgroup) {
		if (!thread_group_leader(tsk)) {
			/*
			 * a race with de_thread from another thread's exec()
			 * may strip us of our leadership, if this happens,
			 * there is no choice but to throw this task away and
			 * try again; this is
			 * "double-double-toil-and-trouble-check locking".
			 */
			threadgroup_unlock(tsk);
			put_task_struct(tsk);
			goto retry_find_task;
		}
	}

	ret = cgroup_attach_task(cgrp, tsk, threadgroup);

	threadgroup_unlock(tsk);

	put_task_struct(tsk);
out_unlock_cgroup:
	mutex_unlock(&cgroup_mutex);
	return ret;
}
/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroupfs_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	for_each_active_root(root) {
		struct cgroup *from_cgrp = task_cgroup_from_root(from, root);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
static int cgroup_tasks_write(struct cgroup_subsys_state *css,
			      struct cftype *cft, u64 pid)
{
	return attach_task_by_pid(css->cgroup, pid, false);
}

static int cgroup_procs_write(struct cgroup_subsys_state *css,
			      struct cftype *cft, u64 tgid)
{
	return attach_task_by_pid(css->cgroup, tgid, true);
}
static int cgroup_release_agent_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, const char *buffer)
{
	BUILD_BUG_ON(sizeof(css->cgroup->root->release_agent_path) < PATH_MAX);
	if (strlen(buffer) >= PATH_MAX)
		return -EINVAL;
	if (!cgroup_lock_live_group(css->cgroup))
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strcpy(css->cgroup->root->release_agent_path, buffer);
	spin_unlock(&release_agent_path_lock);
	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	seq_puts(seq, cgrp->root->release_agent_path);
	seq_putc(seq, '\n');
	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp));
	return 0;
}
/* A buffer size big enough for numbers or short strings */
#define CGROUP_LOCAL_BUFFER_SIZE 64

static ssize_t cgroup_file_write(struct file *file, const char __user *userbuf,
				 size_t nbytes, loff_t *ppos)
{
	struct cfent *cfe = __d_cfe(file->f_dentry);
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup_subsys_state *css = cfe->css;
	size_t max_bytes = cft->max_write_len ?: CGROUP_LOCAL_BUFFER_SIZE - 1;
	char *buf;
	int ret;

	if (nbytes >= max_bytes)
		return -E2BIG;

	buf = kmalloc(nbytes + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, userbuf, nbytes)) {
		ret = -EFAULT;
		goto out_free;
	}

	buf[nbytes] = '\0';

	if (cft->write_string) {
		ret = cft->write_string(css, cft, strstrip(buf));
	} else if (cft->write_u64) {
		unsigned long long v;
		ret = kstrtoull(buf, 0, &v);
		if (!ret)
			ret = cft->write_u64(css, cft, v);
	} else if (cft->write_s64) {
		long long v;
		ret = kstrtoll(buf, 0, &v);
		if (!ret)
			ret = cft->write_s64(css, cft, v);
	} else if (cft->trigger) {
		ret = cft->trigger(css, (unsigned int)cft->private);
	} else {
		ret = -EINVAL;
	}
out_free:
	kfree(buf);
	return ret ?: nbytes;
}
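
/*
 * Illustrative sketch (hypothetical "ex_" names): how a control file with a
 * ->write_u64 handler is reached through cgroup_file_write() above.  A write
 * of "42\n" is parsed by kstrtoull() and handed to the handler:
 *
 *	static int ex_limit_write(struct cgroup_subsys_state *css,
 *				  struct cftype *cft, u64 val)
 *	{
 *		// validate and store val in the subsystem's state
 *		return 0;	// 0 makes the write(2) return nbytes
 *	}
 *
 *	static struct cftype ex_files[] = {
 *		{ .name = "ex.limit", .write_u64 = ex_limit_write },
 *		{ }	// terminator
 *	};
 */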
/*
 * seqfile ops/methods for returning structured data. Currently just
 * supports string->u64 maps, but can be extended in future.
 */

static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
{
	struct cftype *cft = seq_cft(seq);

	if (cft->seq_start) {
		return cft->seq_start(seq, ppos);
	} else {
		/*
		 * The same behavior and code as single_open().  Returns
		 * !NULL if pos is at the beginning; otherwise, NULL.
		 */
		return NULL + !*ppos;
	}
}

static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	struct cftype *cft = seq_cft(seq);

	if (cft->seq_next) {
		return cft->seq_next(seq, v, ppos);
	} else {
		/*
		 * The same behavior and code as single_open(), always
		 * terminate after the initial read.
		 */
		++*ppos;
		return NULL;
	}
}

static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
{
	struct cftype *cft = seq_cft(seq);

	if (cft->seq_stop)
		cft->seq_stop(seq, v);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cftype *cft = seq_cft(m);
	struct cgroup_subsys_state *css = seq_css(m);

	if (cft->seq_show)
		return cft->seq_show(m, arg);

	if (cft->read_u64)
		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
	else if (cft->read_s64)
		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
	else
		return -EINVAL;
	return 0;
}

static struct seq_operations cgroup_seq_operations = {
	.start		= cgroup_seqfile_start,
	.next		= cgroup_seqfile_next,
	.stop		= cgroup_seqfile_stop,
	.show		= cgroup_seqfile_show,
};
static int cgroup_file_open(struct inode *inode, struct file *file)
{
	struct cfent *cfe = __d_cfe(file->f_dentry);
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cgrp = __d_cgrp(cfe->dentry->d_parent);
	struct cgroup_subsys_state *css;
	struct cgroup_open_file *of;
	int err;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	/*
	 * If the file belongs to a subsystem, pin the css.  Will be
	 * unpinned either on open failure or release.  This ensures that
	 * @css stays alive for all file operations.
	 */
	rcu_read_lock();
	css = cgroup_css(cgrp, cft->ss);
	if (cft->ss && !css_tryget(css))
		css = NULL;
	rcu_read_unlock();

	if (!css)
		return -ENODEV;

	/*
	 * @cfe->css is used by read/write/close to determine the
	 * associated css.  @file->private_data would be a better place but
	 * that's already used by seqfile.  Multiple accessors may use it
	 * simultaneously which is okay as the association never changes.
	 */
	WARN_ON_ONCE(cfe->css && cfe->css != css);
	cfe->css = css;

	of = __seq_open_private(file, &cgroup_seq_operations,
				sizeof(struct cgroup_open_file));
	if (of) {
		of->cfe = cfe;
		return 0;
	}

	if (css->ss)
		css_put(css);
	return -ENOMEM;
}

static int cgroup_file_release(struct inode *inode, struct file *file)
{
	struct cfent *cfe = __d_cfe(file->f_dentry);
	struct cgroup_subsys_state *css = cfe->css;

	if (css->ss)
		css_put(css);
	return seq_release_private(inode, file);
}
/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
			 struct inode *new_dir, struct dentry *new_dentry)
{
	int ret;
	struct cgroup_name *name, *old_name;
	struct cgroup *cgrp;

	/*
	 * It's convenient to use the parent dir's i_mutex to protect
	 * cgrp->name.
	 */
	lockdep_assert_held(&old_dir->i_mutex);

	if (!S_ISDIR(old_dentry->d_inode->i_mode))
		return -ENOTDIR;
	if (new_dentry->d_inode)
		return -EEXIST;
	if (old_dir != new_dir)
		return -EIO;

	cgrp = __d_cgrp(old_dentry);

	/*
	 * This isn't a proper migration and its usefulness is very
	 * limited.  Disallow if sane_behavior.
	 */
	if (cgroup_sane_behavior(cgrp))
		return -EPERM;

	name = cgroup_alloc_name(new_dentry->d_name.name);
	if (!name)
		return -ENOMEM;

	ret = simple_rename(old_dir, old_dentry, new_dir, new_dentry);
	if (ret) {
		kfree(name);
		return ret;
	}

	old_name = rcu_dereference_protected(cgrp->name, true);
	rcu_assign_pointer(cgrp->name, name);

	kfree_rcu(old_name, rcu_head);
	return 0;
}
static struct simple_xattrs *__d_xattrs(struct dentry *dentry)
{
	if (S_ISDIR(dentry->d_inode->i_mode))
		return &__d_cgrp(dentry)->xattrs;
	else
		return &__d_cfe(dentry)->xattrs;
}

static inline int xattr_enabled(struct dentry *dentry)
{
	struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
	return root->flags & CGRP_ROOT_XATTR;
}

static bool is_valid_xattr(const char *name)
{
	if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
	    !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
		return true;
	return false;
}

static int cgroup_setxattr(struct dentry *dentry, const char *name,
			   const void *val, size_t size, int flags)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	if (!is_valid_xattr(name))
		return -EINVAL;
	return simple_xattr_set(__d_xattrs(dentry), name, val, size, flags);
}

static int cgroup_removexattr(struct dentry *dentry, const char *name)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	if (!is_valid_xattr(name))
		return -EINVAL;
	return simple_xattr_remove(__d_xattrs(dentry), name);
}

static ssize_t cgroup_getxattr(struct dentry *dentry, const char *name,
			       void *buf, size_t size)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	if (!is_valid_xattr(name))
		return -EINVAL;
	return simple_xattr_get(__d_xattrs(dentry), name, buf, size);
}

static ssize_t cgroup_listxattr(struct dentry *dentry, char *buf, size_t size)
{
	if (!xattr_enabled(dentry))
		return -EOPNOTSUPP;
	return simple_xattr_list(__d_xattrs(dentry), buf, size);
}
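
/*
 * Illustrative sketch (assumes a hierarchy mounted with the "xattr" option;
 * hypothetical mount point and attribute name): only "trusted.*" and
 * "security.*" names pass is_valid_xattr(), so from userland something like
 * the following reaches cgroup_setxattr():
 *
 *	// setxattr(2) on a cgroup directory
 *	setxattr("/sys/fs/cgroup/ex", "trusted.ex_tag", "v1", 2, 0);
 *
 * A name such as "user.foo" is rejected with -EINVAL even when
 * CGRP_ROOT_XATTR is set.
 */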
static const struct file_operations cgroup_file_operations = {
	.read = seq_read,
	.write = cgroup_file_write,
	.llseek = generic_file_llseek,
	.open = cgroup_file_open,
	.release = cgroup_file_release,
};

static const struct inode_operations cgroup_file_inode_operations = {
	.setxattr = cgroup_setxattr,
	.getxattr = cgroup_getxattr,
	.listxattr = cgroup_listxattr,
	.removexattr = cgroup_removexattr,
};

static const struct inode_operations cgroup_dir_inode_operations = {
	.lookup = simple_lookup,
	.mkdir = cgroup_mkdir,
	.rmdir = cgroup_rmdir,
	.rename = cgroup_rename,
	.setxattr = cgroup_setxattr,
	.getxattr = cgroup_getxattr,
	.listxattr = cgroup_listxattr,
	.removexattr = cgroup_removexattr,
};
static int cgroup_create_file(struct dentry *dentry, umode_t mode,
			      struct super_block *sb)
{
	struct inode *inode;

	if (!dentry)
		return -ENOENT;
	if (dentry->d_inode)
		return -EEXIST;

	inode = cgroup_new_inode(mode, sb);
	if (!inode)
		return -ENOMEM;

	if (S_ISDIR(mode)) {
		inode->i_op = &cgroup_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		inc_nlink(dentry->d_parent->d_inode);

		/*
		 * Control reaches here with cgroup_mutex held.
		 * @inode->i_mutex should nest outside cgroup_mutex but we
		 * want to populate it immediately without releasing
		 * cgroup_mutex.  As @inode isn't visible to anyone else
		 * yet, trylock will always succeed without affecting
		 * lockdep checks.
		 */
		WARN_ON_ONCE(!mutex_trylock(&inode->i_mutex));
	} else if (S_ISREG(mode)) {
		inode->i_size = 0;
		inode->i_fop = &cgroup_file_operations;
		inode->i_op = &cgroup_file_inode_operations;
	}
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}
/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->mode)
		return cft->mode;

	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
		mode |= S_IRUGO;

	if (cft->write_u64 || cft->write_s64 || cft->write_string ||
	    cft->trigger)
		mode |= S_IWUSR;

	return mode;
}
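
/*
 * Worked example: a cftype with both .read_u64 and .write_u64 set and
 * .mode == 0 gets S_IRUGO | S_IWUSR (0644); one with only .seq_show gets
 * S_IRUGO (0444); one with only .trigger gets S_IWUSR (0200).
 */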
static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
{
	struct dentry *dir = cgrp->dentry;
	struct cgroup *parent = __d_cgrp(dir);
	struct dentry *dentry;
	struct cfent *cfe;
	int error;
	umode_t mode;
	char name[CGROUP_FILE_NAME_MAX];

	BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));

	cfe = kzalloc(sizeof(*cfe), GFP_KERNEL);
	if (!cfe)
		return -ENOMEM;

	cgroup_file_name(cgrp, cft, name);
	dentry = lookup_one_len(name, dir, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out;
	}

	cfe->type = (void *)cft;
	cfe->dentry = dentry;
	dentry->d_fsdata = cfe;
	simple_xattrs_init(&cfe->xattrs);

	mode = cgroup_file_mode(cft);
	error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb);
	if (!error) {
		list_add_tail(&cfe->node, &parent->files);
		cfe = NULL;
	}
	dput(dentry);
out:
	kfree(cfe);
	return error;
}
/**
 * cgroup_addrm_files - add or remove files to a cgroup directory
 * @cgrp: the target cgroup
 * @cfts: array of cftypes to be added
 * @is_add: whether to add or remove
 *
 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
 * For removals, this function never fails.  If addition fails, this
 * function doesn't remove files already added.  The caller is responsible
 * for cleaning up.
 */
static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add)
{
	struct cftype *cft;
	int ret;

	lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_tree_mutex);

	for (cft = cfts; cft->name[0] != '\0'; cft++) {
		/* does cft->flags tell us to skip this file on @cgrp? */
		if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp))
			continue;
		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent)
			continue;
		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent)
			continue;

		if (is_add) {
			ret = cgroup_add_file(cgrp, cft);
			if (ret) {
				pr_warn("cgroup_addrm_files: failed to add %s, err=%d\n",
					cft->name, ret);
				return ret;
			}
		} else {
			cgroup_rm_file(cgrp, cft);
		}
	}
	return 0;
}
static void cgroup_cfts_prepare(void)
	__acquires(&cgroup_mutex)
{
	/*
	 * Thanks to the entanglement with vfs inode locking, we can't walk
	 * the existing cgroups under cgroup_mutex and create files.
	 * Instead, we use css_for_each_descendant_pre() and drop RCU read
	 * lock before calling cgroup_addrm_files().
	 */
	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);
}
static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
	__releases(&cgroup_mutex)
{
	struct cgroup_subsys *ss = cfts[0].ss;
	struct cgroup *root = &ss->root->top_cgroup;
	struct super_block *sb = ss->root->sb;
	struct dentry *prev = NULL;
	struct inode *inode;
	struct cgroup_subsys_state *css;
	u64 update_before;
	int ret = 0;

	mutex_unlock(&cgroup_mutex);

	/* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
	if (!cfts || ss->root == &cgroup_dummy_root ||
	    !atomic_inc_not_zero(&sb->s_active)) {
		mutex_unlock(&cgroup_tree_mutex);
		return 0;
	}

	/*
	 * All cgroups which are created after we drop cgroup_mutex will
	 * have the updated set of files, so we only need to update the
	 * cgroups created before the current @cgroup_serial_nr_next.
	 */
	update_before = cgroup_serial_nr_next;

	/* add/rm files for all cgroups created before */
	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
		struct cgroup *cgrp = css->cgroup;

		if (cgroup_is_dead(cgrp))
			continue;

		inode = cgrp->dentry->d_inode;
		dget(cgrp->dentry);
		dput(prev);
		prev = cgrp->dentry;

		mutex_unlock(&cgroup_tree_mutex);
		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_tree_mutex);
		if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
			ret = cgroup_addrm_files(cgrp, cfts, is_add);
		mutex_unlock(&inode->i_mutex);
		if (ret)
			break;
	}
	mutex_unlock(&cgroup_tree_mutex);
	dput(prev);
	deactivate_super(sb);
	return ret;
}
/**
 * cgroup_add_cftypes - add an array of cftypes to a subsystem
 * @ss: target cgroup subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Register @cfts to @ss.  Files described by @cfts are created for all
 * existing cgroups to which @ss is attached and all future cgroups will
 * have them too.  This function can be called anytime whether @ss is
 * attached or not.
 *
 * Returns 0 on successful registration, -errno on failure.  Note that this
 * function currently returns 0 as long as @cfts registration is successful
 * even if some file creation attempts on existing cgroups fail.
 */
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
	struct cftype_set *set;
	struct cftype *cft;
	int ret;

	set = kzalloc(sizeof(*set), GFP_KERNEL);
	if (!set)
		return -ENOMEM;

	for (cft = cfts; cft->name[0] != '\0'; cft++)
		cft->ss = ss;

	cgroup_cfts_prepare();
	set->cfts = cfts;
	list_add_tail(&set->node, &ss->cftsets);
	ret = cgroup_cfts_commit(cfts, true);
	if (ret)
		cgroup_rm_cftypes(cfts);
	return ret;
}
EXPORT_SYMBOL_GPL(cgroup_add_cftypes);
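
/*
 * Illustrative sketch (hypothetical "ex_" names): a controller registering
 * its files.  The array is terminated by a zero-length name, and the flags
 * interact with cgroup_addrm_files() above - CFTYPE_INSANE files are skipped
 * on sane_behavior hierarchies, CFTYPE_ONLY_ON_ROOT files exist only on the
 * root cgroup:
 *
 *	static struct cftype ex_cftypes[] = {
 *		{ .name = "ex.usage", .read_u64 = ex_usage_read },
 *		{ .name = "ex.legacy", .flags = CFTYPE_INSANE,
 *		  .read_u64 = ex_legacy_read },
 *		{ .name = "ex.root_only", .flags = CFTYPE_ONLY_ON_ROOT,
 *		  .seq_show = ex_root_show },
 *		{ }	// terminator - name[0] == '\0' stops the loop
 *	};
 *
 *	// typically called from the subsystem's init path
 *	WARN_ON(cgroup_add_cftypes(&ex_subsys, ex_cftypes));
 */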
/**
 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
 * @cfts: zero-length name terminated array of cftypes
 *
 * Unregister @cfts.  Files described by @cfts are removed from all
 * existing cgroups and all future cgroups won't have them either.  This
 * function can be called anytime whether @cfts' subsys is attached or not.
 *
 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
 * registered.
 */
int cgroup_rm_cftypes(struct cftype *cfts)
{
	struct cftype_set *set;

	if (!cfts || !cfts[0].ss)
		return -ENOENT;

	cgroup_cfts_prepare();

	list_for_each_entry(set, &cfts[0].ss->cftsets, node) {
		if (set->cfts == cfts) {
			list_del(&set->node);
			kfree(set);
			cgroup_cfts_commit(cfts, false);
			return 0;
		}
	}

	cgroup_cfts_commit(NULL, false);
	return -ENOENT;
}
/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	read_lock(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += atomic_read(&link->cset->refcount);
	read_unlock(&css_set_lock);
	return count;
}
/*
 * To reduce the fork() overhead for systems that are not actually using
 * their cgroups capability, we don't maintain the lists running through
 * each css_set to its tasks until we see the list actually used - in other
 * words after the first call to css_task_iter_start().
 */
static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	write_lock(&css_set_lock);
	use_task_css_set_links = 1;
	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread(). Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 */
		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
			list_add(&p->cg_list, &task_css_set(p)->tasks);
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	write_unlock(&css_set_lock);
}
/**
 * css_next_child - find the next child of a given css
 * @pos_css: the current position (%NULL to initiate traversal)
 * @parent_css: css whose children to walk
 *
 * This function returns the next child of @parent_css and should be called
 * under either cgroup_mutex or RCU read lock.  The only requirement is
 * that @parent_css and @pos_css are accessible.  The next sibling is
 * guaranteed to be returned regardless of their states.
 */
struct cgroup_subsys_state *
css_next_child(struct cgroup_subsys_state *pos_css,
	       struct cgroup_subsys_state *parent_css)
{
	struct cgroup *pos = pos_css ? pos_css->cgroup : NULL;
	struct cgroup *cgrp = parent_css->cgroup;
	struct cgroup *next;

	cgroup_assert_mutexes_or_rcu_locked();

	/*
	 * @pos could already have been removed.  Once a cgroup is removed,
	 * its ->sibling.next is no longer updated when its next sibling
	 * changes.  As CGRP_DEAD assertion is serialized and happens
	 * before the cgroup is taken off the ->sibling list, if we see it
	 * unasserted, it's guaranteed that the next sibling hasn't
	 * finished its grace period even if it's already removed, and thus
	 * safe to dereference from this RCU critical section.  If
	 * ->sibling.next is inaccessible, cgroup_is_dead() is guaranteed
	 * to be visible as %true here.
	 *
	 * If @pos is dead, its next pointer can't be dereferenced;
	 * however, as each cgroup is given a monotonically increasing
	 * unique serial number and always appended to the sibling list,
	 * the next one can be found by walking the parent's children until
	 * we see a cgroup with higher serial number than @pos's.  While
	 * this path can be slower, it's taken only when either the current
	 * cgroup is removed or iteration and removal race.
	 */
	if (!pos) {
		next = list_entry_rcu(cgrp->children.next, struct cgroup, sibling);
	} else if (likely(!cgroup_is_dead(pos))) {
		next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling);
	} else {
		list_for_each_entry_rcu(next, &cgrp->children, sibling)
			if (next->serial_nr > pos->serial_nr)
				break;
	}

	if (&next->sibling == &cgrp->children)
		return NULL;

	return cgroup_css(next, parent_css->ss);
}
EXPORT_SYMBOL_GPL(css_next_child);
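
/*
 * Illustrative sketch: walking the children of @parent under RCU with the
 * helper above (css_for_each_child() is the canonical wrapper built on it):
 *
 *	struct cgroup_subsys_state *child;
 *	int count = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent)
 *		count++;		// hypothetical per-child work
 *	rcu_read_unlock();
 *
 * Because a dead position falls back to the serial-number walk, iteration
 * stays safe even when it races with removal.
 */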
/**
 * css_next_descendant_pre - find the next descendant for pre-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_pre().  Find the next descendant
 * to visit for pre-order traversal of @root's descendants.  @root is
 * included in the iteration and the first node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of @root.
 */
struct cgroup_subsys_state *
css_next_descendant_pre(struct cgroup_subsys_state *pos,
			struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutexes_or_rcu_locked();

	/* if first iteration, visit @root */
	if (!pos)
		return root;

	/* visit the first child if exists */
	next = css_next_child(NULL, pos);
	if (next)
		return next;

	/* no child, visit my or the closest ancestor's next sibling */
	while (pos != root) {
		next = css_next_child(pos, css_parent(pos));
		if (next)
			return next;
		pos = css_parent(pos);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(css_next_descendant_pre);
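
/*
 * Illustrative sketch: a pre-order walk visits @root first and then each
 * subtree top-down, which is the natural shape for propagating state from
 * parents to children (hypothetical helper name):
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root)
 *		ex_refresh_from_parent(pos);	// hypothetical per-css work
 *	rcu_read_unlock();
 */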
/**
 * css_rightmost_descendant - return the rightmost descendant of a css
 * @pos: css of interest
 *
 * Return the rightmost descendant of @pos.  If there's no descendant, @pos
 * is returned.  This can be used during pre-order traversal to skip
 * subtree of @pos.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct rightmost descendant as
 * long as @pos is accessible.
 */
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last, *tmp;

	cgroup_assert_mutexes_or_rcu_locked();

	do {
		last = pos;
		/* ->prev isn't RCU safe, walk ->next till the end */
		pos = NULL;
		css_for_each_child(tmp, last)
			pos = tmp;
	} while (pos);

	return last;
}
EXPORT_SYMBOL_GPL(css_rightmost_descendant);
static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last;

	do {
		last = pos;
		pos = css_next_child(NULL, pos);
	} while (pos);

	return last;
}
/**
 * css_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_post().  Find the next descendant
 * to visit for post-order traversal of @root's descendants.  @root is
 * included in the iteration and the last node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of @root.
 */
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutexes_or_rcu_locked();

	/* if first iteration, visit leftmost descendant which may be @root */
	if (!pos)
		return css_leftmost_descendant(root);

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
	next = css_next_child(pos, css_parent(pos));
	if (next)
		return css_leftmost_descendant(next);

	/* no sibling left, visit parent */
	return css_parent(pos);
}
EXPORT_SYMBOL_GPL(css_next_descendant_post);
/**
 * css_advance_task_iter - advance a task iterator to the next css_set
 * @it: the iterator to advance
 *
 * Advance @it to the next css_set to walk.
 */
static void css_advance_task_iter(struct css_task_iter *it)
{
	struct list_head *l = it->cset_link;
	struct cgrp_cset_link *link;
	struct css_set *cset;

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == &it->origin_css->cgroup->cset_links) {
			it->cset_link = NULL;
			return;
		}
		link = list_entry(l, struct cgrp_cset_link, cset_link);
		cset = link->cset;
	} while (list_empty(&cset->tasks));
	it->cset_link = l;
	it->task = cset->tasks.next;
}
/**
 * css_task_iter_start - initiate task iteration
 * @css: the css to walk tasks of
 * @it: the task iterator to use
 *
 * Initiate iteration through the tasks of @css.  The caller can call
 * css_task_iter_next() to walk through the tasks until the function
 * returns NULL.  On completion of iteration, css_task_iter_end() must be
 * called.
 *
 * Note that this function acquires a lock which is released when the
 * iteration finishes.  The caller can't sleep while iteration is in
 * progress.
 */
void css_task_iter_start(struct cgroup_subsys_state *css,
			 struct css_task_iter *it)
	__acquires(css_set_lock)
{
	/*
	 * The first time anyone tries to iterate across a css, we need to
	 * enable the list linking each css_set to its tasks, and fix up
	 * all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	read_lock(&css_set_lock);

	it->origin_css = css;
	it->cset_link = &css->cgroup->cset_links;

	css_advance_task_iter(it);
}
/**
 * css_task_iter_next - return the next task for the iterator
 * @it: the task iterator being iterated
 *
 * The "next" function for task iteration.  @it should have been
 * initialized via css_task_iter_start().  Returns NULL when the iteration
 * reaches the end.
 */
struct task_struct *css_task_iter_next(struct css_task_iter *it)
{
	struct task_struct *res;
	struct list_head *l = it->task;
	struct cgrp_cset_link *link;

	/* If the iterator cg is NULL, we have no tasks */
	if (!it->cset_link)
		return NULL;
	res = list_entry(l, struct task_struct, cg_list);
	/* Advance iterator to find next entry */
	l = l->next;
	link = list_entry(it->cset_link, struct cgrp_cset_link, cset_link);
	if (l == &link->cset->tasks) {
		/*
		 * We reached the end of this task list - move on to the
		 * next cgrp_cset_link.
		 */
		css_advance_task_iter(it);
	} else {
		it->task = l;
	}
	return res;
}
/**
 * css_task_iter_end - finish task iteration
 * @it: the task iterator to finish
 *
 * Finish task iteration started by css_task_iter_start().
 */
void css_task_iter_end(struct css_task_iter *it)
	__releases(css_set_lock)
{
	read_unlock(&css_set_lock);
}
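
/*
 * Illustrative sketch: counting the tasks of @css with the iterator trio
 * above.  css_set_lock is read-held between start and end, so the loop
 * body must not sleep:
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *	int nr = 0;
 *
 *	css_task_iter_start(css, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		nr++;
 *	css_task_iter_end(&it);
 */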
static inline int started_after_time(struct task_struct *t1,
				     struct timespec *time,
				     struct task_struct *t2)
{
	int start_diff = timespec_compare(&t1->start_time, time);
	if (start_diff > 0) {
		return 1;
	} else if (start_diff < 0) {
		return 0;
	} else {
		/*
		 * Arbitrarily, if two processes started at the same
		 * time, we'll say that the lower pointer value
		 * started first. Note that t2 may have exited by now
		 * so this may not be a valid pointer any longer, but
		 * that's fine - it still serves to distinguish
		 * between two tasks started (effectively) simultaneously.
		 */
		return t1 > t2;
	}
}

/*
 * This function is a callback from heap_insert() and is used to order
 * the heap.
 * In this case we order the heap in descending task start time.
 */
static inline int started_after(void *p1, void *p2)
{
	struct task_struct *t1 = p1;
	struct task_struct *t2 = p2;
	return started_after_time(t1, &t2->start_time, t2);
}
/**
 * css_scan_tasks - iterate through all the tasks in a css
 * @css: the css to iterate tasks of
 * @test: optional test callback
 * @process: process callback
 * @data: data passed to @test and @process
 * @heap: optional pre-allocated heap used for task iteration
 *
 * Iterate through all the tasks in @css, calling @test for each, and if it
 * returns %true, call @process for it also.
 *
 * @test may be NULL, meaning always true (select all tasks), which
 * effectively duplicates css_task_iter_{start,next,end}() but does not
 * lock css_set_lock for the call to @process.
 *
 * It is guaranteed that @process will act on every task that is a member
 * of @css for the duration of this call.  This function may or may not
 * call @process for tasks that exit or move to a different css during the
 * call, or are forked or move into the css during the call.
 *
 * Note that @test may be called with locks held, and may in some
 * situations be called multiple times for the same task, so it should be
 * cheap.
 *
 * If @heap is non-NULL, a heap has been pre-allocated and will be used for
 * heap operations (and its "gt" member will be overwritten), else a
 * temporary heap will be used (allocation of which may cause this function
 * to fail).
 */
int css_scan_tasks(struct cgroup_subsys_state *css,
		   bool (*test)(struct task_struct *, void *),
		   void (*process)(struct task_struct *, void *),
		   void *data, struct ptr_heap *heap)
{
	int retval, i;
	struct css_task_iter it;
	struct task_struct *p, *dropped;
	/* Never dereference latest_task, since it's not refcounted */
	struct task_struct *latest_task = NULL;
	struct ptr_heap tmp_heap;
	struct timespec latest_time = { 0, 0 };

	if (heap) {
		/* The caller supplied our heap and pre-allocated its memory */
		heap->gt = &started_after;
	} else {
		/* We need to allocate our own heap memory */
		heap = &tmp_heap;
		retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after);
		if (retval)
			/* cannot allocate the heap */
			return retval;
	}

again:
	/*
	 * Scan tasks in the css, using the @test callback to determine
	 * which are of interest, and invoking @process callback on the
	 * ones which need an update.  Since we don't want to hold any
	 * locks during the task updates, gather tasks to be processed in a
	 * heap structure.  The heap is sorted by descending task start
	 * time.  If the statically-sized heap fills up, we overflow tasks
	 * that started later, and in future iterations only consider tasks
	 * that started after the latest task in the previous pass. This
	 * guarantees forward progress and that we don't miss any tasks.
	 */
	heap->size = 0;
	css_task_iter_start(css, &it);
	while ((p = css_task_iter_next(&it))) {
		/*
		 * Only affect tasks that qualify per the caller's callback,
		 * if he provided one
		 */
		if (test && !test(p, data))
			continue;
		/*
		 * Only process tasks that started after the last task
		 * we processed
		 */
		if (!started_after_time(p, &latest_time, latest_task))
			continue;
		dropped = heap_insert(heap, p);
		if (dropped == NULL) {
			/*
			 * The new task was inserted; the heap wasn't
			 * previously full
			 */
			get_task_struct(p);
		} else if (dropped != p) {
			/*
			 * The new task was inserted, and pushed out a
			 * different task
			 */
			get_task_struct(p);
			put_task_struct(dropped);
		}
		/*
		 * Else the new task was newer than anything already in
		 * the heap and wasn't inserted
		 */
	}
	css_task_iter_end(&it);

	if (heap->size) {
		for (i = 0; i < heap->size; i++) {
			struct task_struct *q = heap->ptrs[i];
			if (i == 0) {
				latest_time = q->start_time;
				latest_task = q;
			}
			/* Process the task per the caller's callback */
			process(q, data);
			put_task_struct(q);
		}
		/*
		 * If we had to process any tasks at all, scan again
		 * in case some of them were in the middle of forking
		 * children that didn't get processed.
		 * Not the most efficient way to do it, but it avoids
		 * having to take callback_mutex in the fork path
		 */
		goto again;
	}
	if (heap == &tmp_heap)
		heap_free(&tmp_heap);
	return 0;
}
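
/*
 * Illustrative sketch (hypothetical "ex_" callbacks): acting on every
 * userspace task of @css.  @test runs under locks and must stay cheap;
 * @process runs without css_set_lock held and may do heavier work:
 *
 *	static bool ex_is_userspace(struct task_struct *p, void *data)
 *	{
 *		return p->mm != NULL;	// kthreads have no mm
 *	}
 *
 *	static void ex_mark_one(struct task_struct *p, void *data)
 *	{
 *		// e.g. signal or flag the task
 *	}
 *
 *	ret = css_scan_tasks(css, ex_is_userspace, ex_mark_one, NULL, NULL);
 */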
static void cgroup_transfer_one_task(struct task_struct *task, void *data)
{
	struct cgroup *new_cgroup = data;

	mutex_lock(&cgroup_mutex);
	cgroup_attach_task(new_cgroup, task, false);
	mutex_unlock(&cgroup_mutex);
}

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	return css_scan_tasks(&from->dummy_css, NULL, cgroup_transfer_one_task,
			      to, NULL);
}
/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};
/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}
/*
 * Used to destroy all pidlists lingering waiting for destroy timer.  None
 * should be left afterwards.
 */
static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}
static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}
/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so i starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
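
/*
 * Worked example: on the sorted input {3, 3, 5, 7, 7, 7}, the src/dest scan
 * above compacts the array in place to {3, 5, 7, ...} and returns 3; only
 * the first `dest` entries remain meaningful afterwards.
 */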
/*
 * The two pid files - tasks and cgroup.procs - have always guaranteed that
 * the result is sorted, which forced this whole pidlist fiasco.  As pid
 * order is different per namespace, each namespace needs a differently
 * sorted list, making it impossible to use, for example, a single rbtree of
 * member tasks sorted by task pointer.  As pidlists can be fairly large,
 * allocating one per open file is dangerous, so cgroup had to implement a
 * shared pool of pidlists keyed by cgroup and namespace.
 *
 * All this extra complexity was caused by the original implementation
 * committing to an entirely unnecessary property.  In the long term, we
 * want to do away with it.  Explicitly scramble sort order if
 * sane_behavior so that no such expectation exists in the new interface.
 *
 * Scrambling is done by swapping every two consecutive bits, which is
 * non-identity one-to-one mapping which disturbs sort order sufficiently.
 */
static pid_t pid_fry(pid_t pid)
{
	unsigned a = pid & 0x55555555;
	unsigned b = pid & 0xAAAAAAAA;

	return (a << 1) | (b >> 1);
}

static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
{
	if (cgroup_sane_behavior(cgrp))
		return pid_fry(pid);
	else
		return pid;
}
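
/*
 * Worked example: pid 5 is 0b0101, so a = 0b0101 (even bits), b = 0 (odd
 * bits) and pid_fry(5) = (0b0101 << 1) | 0 = 0b1010 = 10.  Applying the
 * swap again gives pid_fry(10) = 5 - the mapping is its own inverse, so it
 * stays one-to-one while breaking numeric sort order.
 */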
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static int fried_cmppid(const void *a, const void *b)
{
	return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
}
static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}
/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}
/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->dummy_css, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	if (cgroup_sane_behavior(cgrp))
		sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
	else
		sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		mutex_unlock(&cgrp->pidlist_mutex);
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}
/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	int ret = -EINVAL;
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/*
	 * Validate dentry by checking the superblock operations,
	 * and make sure it's a directory.
	 */
	if (dentry->d_sb->s_op != &cgroup_ops ||
	    !S_ISDIR(dentry->d_inode->i_mode))
		goto err;

	ret = 0;
	cgrp = dentry->d_fsdata;

	css_task_iter_start(&cgrp->dummy_css, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

err:
	return ret;
}
/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct cgroup_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
				index = mid;
				break;
			} else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = cgroup_pid_fry(cgrp, *iter);
	return iter;
}
static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct cgroup_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}
static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct cgroup_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
		return p;
	}
}
static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	return seq_printf(s, "%d\n", *(int *)v);
}

/*
 * seq_operations functions for iterating on pidlists through seq_file -
 * independent of whether it's tasks or procs
 */
static const struct seq_operations cgroup_pidlist_seq_operations = {
	.start = cgroup_pidlist_start,
	.stop = cgroup_pidlist_stop,
	.next = cgroup_pidlist_next,
	.show = cgroup_pidlist_show,
};
static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	clear_bit(CGRP_RELEASABLE, &css->cgroup->flags);
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}
/*
 * When dput() is called asynchronously, if umount has been done and
 * then deactivate_super() in cgroup_free_fn() kills the superblock,
 * there's a small window that vfs will see the root dentry with non-zero
 * refcnt and trigger BUG().
 *
 * That's why we hold a reference before dput() and drop it right after.
 */
static void cgroup_dput(struct cgroup *cgrp)
{
	struct super_block *sb = cgrp->root->sb;

	atomic_inc(&sb->s_active);
	dput(cgrp->dentry);
	deactivate_super(sb);
}
static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}
static struct cftype cgroup_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write_u64 = cgroup_procs_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "cgroup.clone_children",
		.flags = CFTYPE_INSANE,
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},

	/*
	 * Historical crazy stuff.  These don't have "cgroup."  prefix and
	 * don't exist if sane_behavior.  If you're depending on these, be
	 * prepared to be burned.
	 */
	{
		.name = "tasks",
		.flags = CFTYPE_INSANE,		/* use "procs" instead */
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write_u64 = cgroup_tasks_write,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "notify_on_release",
		.flags = CFTYPE_INSANE,
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write_string = cgroup_release_agent_write,
		.max_write_len = PATH_MAX,
	},
	{ }	/* terminate */
};
/**
 * cgroup_populate_dir - create subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be added
 *
 * On failure, no file is added.
 */
static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
	struct cgroup_subsys *ss;
	int i, ret = 0;

	/* process cftsets of each subsystem */
	for_each_subsys(ss, i) {
		struct cftype_set *set;

		if (!test_bit(i, &subsys_mask))
			continue;

		list_for_each_entry(set, &ss->cftsets, node) {
			ret = cgroup_addrm_files(cgrp, set->cfts, true);
			if (ret < 0)
				goto err;
		}
	}
	return 0;
err:
	cgroup_clear_dir(cgrp, subsys_mask);
	return ret;
}
/*
 * css destruction is a four-stage process.
 *
 * 1. Destruction starts.  Killing of the percpu_ref is initiated.
 *    Implemented in kill_css().
 *
 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
 *    and thus css_tryget() is guaranteed to fail, the css can be offlined
 *    by invoking offline_css().  After offlining, the base ref is put.
 *    Implemented in css_killed_work_fn().
 *
 * 3. When the percpu_ref reaches zero, the only possible remaining
 *    accessors are inside RCU read sections.  css_release() schedules the
 *    RCU callback.
 *
 * 4. After the grace period, the css can be freed.  Implemented in
 *    css_free_work_fn().
 *
 * It is actually hairier because both step 2 and 4 require process context
 * and thus involve punting to css->destroy_work adding two additional
 * steps to the already complex sequence.
 */
static void css_free_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup *cgrp = css->cgroup;

	if (css->parent)
		css_put(css->parent);

	css->ss->css_free(css);
	cgroup_dput(cgrp);
}

static void css_free_rcu_fn(struct rcu_head *rcu_head)
{
	struct cgroup_subsys_state *css =
		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);

	/*
	 * css holds an extra ref to @cgrp->dentry which is put on the last
	 * css_put().  dput() requires process context which we don't have.
	 */
	INIT_WORK(&css->destroy_work, css_free_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void css_release(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	rcu_assign_pointer(css->cgroup->subsys[css->ss->id], NULL);
	call_rcu(&css->rcu_head, css_free_rcu_fn);
}
static void init_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss,
		     struct cgroup *cgrp)
{
	css->cgroup = cgrp;
	css->ss = ss;
	css->flags = 0;

	if (cgrp->parent)
		css->parent = cgroup_css(cgrp->parent, ss);
	else
		css->flags |= CSS_ROOT;

	BUG_ON(cgroup_css(cgrp, ss));
}
/* invoke ->css_online() on a new CSS and mark it online if successful */
static int online_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;
	int ret = 0;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	if (ss->css_online)
		ret = ss->css_online(css);
	if (!ret) {
		css->flags |= CSS_ONLINE;
		css->cgroup->nr_css++;
		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
	}
	return ret;
}
/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
static void offline_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	if (!(css->flags & CSS_ONLINE))
		return;

	if (ss->css_offline)
		ss->css_offline(css);

	css->flags &= ~CSS_ONLINE;
	css->cgroup->nr_css--;
	RCU_INIT_POINTER(css->cgroup->subsys[ss->id], css);
}
/**
 * create_css - create a cgroup_subsys_state
 * @cgrp: the cgroup new css will be associated with
 * @ss: the subsys of new css
 *
 * Create a new css associated with @cgrp - @ss pair.  On success, the new
 * css is online and installed in @cgrp with all interface files created.
 * Returns 0 on success, -errno on failure.
 */
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
	struct cgroup *parent = cgrp->parent;
	struct cgroup_subsys_state *css;
	int err;

	lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_mutex);

	css = ss->css_alloc(cgroup_css(parent, ss));
	if (IS_ERR(css))
		return PTR_ERR(css);

	err = percpu_ref_init(&css->refcnt, css_release);
	if (err)
		goto err_free;

	init_css(css, ss, cgrp);

	err = cgroup_populate_dir(cgrp, 1 << ss->id);
	if (err)
		goto err_free;

	err = online_css(css);
	if (err)
		goto err_free;

	css_get(css->parent);

	if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
	    parent->parent) {
		pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
			   current->comm, current->pid, ss->name);
		if (!strcmp(ss->name, "memory"))
			pr_warning("cgroup: \"memory\" requires setting use_hierarchy to 1 on the root.\n");
		ss->warned_broken_hierarchy = true;
	}

	return 0;

err_free:
	percpu_ref_cancel_init(&css->refcnt);
	ss->css_free(css);
	return err;
}
/*
 * cgroup_create - create a cgroup
 * @parent: cgroup that will be parent of the new cgroup
 * @dentry: dentry of the new cgroup
 * @mode: mode to set on new inode
 *
 * Must be called with the mutex on the parent inode held
 */
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
			  umode_t mode)
{
	struct cgroup *cgrp;
	struct cgroup_name *name;
	struct cgroupfs_root *root = parent->root;
	int ssid, err;
	struct cgroup_subsys *ss;
	struct super_block *sb = root->sb;

	/* allocate the cgroup and its ID, 0 is reserved for the root */
	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
	if (!cgrp)
		return -ENOMEM;

	name = cgroup_alloc_name(dentry->d_name.name);
	if (!name) {
		err = -ENOMEM;
		goto err_free_cgrp;
	}
	rcu_assign_pointer(cgrp->name, name);

	mutex_lock(&cgroup_tree_mutex);

	/*
	 * Only live parents can have children.  Note that the liveliness
	 * check isn't strictly necessary because cgroup_mkdir() and
	 * cgroup_rmdir() are fully synchronized by i_mutex; however, do it
	 * anyway so that locking is contained inside cgroup proper and we
	 * don't get nasty surprises if we ever grow another caller.
	 */
	if (!cgroup_lock_live_group(parent)) {
		err = -ENODEV;
		goto err_unlock_tree;
	}

	/*
	 * Temporarily set the pointer to NULL, so idr_find() won't return
	 * a half-baked cgroup.
	 */
	cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
	if (cgrp->id < 0) {
		err = -ENOMEM;
		goto err_unlock;
	}

	/* Grab a reference on the superblock so the hierarchy doesn't
	 * get deleted on unmount if there are child cgroups.  This
	 * can be done outside cgroup_mutex, since the sb can't
	 * disappear while someone has an open control file on the
	 * fs */
	atomic_inc(&sb->s_active);

	init_cgroup_housekeeping(cgrp);

	dentry->d_fsdata = cgrp;
	cgrp->dentry = dentry;

	cgrp->parent = parent;
	cgrp->dummy_css.parent = &parent->dummy_css;
	cgrp->root = parent->root;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);

	/*
	 * Create directory.  cgroup_create_file() returns with the new
	 * directory locked on success so that it can be populated without
	 * dropping cgroup_mutex.
	 */
	err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
	if (err < 0)
		goto err_free_id;
	lockdep_assert_held(&dentry->d_inode->i_mutex);

	cgrp->serial_nr = cgroup_serial_nr_next++;

	/* allocation complete, commit to creation */
	list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
	root->number_of_cgroups++;

	/* hold a ref to the parent's dentry */
	dget(parent->dentry);

	/*
	 * @cgrp is now fully operational.  If something fails after this
	 * point, it'll be released via the normal destruction path.
	 */
	idr_replace(&root->cgroup_idr, cgrp, cgrp->id);

	err = cgroup_addrm_files(cgrp, cgroup_base_files, true);
	if (err)
		goto err_destroy;

	/* let's create and online css's */
	for_each_subsys(ss, ssid) {
		if (root->subsys_mask & (1 << ssid)) {
			err = create_css(cgrp, ss);
			if (err)
				goto err_destroy;
		}
	}

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);

	return 0;

err_free_id:
	idr_remove(&root->cgroup_idr, cgrp->id);
	/* Release the reference count that we took on the superblock */
	deactivate_super(sb);
err_unlock:
	mutex_unlock(&cgroup_mutex);
err_unlock_tree:
	mutex_unlock(&cgroup_tree_mutex);
	kfree(rcu_dereference_raw(cgrp->name));
err_free_cgrp:
	kfree(cgrp);
	return err;

err_destroy:
	cgroup_destroy_locked(cgrp);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
	mutex_unlock(&dentry->d_inode->i_mutex);
	return err;
}
static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct cgroup *c_parent = dentry->d_parent->d_fsdata;

	/* the vfs holds inode->i_mutex already */
	return cgroup_create(c_parent, dentry, mode | S_IFDIR);
}
/*
 * This is called when the refcnt of a css is confirmed to be killed.
 * css_tryget() is now guaranteed to fail.
 */
static void css_killed_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup *cgrp = css->cgroup;

	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	/*
	 * css_tryget() is guaranteed to fail now.  Tell subsystems to
	 * initiate destruction.
	 */
	offline_css(css);

	/*
	 * If @cgrp is marked dead, it's waiting for refs of all css's to
	 * be disabled before proceeding to the second phase of cgroup
	 * destruction.  If we are the last one, kick it off.
	 */
	if (!cgrp->nr_css && cgroup_is_dead(cgrp))
		cgroup_destroy_css_killed(cgrp);

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);

	/*
	 * Put the css refs from kill_css().  Each css holds an extra
	 * reference to the cgroup's dentry and cgroup removal proceeds
	 * regardless of css refs.  On the last put of each css, whenever
	 * that may be, the extra dentry ref is put so that dentry
	 * destruction happens only after all css's are released.
	 */
	css_put(css);
}
/* css kill confirmation processing requires process context, bounce */
static void css_killed_ref_fn(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_killed_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}
/**
 * kill_css - destroy a css
 * @css: css to destroy
 *
 * This function initiates destruction of @css by removing cgroup interface
 * files and putting its base reference.  ->css_offline() will be invoked
 * asynchronously once css_tryget() is guaranteed to fail and when the
 * reference count reaches zero, @css will be released.
 */
static void kill_css(struct cgroup_subsys_state *css)
{
	cgroup_clear_dir(css->cgroup, 1 << css->ss->id);

	/*
	 * Killing would put the base ref, but we need to keep it alive
	 * until after ->css_offline().
	 */
	percpu_ref_get(&css->refcnt);

	/*
	 * cgroup core guarantees that, by the time ->css_offline() is
	 * invoked, no new css reference will be given out via
	 * css_tryget().  We can't simply call percpu_ref_kill() and
	 * proceed to offlining css's because percpu_ref_kill() doesn't
	 * guarantee that the ref is seen as killed on all CPUs on return.
	 *
	 * Use percpu_ref_kill_and_confirm() to get notifications as each
	 * css is confirmed to be seen as killed on all CPUs.
	 */
	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
}
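/*
 * Illustration, not part of the original file: the generic
 * percpu_ref_kill_and_confirm() pattern that kill_css() relies on.  The
 * names my_ref and my_confirm are hypothetical:
 *
 *	static void my_confirm(struct percpu_ref *ref)
 *	{
 *		schedule_further_teardown(ref);
 *	}
 *
 *	percpu_ref_get(&my_ref);
 *	percpu_ref_kill_and_confirm(&my_ref, my_confirm);
 *
 * my_confirm() runs only once the ref is seen as killed on all CPUs, i.e.
 * once tryget can no longer succeed anywhere; the extra reference taken
 * above keeps the object alive across that window and is dropped later
 * with percpu_ref_put(&my_ref).  schedule_further_teardown() stands in
 * for whatever the caller does next (here: bouncing to a work item).
 */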
/**
 * cgroup_destroy_locked - the first stage of cgroup destruction
 * @cgrp: cgroup to be destroyed
 *
 * css's make use of percpu refcnts whose killing latency shouldn't be
 * exposed to userland and are RCU protected.  Also, cgroup core needs to
 * guarantee that css_tryget() won't succeed by the time ->css_offline() is
 * invoked.  To satisfy all the requirements, destruction is implemented in
 * the following two steps.
 *
 * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
 *     userland visible parts and start killing the percpu refcnts of
 *     css's.  Set up so that the next stage will be kicked off once all
 *     the percpu refcnts are confirmed to be killed.
 *
 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
 *     rest of destruction.  Once all cgroup references are gone, the
 *     cgroup is RCU-freed.
 *
 * This function implements s1.  After this step, @cgrp is gone as far as
 * the userland is concerned and a new cgroup with the same name may be
 * created.  As cgroup doesn't care about the names internally, this
 * doesn't cause any problem.
 */
static int cgroup_destroy_locked(struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct dentry *d = cgrp->dentry;
	struct cgroup_subsys_state *css;
	struct cgroup *child;
	bool empty;
	int ssid;

	lockdep_assert_held(&d->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * css_set_lock synchronizes access to ->cset_links and prevents
	 * @cgrp from being removed while __put_css_set() is in progress.
	 */
	read_lock(&css_set_lock);
	empty = list_empty(&cgrp->cset_links);
	read_unlock(&css_set_lock);
	if (!empty)
		return -EBUSY;

	/*
	 * Make sure there's no live children.  We can't test ->children
	 * emptiness as dead children linger on it while being destroyed;
	 * otherwise, "rmdir parent/child parent" may fail with -EBUSY.
	 */
	empty = true;
	rcu_read_lock();
	list_for_each_entry_rcu(child, &cgrp->children, sibling) {
		empty = cgroup_is_dead(child);
		if (!empty)
			break;
	}
	rcu_read_unlock();
	if (!empty)
		return -EBUSY;

	/*
	 * Initiate massacre of all css's.  cgroup_destroy_css_killed()
	 * will be invoked to perform the rest of destruction once the
	 * percpu refs of all css's are confirmed to be killed.  Removing
	 * the subsystem's files requires dropping cgroup_mutex across
	 * the kill_css() calls.
	 */
	mutex_unlock(&cgroup_mutex);
	for_each_css(css, ssid, cgrp)
		kill_css(css);
	mutex_lock(&cgroup_mutex);

	/*
	 * Mark @cgrp dead.  This prevents further task migration and child
	 * creation by disabling cgroup_lock_live_group().  Note that
	 * CGRP_DEAD assertion is depended upon by css_next_child() to
	 * resume iteration after dropping RCU read lock.  See
	 * css_next_child() for details.
	 */
	set_bit(CGRP_DEAD, &cgrp->flags);

	/* CGRP_DEAD is set, remove from ->release_list for the last time */
	raw_spin_lock(&release_list_lock);
	if (!list_empty(&cgrp->release_list))
		list_del_init(&cgrp->release_list);
	raw_spin_unlock(&release_list_lock);

	/*
	 * If @cgrp has css's attached, the second stage of cgroup
	 * destruction is kicked off from css_killed_work_fn() after the
	 * refs of all attached css's are killed.  If @cgrp doesn't have
	 * any css, we kick it off here.
	 */
	if (!cgrp->nr_css)
		cgroup_destroy_css_killed(cgrp);

	/*
	 * Clear the base files and remove @cgrp directory.  The removal
	 * puts the base ref but we aren't quite done with @cgrp yet, so
	 * hold onto it.
	 */
	mutex_unlock(&cgroup_mutex);
	cgroup_addrm_files(cgrp, cgroup_base_files, false);
	dget(d);
	cgroup_d_remove_dir(d);
	mutex_lock(&cgroup_mutex);

	return 0;
}
/**
 * cgroup_destroy_css_killed - the second step of cgroup destruction
 * @cgrp: the cgroup being destroyed
 *
 * This function is invoked from a work item for a cgroup which is being
 * destroyed after all css's are offlined and performs the rest of
 * destruction.  This is the second step of destruction described in the
 * comment above cgroup_destroy_locked().
 */
static void cgroup_destroy_css_killed(struct cgroup *cgrp)
{
	struct cgroup *parent = cgrp->parent;
	struct dentry *d = cgrp->dentry;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	/* delete this cgroup from parent->children */
	list_del_rcu(&cgrp->sibling);

	dput(d);

	set_bit(CGRP_RELEASABLE, &parent->flags);
	check_for_release(parent);
}
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
	int ret;

	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);
	ret = cgroup_destroy_locked(dentry->d_fsdata);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);

	return ret;
}
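/*
 * Illustration, not part of the original file: removal mirrors creation.
 * rmdir(2) on a cgroup directory reaches cgroup_rmdir() above, and the
 * destruction path returns -EBUSY while the group still has attached
 * tasks or live children (the path is an assumption):
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	if (rmdir("/sys/fs/cgroup/memory/mygroup") < 0)
 *		perror("rmdir");
 *
 * On success the directory disappears immediately (step s1), while the
 * css offlining and final freeing complete asynchronously (step s2).
 */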
static void __init cgroup_init_cftsets(struct cgroup_subsys *ss)
{
	INIT_LIST_HEAD(&ss->cftsets);

	/*
	 * base_cftset is embedded in subsys itself, no need to worry about
	 * deregistration.
	 */
	if (ss->base_cftypes) {
		struct cftype *cft;

		for (cft = ss->base_cftypes; cft->name[0] != '\0'; cft++)
			cft->ss = ss;

		ss->base_cftset.cfts = ss->base_cftypes;
		list_add_tail(&ss->base_cftset.node, &ss->cftsets);
	}
}
static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);

	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	/* init base cftset */
	cgroup_init_cftsets(ss);

	/* Create the top cgroup state for this subsystem */
	ss->root = &cgroup_dummy_root;
	css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss));
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_css(css, ss, cgroup_dummy_top);

	/*
	 * Update the init_css_set to contain a subsys pointer to this
	 * state - since the subsystem is newly registered, all tasks and
	 * hence the init_css_set is in the subsystem's top cgroup.
	 */
	init_css_set.subsys[ss->id] = css;

	need_forkexit_callback |= ss->fork || ss->exit;

	/*
	 * At system boot, before all subsystems have been registered,
	 * no tasks have been forked, so we don't need to invoke fork
	 * callbacks here.
	 */
	BUG_ON(!list_empty(&init_task.tasks));

	BUG_ON(online_css(css));

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
}
/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	struct cgroup_subsys *ss;
	int i;

	atomic_set(&init_css_set.refcount, 1);
	INIT_LIST_HEAD(&init_css_set.cgrp_links);
	INIT_LIST_HEAD(&init_css_set.tasks);
	INIT_HLIST_NODE(&init_css_set.hlist);
	css_set_count = 1;
	init_cgroup_root(&cgroup_dummy_root);
	cgroup_root_count = 1;
	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);

	init_cgrp_cset_link.cset = &init_css_set;
	init_cgrp_cset_link.cgrp = cgroup_dummy_top;
	list_add(&init_cgrp_cset_link.cset_link, &cgroup_dummy_top->cset_links);
	list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links);

	for_each_subsys(ss, i) {
		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n",
		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
		     ss->id, ss->name);
		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);

		ss->id = i;
		ss->name = cgroup_subsys_name[i];

		if (ss->early_init)
			cgroup_init_subsys(ss);
	}
	return 0;
}
/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	struct cgroup_subsys *ss;
	unsigned long key;
	int i, err;

	err = bdi_init(&cgroup_backing_dev_info);
	if (err)
		return err;

	for_each_subsys(ss, i) {
		if (!ss->early_init)
			cgroup_init_subsys(ss);
	}

	/* allocate id for the dummy hierarchy */
	mutex_lock(&cgroup_mutex);

	/* Add init_css_set to the hash table */
	key = css_set_hash(init_css_set.subsys);
	hash_add(css_set_table, &init_css_set.hlist, key);

	BUG_ON(cgroup_init_root_id(&cgroup_dummy_root, 0, 1));

	err = idr_alloc(&cgroup_dummy_root.cgroup_idr, cgroup_dummy_top,
			0, 1, GFP_KERNEL);
	BUG_ON(err < 0);

	mutex_unlock(&cgroup_mutex);

	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
	if (!cgroup_kobj) {
		err = -ENOMEM;
		goto out;
	}

	err = register_filesystem(&cgroup_fs_type);
	if (err < 0) {
		kobject_put(cgroup_kobj);
		goto out;
	}

	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);

out:
	if (err)
		bdi_destroy(&cgroup_backing_dev_info);

	return err;
}
static int __init cgroup_wq_init(void)
{
	/*
	 * There isn't much point in executing destruction path in
	 * parallel.  Good chunk is serialized with cgroup_mutex anyway.
	 *
	 * XXX: Must be ordered to make sure parent is offlined after
	 * children.  The ordering requirement is for memcg where a
	 * parent's offline may wait for a child's leading to deadlock.  In
	 * the long term, this should be fixed from memcg side.
	 *
	 * We would prefer to do this in cgroup_init() above, but that
	 * is called before init_workqueues(): so leave this until after.
	 */
	cgroup_destroy_wq = alloc_ordered_workqueue("cgroup_destroy", 0);
	BUG_ON(!cgroup_destroy_wq);

	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);

	return 0;
}
core_initcall(cgroup_wq_init);
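/*
 * Illustration, not part of the original file: an ordered workqueue runs
 * at most one work item at a time, strictly in queueing order, which is
 * what makes the child-before-parent offlining above hold.  The names
 * below are hypothetical:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_ordered_workqueue("example", 0);
 *	queue_work(wq, &child_work);
 *	queue_work(wq, &parent_work);
 *
 * parent_work starts only after child_work has finished, so a parent's
 * destroy work can never race ahead of its child's.
 */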
/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 *  - No need to task_lock(tsk) on this tsk->cgroup reference, as it
 *    doesn't really matter if tsk->cgroup changes after we read it,
 *    and we take cgroup_mutex, keeping cgroup_attach_task() from changing
 *    it anyway.  No need to check that tsk->cgroup != NULL, thanks to
 *    the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's
 *    cgroup to top_cgroup.
 */

/* TODO: Use a proper seq_file iterator */
int proc_cgroup_show(struct seq_file *m, void *v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf;
	int retval;
	struct cgroupfs_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	retval = 0;

	mutex_lock(&cgroup_mutex);

	for_each_active_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int ssid, count = 0;

		seq_printf(m, "%d:", root->hierarchy_id);
		for_each_subsys(ss, ssid)
			if (root->subsys_mask & (1 << ssid))
				seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');
		cgrp = task_cgroup_from_root(tsk, root);
		retval = cgroup_path(cgrp, buf, PAGE_SIZE);
		if (retval < 0)
			goto out_unlock;
		seq_puts(m, buf);
		seq_putc(m, '\n');
	}

out_unlock:
	mutex_unlock(&cgroup_mutex);
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}
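/*
 * Illustration, not part of the original file: each line emitted above has
 * the form "hierarchy_id:comma-separated-subsystems[,name=NAME]:path".
 * Sample output with made-up values:
 *
 *	$ cat /proc/self/cgroup
 *	3:cpu,cpuacct:/user
 *	2:memory:/user/mygroup
 *	1:name=systemd:/
 */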
/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->name, ss->root->hierarchy_id,
			   ss->root->number_of_cgroups, !ss->disabled);

	mutex_unlock(&cgroup_mutex);
	return 0;
}
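/*
 * Illustration, not part of the original file: the resulting /proc/cgroups
 * is one tab-separated row per subsystem (the numbers are made up):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset		1		4		1
 *	memory		2		27		1
 *	debug		0		1		1
 */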
static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/**
 * cgroup_fork - attach a newly forked task to its parent's cgroup
 * @child: pointer to task_struct of the newly forked child process
 *
 * Description: A task inherits its parent's cgroup at fork().
 *
 * A pointer to the shared css_set was automatically copied in
 * fork.c by dup_task_struct().  However, we ignore that copy, since
 * it was not made under the protection of RCU or cgroup_mutex, so
 * might no longer be a valid cgroup pointer.  cgroup_attach_task() might
 * have already changed current->cgroups, allowing the previously
 * referenced css_set to be removed and freed.
 *
 * At the point that cgroup_fork() is called, 'current' is the parent
 * task, and the passed argument 'child' points to the child task.
 */
void cgroup_fork(struct task_struct *child)
{
	task_lock(current);
	get_css_set(task_css_set(current));
	child->cgroups = current->cgroups;
	task_unlock(current);
	INIT_LIST_HEAD(&child->cg_list);
}
/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary and
 * call the subsystem fork() callbacks.  Has to be after the task is
 * visible on the task list in case we race with the first call to
 * cgroup_task_iter_start() - to guarantee that the new task ends up on its
 * css_set's task list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	/*
	 * use_task_css_set_links is set to 1 before we walk the tasklist
	 * under the tasklist_lock and we read it here after we added the
	 * child to the tasklist under the tasklist_lock as well.  If the
	 * child wasn't yet in the tasklist when we walked through it from
	 * cgroup_enable_task_cg_lists(), then use_task_css_set_links value
	 * should be visible now due to the paired locking and barriers
	 * implied by LOCK/UNLOCK: it is written before the tasklist_lock
	 * unlock in cgroup_enable_task_cg_lists() and read here after the
	 * tasklist_lock is released.
	 */
	if (use_task_css_set_links) {
		write_lock(&css_set_lock);
		task_lock(child);
		if (list_empty(&child->cg_list))
			list_add(&child->cg_list, &task_css_set(child)->tasks);
		task_unlock(child);
		write_unlock(&css_set_lock);
	}

	/*
	 * Call ss->fork().  This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	if (need_forkexit_callback) {
		for_each_subsys(ss, i)
			if (ss->fork)
				ss->fork(child);
	}
}
/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 * @run_callbacks: run exit callbacks?
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * the_top_cgroup_hack:
 *
 *    Set the exiting task's cgroup to the root cgroup (top_cgroup).
 *
 *    We call cgroup_exit() while the task is still competent to
 *    handle notify_on_release(), then leave the task attached to the
 *    root cgroup in each hierarchy for the remainder of its exit.
 *
 *    To do this properly, we would increment the reference count on
 *    top_cgroup, and near the very end of the kernel/exit.c do_exit()
 *    code we would add a second cgroup function call, to drop that
 *    reference.  This would just create an unnecessary hot spot on
 *    the top_cgroup reference count, to no avail.
 *
 *    Normally, holding a reference to a cgroup without bumping its
 *    count is unsafe.  The cgroup could go away, or someone could
 *    attach us to a different cgroup, decrementing the count on
 *    the first cgroup that we never incremented.  But in this case,
 *    top_cgroup isn't going away, and either task has PF_EXITING set,
 *    which wards off any cgroup_attach_task() attempts, or task is a
 *    failed fork, never visible to cgroup_attach_task.
 */
void cgroup_exit(struct task_struct *tsk, int run_callbacks)
{
	struct cgroup_subsys *ss;
	struct css_set *cset;
	int i;

	/*
	 * Unlink from the css_set task list if necessary.
	 * Optimistically check cg_list before taking
	 * css_set_lock.
	 */
	if (!list_empty(&tsk->cg_list)) {
		write_lock(&css_set_lock);
		if (!list_empty(&tsk->cg_list))
			list_del_init(&tsk->cg_list);
		write_unlock(&css_set_lock);
	}

	/* Reassign the task to the init_css_set. */
	task_lock(tsk);
	cset = task_css_set(tsk);
	RCU_INIT_POINTER(tsk->cgroups, &init_css_set);

	if (run_callbacks && need_forkexit_callback) {
		/* see cgroup_post_fork() for details */
		for_each_subsys(ss, i) {
			if (ss->exit) {
				struct cgroup_subsys_state *old_css = cset->subsys[i];
				struct cgroup_subsys_state *css = task_css(tsk, i);

				ss->exit(css, old_css, tsk);
			}
		}
	}
	task_unlock(tsk);

	put_css_set_taskexit(cset);
}
static void check_for_release(struct cgroup *cgrp)
{
	if (cgroup_is_releasable(cgrp) &&
	    list_empty(&cgrp->cset_links) && list_empty(&cgrp->children)) {
		/*
		 * Control Group is currently removable.  If it's not
		 * already queued for a userspace notification, queue
		 * it now.
		 */
		int need_schedule_work = 0;

		raw_spin_lock(&release_list_lock);
		if (!cgroup_is_dead(cgrp) &&
		    list_empty(&cgrp->release_list)) {
			list_add(&cgrp->release_list, &release_list);
			need_schedule_work = 1;
		}
		raw_spin_unlock(&release_list_lock);
		if (need_schedule_work)
			schedule_work(&release_agent_work);
	}
}
/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
static void cgroup_release_agent(struct work_struct *work)
{
	BUG_ON(work != &release_agent_work);
	mutex_lock(&cgroup_mutex);
	raw_spin_lock(&release_list_lock);
	while (!list_empty(&release_list)) {
		char *argv[3], *envp[3];
		int i;
		char *pathbuf = NULL, *agentbuf = NULL;
		struct cgroup *cgrp = list_entry(release_list.next,
						 struct cgroup,
						 release_list);
		list_del_init(&cgrp->release_list);
		raw_spin_unlock(&release_list_lock);
		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!pathbuf)
			goto continue_free;
		if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
			goto continue_free;
		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
		if (!agentbuf)
			goto continue_free;

		i = 0;
		argv[i++] = agentbuf;
		argv[i++] = pathbuf;
		argv[i] = NULL;

		i = 0;
		/* minimal command environment */
		envp[i++] = "HOME=/";
		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
		envp[i] = NULL;

		/*
		 * Drop the lock while we invoke the usermode helper,
		 * since the exec could involve hitting disk and hence
		 * be a slow process.
		 */
		mutex_unlock(&cgroup_mutex);
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		mutex_lock(&cgroup_mutex);
 continue_free:
		kfree(pathbuf);
		kfree(agentbuf);
		raw_spin_lock(&release_list_lock);
	}
	raw_spin_unlock(&release_list_lock);
	mutex_unlock(&cgroup_mutex);
}
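/*
 * Illustration, not part of the original file: a minimal release agent.
 * It is executed with the released cgroup's hierarchy-relative path as
 * argv[1] and only HOME/PATH in the environment, and would typically
 * rmdir the now-empty group.  The mount point is an assumption:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		char path[4096];
 *
 *		if (argc < 2)
 *			return 1;
 *		snprintf(path, sizeof(path),
 *			 "/sys/fs/cgroup/memory%s", argv[1]);
 *		return rmdir(path) ? 1 : 0;
 *	}
 *
 * As the comment above notes, a quiet rmdir failure is fine: it just
 * means the cgroup became busy again before the agent ran.
 */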
static int __init cgroup_disable(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		for_each_subsys(ss, i) {
			if (!strcmp(token, ss->name)) {
				ss->disabled = 1;
				printk(KERN_INFO "Disabling %s control group subsystem\n",
				       ss->name);
				break;
			}
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);
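/*
 * Illustration, not part of the original file: subsystems are disabled
 * from the kernel command line, parsed by cgroup_disable() above, e.g.
 * (subsystem choice is only an example):
 *
 *	cgroup_disable=memory,blkio
 */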
/**
 * css_tryget_from_dir - get corresponding css from the dentry of a cgroup dir
 * @dentry: directory dentry of interest
 * @ss: subsystem of interest
 *
 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
 * to get the corresponding css and return it.  If such css doesn't exist
 * or can't be pinned, an ERR_PTR value is returned.
 */
struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
						struct cgroup_subsys *ss)
{
	struct cgroup *cgrp;
	struct cgroup_subsys_state *css;

	/* is @dentry a cgroup dir? */
	if (!dentry->d_inode ||
	    dentry->d_inode->i_op != &cgroup_dir_inode_operations)
		return ERR_PTR(-EBADF);

	rcu_read_lock();

	cgrp = __d_cgrp(dentry);
	css = cgroup_css(cgrp, ss);

	if (!css || !css_tryget(css))
		css = ERR_PTR(-ENOENT);

	rcu_read_unlock();
	return css;
}
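/*
 * Illustration, not part of the original file: a sketch of how a caller
 * might pin a css from an open cgroup directory.  The subsystem choice
 * is an assumption:
 *
 *	css = css_tryget_from_dir(dentry, &memory_cgrp_subsys);
 *	if (IS_ERR(css))
 *		return PTR_ERR(css);
 *
 * While the reference is held the css cannot be offlined; the caller
 * drops it with css_put(css) when done.
 */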
/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's a valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	struct cgroup *cgrp;

	cgroup_assert_mutexes_or_rcu_locked();

	cgrp = idr_find(&ss->root->cgroup_idr, id);
	if (cgrp)
		return cgroup_css(cgrp, ss);
	return NULL;
}
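/*
 * Illustration, not part of the original file: css_from_id() only looks
 * the css up; a caller wanting to use it beyond the RCU read section must
 * pin it first (subsystem choice is an assumption):
 *
 *	rcu_read_lock();
 *	css = css_from_id(id, &memory_cgrp_subsys);
 *	if (css && !css_tryget(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *
 * If css is non-NULL here, the caller owns a reference and must release
 * it with css_put(css).
 */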
#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

static u64 current_css_set_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;

	read_lock(&css_set_lock);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;
		const char *name;

		if (c->dentry)
			name = c->dentry->d_name.name;
		else
			name = "?";
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name);
	}
	rcu_read_unlock();
	read_unlock(&css_set_lock);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;

	read_lock(&css_set_lock);
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;

		seq_printf(seq, "css_set %p\n", cset);
		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
				seq_puts(seq, "  ...\n");
				break;
			}
			seq_printf(seq, "  task %d\n",
				   task_pid_vnr(task));
		}
	}
	read_unlock(&css_set_lock);
	return 0;
}

static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return test_bit(CGRP_RELEASABLE, &css->cgroup->flags);
}

static struct cftype debug_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};

struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.base_cftypes = debug_files,
};
#endif /* CONFIG_CGROUP_DEBUG */