]>
Commit | Line | Data |
---|---|---|
ddbcc7e8 PM |
1 | #ifndef _LINUX_CGROUP_H |
2 | #define _LINUX_CGROUP_H | |
3 | /* | |
4 | * cgroup interface | |
5 | * | |
6 | * Copyright (C) 2003 BULL SA | |
7 | * Copyright (C) 2004-2006 Silicon Graphics, Inc. | |
8 | * | |
9 | */ | |
10 | ||
11 | #include <linux/sched.h> | |
ddbcc7e8 PM |
12 | #include <linux/cpumask.h> |
13 | #include <linux/nodemask.h> | |
14 | #include <linux/rcupdate.h> | |
eb6fd504 | 15 | #include <linux/rculist.h> |
846c7bb0 | 16 | #include <linux/cgroupstats.h> |
31a7df01 | 17 | #include <linux/prio_heap.h> |
cc31edce | 18 | #include <linux/rwsem.h> |
38460b48 | 19 | #include <linux/idr.h> |
48ddbe19 | 20 | #include <linux/workqueue.h> |
03b1cde6 | 21 | #include <linux/xattr.h> |
25a7e684 | 22 | #include <linux/fs.h> |
d3daf28d | 23 | #include <linux/percpu-refcount.h> |
ddbcc7e8 PM |
24 | |
25 | #ifdef CONFIG_CGROUPS | |
26 | ||
27 | struct cgroupfs_root; | |
28 | struct cgroup_subsys; | |
29 | struct inode; | |
84eea842 | 30 | struct cgroup; |
38460b48 | 31 | struct css_id; |
a27bb332 | 32 | struct eventfd_ctx; |
ddbcc7e8 PM |
33 | |
34 | extern int cgroup_init_early(void); | |
35 | extern int cgroup_init(void); | |
b4f48b63 | 36 | extern void cgroup_fork(struct task_struct *p); |
817929ec | 37 | extern void cgroup_post_fork(struct task_struct *p); |
b4f48b63 | 38 | extern void cgroup_exit(struct task_struct *p, int run_callbacks); |
846c7bb0 BS |
39 | extern int cgroupstats_build(struct cgroupstats *stats, |
40 | struct dentry *dentry); | |
e6a1105b | 41 | extern int cgroup_load_subsys(struct cgroup_subsys *ss); |
cf5d5941 | 42 | extern void cgroup_unload_subsys(struct cgroup_subsys *ss); |
ddbcc7e8 | 43 | |
8d8b97ba | 44 | extern int proc_cgroup_show(struct seq_file *, void *); |
a424316c | 45 | |
7d8e0bf5 LZ |
/*
 * Define the enumeration of all cgroup subsystems.
 *
 * IDs are generated from <linux/cgroup_subsys.h> in two passes: first
 * the builtin (=y) subsystems, then the modular (=m) ones.  This keeps
 * builtin subsystem IDs stable regardless of which modular subsystems
 * are configured.
 */
#define SUBSYS(_x) _x ## _subsys_id,
enum cgroup_subsys_id {
/* pass 1: only builtin subsystems contribute IDs */
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
	CGROUP_BUILTIN_SUBSYS_COUNT,

	/* rewind the counter so modular IDs continue right after the builtin range */
	__CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1,

/* pass 2: only modular subsystems contribute IDs */
#define IS_SUBSYS_ENABLED(option) IS_MODULE(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
66 | ||
ddbcc7e8 PM |
/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
	/*
	 * The cgroup that this subsystem is attached to.  Useful
	 * for subsystems that want to know about the cgroup
	 * hierarchy structure.
	 */
	struct cgroup *cgroup;

	/* reference count - access via css_[try]get() and css_put() */
	struct percpu_ref refcnt;

	unsigned long flags;	/* CSS_* flags, see the enum below */
	/* ID for this css, if possible */
	struct css_id __rcu *id;

	/* Used to put @cgroup->dentry on the last css_put() */
	struct work_struct dput_work;
};
86 | ||
/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_ROOT	= (1 << 0), /* this CSS is the root of the subsystem */
	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
};
92 | ||
5de0107e TH |
93 | /** |
94 | * css_get - obtain a reference on the specified css | |
95 | * @css: target css | |
96 | * | |
97 | * The caller must already have a reference. | |
ddbcc7e8 | 98 | */ |
ddbcc7e8 PM |
99 | static inline void css_get(struct cgroup_subsys_state *css) |
100 | { | |
101 | /* We don't need to reference count the root state */ | |
38b53aba | 102 | if (!(css->flags & CSS_ROOT)) |
d3daf28d | 103 | percpu_ref_get(&css->refcnt); |
ddbcc7e8 | 104 | } |
e7c5ec91 | 105 | |
5de0107e TH |
106 | /** |
107 | * css_tryget - try to obtain a reference on the specified css | |
108 | * @css: target css | |
109 | * | |
110 | * Obtain a reference on @css if it's alive. The caller naturally needs to | |
111 | * ensure that @css is accessible but doesn't have to be holding a | |
112 | * reference on it - IOW, RCU protected access is good enough for this | |
113 | * function. Returns %true if a reference count was successfully obtained; | |
114 | * %false otherwise. | |
115 | */ | |
e7c5ec91 PM |
116 | static inline bool css_tryget(struct cgroup_subsys_state *css) |
117 | { | |
38b53aba | 118 | if (css->flags & CSS_ROOT) |
e7c5ec91 | 119 | return true; |
d3daf28d | 120 | return percpu_ref_tryget(&css->refcnt); |
e7c5ec91 PM |
121 | } |
122 | ||
5de0107e TH |
123 | /** |
124 | * css_put - put a css reference | |
125 | * @css: target css | |
126 | * | |
127 | * Put a reference obtained via css_get() and css_tryget(). | |
128 | */ | |
ddbcc7e8 PM |
129 | static inline void css_put(struct cgroup_subsys_state *css) |
130 | { | |
38b53aba | 131 | if (!(css->flags & CSS_ROOT)) |
d3daf28d | 132 | percpu_ref_put(&css->refcnt); |
ddbcc7e8 PM |
133 | } |
134 | ||
3116f0e3 PM |
/* bits in struct cgroup flags field */
enum {
	/* Control Group is dead */
	CGRP_DEAD,
	/*
	 * Control Group has previously had a child cgroup or a task,
	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
	 */
	CGRP_RELEASABLE,
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
	/*
	 * Clone the parent's configuration when creating a new child
	 * cpuset cgroup.  For historical reasons, this option can be
	 * specified at mount time and thus is implemented here.
	 */
	CGRP_CPUSET_CLONE_CHILDREN,
	/* see the comment above CGRP_ROOT_SANE_BEHAVIOR for details */
	CGRP_SANE_BEHAVIOR,
};
155 | ||
65dff759 LZ |
/*
 * RCU-managed copy of a cgroup's directory name; see the ->name member
 * of struct cgroup below for the access rules.
 */
struct cgroup_name {
	struct rcu_head rcu_head;	/* for deferred freeing after rename() */
	char name[];			/* flexible array, NUL-terminated */
};
160 | ||
ddbcc7e8 PM |
/* One node of a cgroup hierarchy. */
struct cgroup {
	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * idr allocated in-hierarchy ID.
	 *
	 * The ID of the root cgroup is always 0, and a new cgroup
	 * will be assigned with a smallest available ID.
	 */
	int id;

	/*
	 * We link our 'sibling' struct into our parent's 'children'.
	 * Our children link their 'sibling' into our 'children'.
	 */
	struct list_head sibling;	/* my parent's children */
	struct list_head children;	/* my children */
	struct list_head files;		/* my files */

	struct cgroup *parent;		/* my parent */
	struct dentry *dentry;		/* cgroup fs entry, RCU protected */

	/*
	 * Monotonically increasing unique serial number which defines a
	 * uniform order among all cgroups.  It's guaranteed that all
	 * ->children lists are in the ascending order of ->serial_nr.
	 * It's used to allow interrupting and resuming iterations.
	 */
	u64 serial_nr;

	/*
	 * This is a copy of dentry->d_name, and it's needed because
	 * we can't use dentry->d_name in cgroup_path().
	 *
	 * You must acquire rcu_read_lock() to access cgrp->name, and
	 * the only place that can change it is rename(), which is
	 * protected by parent dir's i_mutex.
	 *
	 * Normally you should use cgroup_name() wrapper rather than
	 * access it directly.
	 */
	struct cgroup_name __rcu *name;

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	struct cgroupfs_root *root;	/* hierarchy this cgroup belongs to */

	/*
	 * List of cgrp_cset_links pointing at css_sets with tasks in this
	 * cgroup.  Protected by css_set_lock.
	 */
	struct list_head cset_links;

	/*
	 * Linked list running through all cgroups that can
	 * potentially be reaped by the release agent.  Protected by
	 * release_list_lock
	 */
	struct list_head release_list;

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* For css percpu_ref killing and RCU-protected deletion */
	struct rcu_head rcu_head;
	struct work_struct destroy_work;
	atomic_t css_kill_cnt;

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	/* directory xattrs */
	struct simple_xattrs xattrs;
};
241 | ||
25a7e684 TH |
#define MAX_CGROUP_ROOT_NAMELEN 64

/* cgroupfs_root->flags */
enum {
	/*
	 * Unfortunately, cgroup core and various controllers are riddled
	 * with idiosyncrasies and pointless options.  The following flag,
	 * when set, will force sane behavior - some options are forced on,
	 * others are disallowed, and some controllers will change their
	 * hierarchical or other behaviors.
	 *
	 * The set of behaviors affected by this flag are still being
	 * determined and developed and the mount option for this flag is
	 * prefixed with __DEVEL__.  The prefix will be dropped once we
	 * reach the point where all behaviors are compatible with the
	 * planned unified hierarchy, which will automatically turn on this
	 * flag.
	 *
	 * The followings are the behaviors currently affected by this flag.
	 *
	 * - Mount options "noprefix" and "clone_children" are disallowed.
	 *   Also, cgroupfs file cgroup.clone_children is not created.
	 *
	 * - When mounting an existing superblock, mount options should
	 *   match.
	 *
	 * - Remount is disallowed.
	 *
	 * - rename(2) is disallowed.
	 *
	 * - "tasks" is removed.  Everything should be at process
	 *   granularity.  Use "cgroup.procs" instead.
	 *
	 * - "release_agent" and "notify_on_release" are removed.
	 *   Replacement notification mechanism will be implemented.
	 *
	 * - cpuset: tasks will be kept in empty cpusets when hotplug happens
	 *   and take masks of ancestors with non-empty cpus/mems, instead of
	 *   being moved to an ancestor.
	 *
	 * - cpuset: a task can be moved into an empty cpuset, and again it
	 *   takes masks of ancestors.
	 *
	 * - memcg: use_hierarchy is on by default and the cgroup file for
	 *   the flag is not created.
	 *
	 * - blkcg: blk-throttle becomes properly hierarchical.
	 */
	CGRP_ROOT_SANE_BEHAVIOR	= (1 << 0),

	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */

	/* mount options live below bit 16 */
	CGRP_ROOT_OPTION_MASK	= (1 << 16) - 1,

	CGRP_ROOT_SUBSYS_BOUND	= (1 << 16), /* subsystems finished binding */
};
300 | ||
/*
 * A cgroupfs_root represents the root of a cgroup hierarchy, and may be
 * associated with a superblock to form an active hierarchy.  This is
 * internal to cgroup core.  Don't access directly from controllers.
 */
struct cgroupfs_root {
	struct super_block *sb;

	/* The bitmask of subsystems attached to this hierarchy */
	unsigned long subsys_mask;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* A list running through the attached subsystems */
	struct list_head subsys_list;

	/* The root cgroup for this hierarchy */
	struct cgroup top_cgroup;

	/* Tracks how many cgroups are currently defined in hierarchy. */
	int number_of_cgroups;

	/* A list running through the active hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags (CGRP_ROOT_*) */
	unsigned long flags;

	/* IDs for cgroups in this hierarchy */
	struct idr cgroup_idr;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};
339 | ||
d20a390a PM |
/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects.  This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */
struct css_set {

	/* Reference count */
	atomic_t refcount;

	/*
	 * List running through all cgroup groups in the same hash
	 * slot.  Protected by css_set_lock
	 */
	struct hlist_node hlist;

	/*
	 * List running through all tasks using this cgroup
	 * group.  Protected by css_set_lock
	 */
	struct list_head tasks;

	/*
	 * List of cgrp_cset_links pointing at cgroups referenced from this
	 * css_set.  Protected by css_set_lock.
	 */
	struct list_head cgrp_links;

	/*
	 * Set of subsystem states, one for each subsystem.  This array
	 * is immutable after creation apart from the init_css_set
	 * during subsystem registration (at boot time) and modular subsystem
	 * loading/unloading.
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};
382 | ||
91796569 PM |
/*
 * cgroup_map_cb is an abstract callback API for reporting map-valued
 * control files.
 */
struct cgroup_map_cb {
	/* invoked once per key/value entry being reported */
	int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value);
	void *state;	/* opaque state for the fill() implementation */
};
392 | ||
d20a390a PM |
/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_dentry->d_fsdata
 */

/* cftype->flags */
enum {
	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cgrp */
	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cgrp */
	CFTYPE_INSANE		= (1 << 2),	/* don't create if sane_behavior */
};

#define MAX_CFTYPE_NAME 64
409 | ||
/* One control file definition: name, mode and read/write handlers. */
struct cftype {
	/*
	 * By convention, the name should begin with the name of the
	 * subsystem, followed by a period.  Zero length string indicates
	 * end of cftype array.
	 */
	char name[MAX_CFTYPE_NAME];
	int private;	/* subsystem-private value, multiplexes handlers */
	/*
	 * If not 0, file mode is set to this value, otherwise it will
	 * be figured out automatically
	 */
	umode_t mode;

	/*
	 * If non-zero, defines the maximum length of string that can
	 * be passed to write_string; defaults to 64
	 */
	size_t max_write_len;

	/* CFTYPE_* flags */
	unsigned int flags;

	int (*open)(struct inode *inode, struct file *file);
	ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
			struct file *file,
			char __user *buf, size_t nbytes, loff_t *ppos);
	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer.  Use it in place of read()
	 */
	u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64()
	 */
	s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
	/*
	 * read_map() is used for defining a map of key/value
	 * pairs.  It should call cb->fill(cb, key, value) for each
	 * entry.  The key/value pairs (and their ordering) should not
	 * change between reboots.
	 */
	int (*read_map)(struct cgroup *cgrp, struct cftype *cft,
			struct cgroup_map_cb *cb);
	/*
	 * read_seq_string() is used for outputting a simple sequence
	 * using seqfile.
	 */
	int (*read_seq_string)(struct cgroup *cgrp, struct cftype *cft,
			       struct seq_file *m);

	ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
			 struct file *file,
			 const char __user *buf, size_t nbytes, loff_t *ppos);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace.  Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
	/*
	 * write_s64() is a signed version of write_u64()
	 */
	int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);

	/*
	 * write_string() is passed a nul-terminated kernelspace
	 * buffer of maximum length determined by max_write_len.
	 * Returns 0 or -ve error code.
	 */
	int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
			    const char *buffer);
	/*
	 * trigger() callback can be used to get some kick from the
	 * userspace, when the actual string written is not important
	 * at all.  The private field can be used to determine the
	 * kick type for multiplexing.
	 */
	int (*trigger)(struct cgroup *cgrp, unsigned int event);

	int (*release)(struct inode *inode, struct file *file);

	/*
	 * register_event() callback will be used to add new userspace
	 * waiter for changes related to the cftype.  Implement it if
	 * you want to provide this functionality.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct cgroup *cgrp, struct cftype *cft,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace
	 * closes the eventfd or on cgroup removing.
	 * This callback must be implemented, if you want provide
	 * notification functionality.
	 */
	void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
				 struct eventfd_ctx *eventfd);
};
510 | ||
8e3f6541 TH |
/*
 * cftype_sets describe cftypes belonging to a subsystem and are chained at
 * cgroup_subsys->cftsets.  Each cftset points to an array of cftypes
 * terminated by zero length name.
 */
struct cftype_set {
	struct list_head node;	/* chained at subsys->cftsets */
	struct cftype *cfts;	/* array terminated by zero-length name */
};
520 | ||
/* Control structure describing a task scan over one cgroup. */
struct cgroup_scanner {
	struct cgroup *cgrp;	/* cgroup whose tasks are scanned */
	/* optional filter; presumably selects tasks for process_task() — confirm at call sites */
	int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
	/* invoked for each selected task */
	void (*process_task)(struct task_struct *p,
			     struct cgroup_scanner *scan);
	struct ptr_heap *heap;	/* caller-supplied scratch heap */
	void *data;		/* opaque caller data */
};
529 | ||
873fe09e TH |
530 | /* |
531 | * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details. This | |
532 | * function can be called as long as @cgrp is accessible. | |
533 | */ | |
534 | static inline bool cgroup_sane_behavior(const struct cgroup *cgrp) | |
535 | { | |
536 | return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR; | |
537 | } | |
538 | ||
65dff759 LZ |
/*
 * cgroup_name - read a cgroup's name.
 *
 * Caller should hold rcu_read_lock(): ->name may be replaced by
 * rename() and is RCU protected (see struct cgroup).
 */
static inline const char *cgroup_name(const struct cgroup *cgrp)
{
	return rcu_dereference(cgrp->name)->name;
}
544 | ||
03b1cde6 AR |
/* Register/unregister an array of control files with a subsystem. */
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);

bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);

/* Format the filesystem path of a cgroup into @buf. */
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);

int cgroup_task_count(const struct cgroup *cgrp);

/*
 * Control Group taskset, used to pass around set of tasks to cgroup_subsys
 * methods.
 */
struct cgroup_taskset;
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset);
int cgroup_taskset_size(struct cgroup_taskset *tset);

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all
 * @tset: taskset to iterate
 */
#define cgroup_taskset_for_each(task, skip_cgrp, tset)			\
	for ((task) = cgroup_taskset_first((tset)); (task);		\
	     (task) = cgroup_taskset_next((tset)))			\
		if (!(skip_cgrp) ||					\
		    cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp))
576 | ||
21acb9ca TLSC |
/*
 * Control Group subsystem type.
 * See Documentation/cgroups/cgroups.txt for details
 */
struct cgroup_subsys {
	/* css lifetime callbacks invoked by cgroup core */
	struct cgroup_subsys_state *(*css_alloc)(struct cgroup *cgrp);
	int (*css_online)(struct cgroup *cgrp);
	void (*css_offline)(struct cgroup *cgrp);
	void (*css_free)(struct cgroup *cgrp);

	/* task migration and task lifecycle callbacks */
	int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
	void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
	void (*fork)(struct task_struct *task);
	void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
		     struct task_struct *task);
	void (*bind)(struct cgroup *root);

	int subsys_id;		/* this subsystem's cgroup_subsys_id */
	int disabled;
	int early_init;
	/*
	 * True if this subsys uses ID.  ID is not available before cgroup_init()
	 * (not available in early_init time.)
	 */
	bool use_id;

	/*
	 * If %false, this subsystem is properly hierarchical -
	 * configuration, resource accounting and restriction on a parent
	 * cgroup cover those of its children.  If %true, hierarchy support
	 * is broken in some ways - some subsystems ignore hierarchy
	 * completely while others are only implemented half-way.
	 *
	 * It's now disallowed to create nested cgroups if the subsystem is
	 * broken and cgroup core will emit a warning message on such
	 * cases.  Eventually, all subsystems will be made properly
	 * hierarchical and this will go away.
	 */
	bool broken_hierarchy;
	bool warned_broken_hierarchy;

#define MAX_CGROUP_TYPE_NAMELEN 32
	const char *name;

	/*
	 * Link to parent, and list entry in parent's children.
	 * Protected by cgroup_lock()
	 */
	struct cgroupfs_root *root;
	struct list_head sibling;
	/* used when use_id == true */
	struct idr idr;
	spinlock_t id_lock;

	/* list of cftype_sets */
	struct list_head cftsets;

	/* base cftypes, automatically [de]registered with subsys itself */
	struct cftype *base_cftypes;
	struct cftype_set base_cftset;

	/* should be defined only by modular subsystems */
	struct module *module;
};
643 | ||
/* Declare "extern struct cgroup_subsys <name>_subsys;" for each builtin subsystem. */
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
#undef SUBSYS

/* Return @cgrp's css for the subsystem with ID @subsys_id. */
static inline struct cgroup_subsys_state *cgroup_subsys_state(
	struct cgroup *cgrp, int subsys_id)
{
	return cgrp->subsys[subsys_id];
}
655 | ||
14611e51 TH |
/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
			      lockdep_is_held(&(task)->alloc_lock) ||	\
			      lockdep_is_held(&cgroup_mutex) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif

/**
 * task_subsys_state_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_subsys_state_check(task, subsys_id, __c)			\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_subsys_state - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_subsys_state_check().
 */
static inline struct cgroup_subsys_state *
task_subsys_state(struct task_struct *task, int subsys_id)
{
	return task_subsys_state_check(task, subsys_id, false);
}

/* Return the cgroup @task belongs to on the hierarchy of @subsys_id. */
static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_subsys_state(task, subsys_id)->cgroup;
}
721 | ||
e14880f7 LZ |
/**
 * cgroup_from_id - lookup cgroup by id
 * @ss: cgroup subsys to be looked into
 * @id: the cgroup id
 *
 * Returns the cgroup if there's valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
static inline struct cgroup *cgroup_from_id(struct cgroup_subsys *ss, int id)
{
#ifdef CONFIG_PROVE_RCU
	/* holding cgroup_mutex is also sufficient protection */
	rcu_lockdep_assert(rcu_read_lock_held() ||
			   lockdep_is_held(&cgroup_mutex),
			   "cgroup_from_id() needs proper protection");
#endif
	return idr_find(&ss->root->cgroup_idr, id);
}
739 | ||
53fa5261 TH |
struct cgroup *cgroup_next_sibling(struct cgroup *pos);

/**
 * cgroup_for_each_child - iterate through children of a cgroup
 * @pos: the cgroup * to use as the loop cursor
 * @cgrp: cgroup whose children to walk
 *
 * Walk @cgrp's children.  Must be called under rcu_read_lock().  A child
 * cgroup which hasn't finished ->css_online() or already has finished
 * ->css_offline() may show up during traversal and it's each subsystem's
 * responsibility to verify that each @pos is alive.
 *
 * If a subsystem synchronizes against the parent in its ->css_online() and
 * before starting iterating, a cgroup which finished ->css_online() is
 * guaranteed to be visible in the future iterations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define cgroup_for_each_child(pos, cgrp)				\
	for ((pos) = list_first_or_null_rcu(&(cgrp)->children,		\
					    struct cgroup, sibling);	\
	     (pos); (pos) = cgroup_next_sibling((pos)))
574bd9f7 TH |
764 | |
/* Pre-order step: next descendant of @cgroup after @pos (NULL starts walk). */
struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
					  struct cgroup *cgroup);
/* Rightmost (last in pre-order) descendant of @pos's subtree. */
struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);

/**
 * cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose descendants to walk
 *
 * Walk @cgroup's descendants.  Must be called under rcu_read_lock().  A
 * descendant cgroup which hasn't finished ->css_online() or already has
 * finished ->css_offline() may show up during traversal and it's each
 * subsystem's responsibility to verify that each @pos is alive.
 *
 * If a subsystem synchronizes against the parent in its ->css_online() and
 * before starting iterating, and synchronizes against @pos on each
 * iteration, any descendant cgroup which finished ->css_online() is
 * guaranteed to be visible in the future iterations.
 *
 * In other words, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@cgrp)
 * {
 *	Lock @cgrp->parent and @cgrp;
 *	Inherit state from @cgrp->parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@cgrp)
 * {
 *	Lock @cgrp;
 *	Update @cgrp's state;
 *	Unlock @cgrp;
 *
 *	cgroup_for_each_descendant_pre(@pos, @cgrp) {
 *		Lock @pos;
 *		Verify @pos is alive and inherit state from @pos->parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any cgroup after the latest update to its
 * parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define cgroup_for_each_descendant_pre(pos, cgroup)			\
	for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos);	\
	     pos = cgroup_next_descendant_pre((pos), (cgroup)))
830 | ||
/* Post-order step: next descendant of @cgroup after @pos (NULL starts walk). */
struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
					   struct cgroup *cgroup);

/**
 * cgroup_for_each_descendant_post - post-order walk of a cgroup's descendants
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose descendants to walk
 *
 * Similar to cgroup_for_each_descendant_pre() but performs post-order
 * traversal instead.  Note that the walk visibility guarantee described in
 * pre-order walk doesn't apply the same to post-order walks.
 */
#define cgroup_for_each_descendant_post(pos, cgroup)			\
	for (pos = cgroup_next_descendant_post(NULL, (cgroup)); (pos);	\
	     pos = cgroup_next_descendant_post((pos), (cgroup)))
846 | ||
817929ec PM |
/* A cgroup_iter should be treated as an opaque object */
struct cgroup_iter {
	/* internal cursors managed by cgroup_iter_start/next/end */
	struct list_head *cset_link;
	struct list_head *task;
};
852 | ||
d20a390a PM |
/*
 * To iterate across the tasks in a cgroup:
 *
 * 1) call cgroup_iter_start to initialize an iterator
 *
 * 2) call cgroup_iter_next() to retrieve member tasks until it
 *    returns NULL or until you want to end the iteration
 *
 * 3) call cgroup_iter_end() to destroy the iterator.
 *
 * Or, call cgroup_scan_tasks() to iterate through every task in a
 * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
 * the test_task() callback, but not while calling the process_task()
 * callback.
 */
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
				     struct cgroup_iter *it);
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
/* Move @from's task (and, per the name, all its threads) to @from's cgroups. */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
/* Move every task in @from to @to; see kernel/cgroup.c for locking rules. */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
31583bb0 | 875 | |
38460b48 KH |
/*
 * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
 * if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
 * CSS ID is assigned at cgroup allocation (create) automatically
 * and removed when subsys calls free_css_id() function. This is because
 * the lifetime of cgroup_subsys_state is subsys's matter.
 *
 * Looking up and scanning function should be called under rcu_read_lock().
 * Taking cgroup_mutex is not necessary for following calls.
 * But the css returned by this routine can be "not populated yet" or "being
 * destroyed". The caller should check css and cgroup's status.
 */

/*
 * Typically called at ->destroy(), or somewhere the subsys frees
 * cgroup_subsys_state.
 */
void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);

/* Find a cgroup_subsys_state which has given ID */
struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);

/* Returns true if root is ancestor of cg */
bool css_is_ancestor(struct cgroup_subsys_state *cg,
		     const struct cgroup_subsys_state *root);

/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
/* Return the css of subsys @id bound to the cgroup @f's dentry refers to. */
struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
38460b48 | 906 | |
ddbcc7e8 PM |
#else /* !CONFIG_CGROUPS */

/* Stubs so callers build unchanged when cgroups are compiled out. */
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p, int callbacks) {}

/* No cgroup hierarchy to report stats for. */
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry)
{
	return -EINVAL;
}

/* No cgroups - nothing to do */
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t)
{
	return 0;
}

#endif /* !CONFIG_CGROUPS */

#endif /* _LINUX_CGROUP_H */