#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/prio_heap.h>
#include <linux/rwsem.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <linux/xattr.h>
#include <linux/fs.h>
#include <linux/percpu-refcount.h>

#ifdef CONFIG_CGROUPS

struct cgroupfs_root;
struct cgroup_subsys;
struct inode;
struct cgroup;
struct css_id;
struct eventfd_ctx;

extern int cgroup_init_early(void);
extern int cgroup_init(void);
extern void cgroup_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
extern void cgroup_exit(struct task_struct *p, int run_callbacks);
extern int cgroupstats_build(struct cgroupstats *stats,
                             struct dentry *dentry);
extern int cgroup_load_subsys(struct cgroup_subsys *ss);
extern void cgroup_unload_subsys(struct cgroup_subsys *ss);

extern int proc_cgroup_show(struct seq_file *, void *);

/*
 * Define the enumeration of all cgroup subsystems.
 *
 * We define ids for builtin subsystems and then modular ones.
 */
#define SUBSYS(_x) _x ## _subsys_id,
enum cgroup_subsys_id {
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
        CGROUP_BUILTIN_SUBSYS_COUNT,

        __CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1,

#define IS_SUBSYS_ENABLED(option) IS_MODULE(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
        CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS

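/*
 * Illustrative note (not part of the original header): with the SUBSYS()
 * definition above, an entry such as "SUBSYS(cpuset)" in
 * linux/cgroup_subsys.h expands to "cpuset_subsys_id," inside this enum,
 * so each enabled controller contributes exactly one ID.  The cpuset
 * example assumes CONFIG_CPUSETS is built in.
 */
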
/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
        /* the cgroup that this css is attached to */
        struct cgroup *cgroup;

        /* the cgroup subsystem that this css is attached to */
        struct cgroup_subsys *ss;

        /* reference count - access via css_[try]get() and css_put() */
        struct percpu_ref refcnt;

        unsigned long flags;
        /* ID for this css, if possible */
        struct css_id __rcu *id;

        /* Used to put @cgroup->dentry on the last css_put() */
        struct work_struct dput_work;
};

/* bits in struct cgroup_subsys_state flags field */
enum {
        CSS_ROOT        = (1 << 0), /* this CSS is the root of the subsystem */
        CSS_ONLINE      = (1 << 1), /* between ->css_online() and ->css_offline() */
};

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
        /* We don't need to reference count the root state */
        if (!(css->flags & CSS_ROOT))
                percpu_ref_get(&css->refcnt);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css if it's alive.  The caller naturally needs to
 * ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function.  Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
        if (css->flags & CSS_ROOT)
                return true;
        return percpu_ref_tryget(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_ROOT))
                percpu_ref_put(&css->refcnt);
}

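/*
 * Example (illustrative sketch, not part of the original header): pinning
 * a css that was looked up under RCU.  "my_subsys_id" stands for any valid
 * cgroup_subsys_id; task_css() is declared further down in this header.
 *
 *      struct cgroup_subsys_state *css;
 *
 *      rcu_read_lock();
 *      css = task_css(task, my_subsys_id);
 *      if (!css_tryget(css))
 *              css = NULL;             (css is on its way to destruction)
 *      rcu_read_unlock();
 *
 *      if (css) {
 *              ... use css after the RCU section ...
 *              css_put(css);
 *      }
 */
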
/* bits in struct cgroup flags field */
enum {
        /* Control Group is dead */
        CGRP_DEAD,
        /*
         * Control Group has previously had a child cgroup or a task,
         * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
         */
        CGRP_RELEASABLE,
        /* Control Group requires release notifications to userspace */
        CGRP_NOTIFY_ON_RELEASE,
        /*
         * Clone the parent's configuration when creating a new child
         * cpuset cgroup.  For historical reasons, this option can be
         * specified at mount time and thus is implemented here.
         */
        CGRP_CPUSET_CLONE_CHILDREN,
        /* see the comment above CGRP_ROOT_SANE_BEHAVIOR for details */
        CGRP_SANE_BEHAVIOR,
};

struct cgroup_name {
        struct rcu_head rcu_head;
        char name[];
};

struct cgroup {
        unsigned long flags;            /* "unsigned long" so bitops work */

        /*
         * idr allocated in-hierarchy ID.
         *
         * The ID of the root cgroup is always 0, and a new cgroup
         * will be assigned the smallest available ID.
         */
        int id;

        /*
         * We link our 'sibling' struct into our parent's 'children'.
         * Our children link their 'sibling' into our 'children'.
         */
        struct list_head sibling;       /* my parent's children */
        struct list_head children;      /* my children */
        struct list_head files;         /* my files */

        struct cgroup *parent;          /* my parent */
        struct dentry *dentry;          /* cgroup fs entry, RCU protected */

        /*
         * Monotonically increasing unique serial number which defines a
         * uniform order among all cgroups.  It's guaranteed that all
         * ->children lists are in the ascending order of ->serial_nr.
         * It's used to allow interrupting and resuming iterations.
         */
        u64 serial_nr;

        /*
         * This is a copy of dentry->d_name, and it's needed because
         * we can't use dentry->d_name in cgroup_path().
         *
         * You must acquire rcu_read_lock() to access cgrp->name, and
         * the only place that can change it is rename(), which is
         * protected by the parent dir's i_mutex.
         *
         * Normally you should use the cgroup_name() wrapper rather than
         * access it directly.
         */
        struct cgroup_name __rcu *name;

        /* Private pointers for each registered subsystem */
        struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

        struct cgroupfs_root *root;

        /*
         * List of cgrp_cset_links pointing at css_sets with tasks in this
         * cgroup.  Protected by css_set_lock.
         */
        struct list_head cset_links;

        /*
         * Linked list running through all cgroups that can
         * potentially be reaped by the release agent.  Protected by
         * release_list_lock.
         */
        struct list_head release_list;

        /*
         * list of pidlists, up to two for each namespace (one for procs,
         * one for tasks); created on demand.
         */
        struct list_head pidlists;
        struct mutex pidlist_mutex;

        /* For css percpu_ref killing and RCU-protected deletion */
        struct rcu_head rcu_head;
        struct work_struct destroy_work;
        atomic_t css_kill_cnt;

        /* List of events which userspace wants to receive */
        struct list_head event_list;
        spinlock_t event_list_lock;

        /* directory xattrs */
        struct simple_xattrs xattrs;
};

#define MAX_CGROUP_ROOT_NAMELEN 64

/* cgroupfs_root->flags */
enum {
        /*
         * Unfortunately, cgroup core and various controllers are riddled
         * with idiosyncrasies and pointless options.  The following flag,
         * when set, will force sane behavior - some options are forced on,
         * others are disallowed, and some controllers will change their
         * hierarchical or other behaviors.
         *
         * The set of behaviors affected by this flag are still being
         * determined and developed and the mount option for this flag is
         * prefixed with __DEVEL__.  The prefix will be dropped once we
         * reach the point where all behaviors are compatible with the
         * planned unified hierarchy, which will automatically turn on this
         * flag.
         *
         * The following are the behaviors currently affected by this flag.
         *
         * - Mount options "noprefix" and "clone_children" are disallowed.
         *   Also, the cgroupfs file cgroup.clone_children is not created.
         *
         * - When mounting an existing superblock, mount options should
         *   match.
         *
         * - Remount is disallowed.
         *
         * - rename(2) is disallowed.
         *
         * - "tasks" is removed.  Everything should be at process
         *   granularity.  Use "cgroup.procs" instead.
         *
         * - "release_agent" and "notify_on_release" are removed.
         *   A replacement notification mechanism will be implemented.
         *
         * - cpuset: tasks will be kept in empty cpusets when hotplug happens
         *   and take masks of ancestors with non-empty cpus/mems, instead of
         *   being moved to an ancestor.
         *
         * - cpuset: a task can be moved into an empty cpuset, and again it
         *   takes masks of ancestors.
         *
         * - memcg: use_hierarchy is on by default and the cgroup file for
         *   the flag is not created.
         *
         * - blkcg: blk-throttle becomes properly hierarchical.
         */
        CGRP_ROOT_SANE_BEHAVIOR = (1 << 0),

        CGRP_ROOT_NOPREFIX      = (1 << 1), /* mounted subsystems have no named prefix */
        CGRP_ROOT_XATTR         = (1 << 2), /* supports extended attributes */

        /* mount options live below bit 16 */
        CGRP_ROOT_OPTION_MASK   = (1 << 16) - 1,

        CGRP_ROOT_SUBSYS_BOUND  = (1 << 16), /* subsystems finished binding */
};

/*
 * A cgroupfs_root represents the root of a cgroup hierarchy, and may be
 * associated with a superblock to form an active hierarchy.  This is
 * internal to cgroup core.  Don't access directly from controllers.
 */
struct cgroupfs_root {
        struct super_block *sb;

        /* The bitmask of subsystems attached to this hierarchy */
        unsigned long subsys_mask;

        /* Unique id for this hierarchy. */
        int hierarchy_id;

        /* A list running through the attached subsystems */
        struct list_head subsys_list;

        /* The root cgroup for this hierarchy */
        struct cgroup top_cgroup;

        /* Tracks how many cgroups are currently defined in the hierarchy. */
        int number_of_cgroups;

        /* A list running through the active hierarchies */
        struct list_head root_list;

        /* Hierarchy-specific flags */
        unsigned long flags;

        /* IDs for cgroups in this hierarchy */
        struct idr cgroup_idr;

        /* The path to use for release notifications. */
        char release_agent_path[PATH_MAX];

        /* The name for this hierarchy - may be empty */
        char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects.  This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */

struct css_set {

        /* Reference count */
        atomic_t refcount;

        /*
         * List running through all cgroup groups in the same hash
         * slot.  Protected by css_set_lock.
         */
        struct hlist_node hlist;

        /*
         * List running through all tasks using this cgroup
         * group.  Protected by css_set_lock.
         */
        struct list_head tasks;

        /*
         * List of cgrp_cset_links pointing at cgroups referenced from this
         * css_set.  Protected by css_set_lock.
         */
        struct list_head cgrp_links;

        /*
         * Set of subsystem states, one for each subsystem.  This array is
         * immutable after creation apart from the init_css_set during
         * subsystem registration (at boot time) and modular subsystem
         * loading/unloading.
         */
        struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

        /* For RCU-protected deletion */
        struct rcu_head rcu_head;
};

/*
 * cgroup_map_cb is an abstract callback API for reporting map-valued
 * control files
 */

struct cgroup_map_cb {
        int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value);
        void *state;
};

/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *      - the cgroup to use is file->f_dentry->d_parent->d_fsdata
 *      - the 'cftype' of the file is file->f_dentry->d_fsdata
 */

/* cftype->flags */
enum {
        CFTYPE_ONLY_ON_ROOT     = (1 << 0),     /* only create on root cgrp */
        CFTYPE_NOT_ON_ROOT      = (1 << 1),     /* don't create on root cgrp */
        CFTYPE_INSANE           = (1 << 2),     /* don't create if sane_behavior */
};

#define MAX_CFTYPE_NAME         64

struct cftype {
        /*
         * By convention, the name should begin with the name of the
         * subsystem, followed by a period.  A zero length string indicates
         * the end of the cftype array.
         */
        char name[MAX_CFTYPE_NAME];
        int private;
        /*
         * If not 0, the file mode is set to this value, otherwise it will
         * be figured out automatically.
         */
        umode_t mode;

        /*
         * If non-zero, defines the maximum length of string that can
         * be passed to write_string; defaults to 64.
         */
        size_t max_write_len;

        /* CFTYPE_* flags */
        unsigned int flags;

        int (*open)(struct inode *inode, struct file *file);
        ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
                        struct file *file,
                        char __user *buf, size_t nbytes, loff_t *ppos);
        /*
         * read_u64() is a shortcut for the common case of returning a
         * single integer.  Use it in place of read().
         */
        u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
        /*
         * read_s64() is a signed version of read_u64().
         */
        s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
        /*
         * read_map() is used for defining a map of key/value
         * pairs.  It should call cb->fill(cb, key, value) for each
         * entry.  The key/value pairs (and their ordering) should not
         * change between reboots.
         */
        int (*read_map)(struct cgroup *cgrp, struct cftype *cft,
                        struct cgroup_map_cb *cb);
        /*
         * read_seq_string() is used for outputting a simple sequence
         * using seqfile.
         */
        int (*read_seq_string)(struct cgroup *cgrp, struct cftype *cft,
                               struct seq_file *m);

        ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
                         struct file *file,
                         const char __user *buf, size_t nbytes, loff_t *ppos);

        /*
         * write_u64() is a shortcut for the common case of accepting
         * a single integer (as parsed by simple_strtoull) from
         * userspace.  Use in place of write(); return 0 or an error.
         */
        int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
        /*
         * write_s64() is a signed version of write_u64().
         */
        int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);

        /*
         * write_string() is passed a nul-terminated kernelspace
         * buffer of maximum length determined by max_write_len.
         * Returns 0 or a -ve error code.
         */
        int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
                            const char *buffer);
        /*
         * The trigger() callback can be used to get a kick from
         * userspace when the actual string written is not important
         * at all.  The private field can be used to determine the
         * kick type for multiplexing.
         */
        int (*trigger)(struct cgroup *cgrp, unsigned int event);

        int (*release)(struct inode *inode, struct file *file);

        /*
         * The register_event() callback will be used to add a new userspace
         * waiter for changes related to the cftype.  Implement it if
         * you want to provide this functionality.  Use eventfd_signal()
         * on the eventfd to send a notification to userspace.
         */
        int (*register_event)(struct cgroup *cgrp, struct cftype *cft,
                        struct eventfd_ctx *eventfd, const char *args);
        /*
         * The unregister_event() callback will be called when userspace
         * closes the eventfd or on cgroup removal.
         * This callback must be implemented if you want to provide
         * notification functionality.
         */
        void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
                        struct eventfd_ctx *eventfd);
};

/*
 * cftype_sets describe cftypes belonging to a subsystem and are chained at
 * cgroup_subsys->cftsets.  Each cftset points to an array of cftypes
 * terminated by a zero length name.
 */
struct cftype_set {
        struct list_head        node;   /* chained at subsys->cftsets */
        struct cftype           *cfts;
};

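/*
 * Example (illustrative sketch, not part of the original header): a minimal
 * cftype array for a hypothetical "foo" controller exposing one u64 knob.
 * foo_limit_read()/foo_limit_write() and foo_files are made-up names; the
 * handler signatures match read_u64/write_u64 above.
 *
 *      static u64 foo_limit_read(struct cgroup *cgrp, struct cftype *cft)
 *      {
 *              return 0;               (would return the controller state)
 *      }
 *
 *      static int foo_limit_write(struct cgroup *cgrp, struct cftype *cft,
 *                                 u64 val)
 *      {
 *              return 0;               (would update the controller state)
 *      }
 *
 *      static struct cftype foo_files[] = {
 *              {
 *                      .name = "limit",
 *                      .read_u64 = foo_limit_read,
 *                      .write_u64 = foo_limit_write,
 *              },
 *              { }     (zero length name terminates the array)
 *      };
 *
 * Such an array is typically plugged into cgroup_subsys->base_cftypes or
 * registered at runtime with cgroup_add_cftypes().
 */
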
struct cgroup_scanner {
        struct cgroup *cgrp;
        int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
        void (*process_task)(struct task_struct *p,
                        struct cgroup_scanner *scan);
        struct ptr_heap *heap;
        void *data;
};

/*
 * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details.  This
 * function can be called as long as @cgrp is accessible.
 */
static inline bool cgroup_sane_behavior(const struct cgroup *cgrp)
{
        return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR;
}

/* Caller should hold rcu_read_lock() */
static inline const char *cgroup_name(const struct cgroup *cgrp)
{
        return rcu_dereference(cgrp->name)->name;
}

int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);

bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);

int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);

int cgroup_task_count(const struct cgroup *cgrp);

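/*
 * Example (illustrative sketch, not part of the original header): printing
 * the hierarchy path of a cgroup.  Assumes @cgrp is a valid cgroup pointer;
 * cgroup_path() fills the caller-supplied buffer and returns 0 on success.
 *
 *      char buf[PATH_MAX];
 *
 *      if (!cgroup_path(cgrp, buf, sizeof(buf)))
 *              pr_info("cgroup path: %s\n", buf);
 */
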
/*
 * Control Group taskset, used to pass around a set of tasks to
 * cgroup_subsys methods.
 */
struct cgroup_taskset;
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset);
int cgroup_taskset_size(struct cgroup_taskset *tset);

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all
 * @tset: taskset to iterate
 */
#define cgroup_taskset_for_each(task, skip_cgrp, tset) \
        for ((task) = cgroup_taskset_first((tset)); (task); \
             (task) = cgroup_taskset_next((tset))) \
                if (!(skip_cgrp) || \
                    cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp))

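/*
 * Example (illustrative sketch, not part of the original header): a
 * hypothetical ->can_attach() method vetting every task in the set.
 * "foo_can_attach" and "foo_task_is_allowed" are made-up names.
 *
 *      static int foo_can_attach(struct cgroup *cgrp,
 *                                struct cgroup_taskset *tset)
 *      {
 *              struct task_struct *task;
 *
 *              cgroup_taskset_for_each(task, NULL, tset) {
 *                      if (!foo_task_is_allowed(task))
 *                              return -EACCES;
 *              }
 *              return 0;
 *      }
 */
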
/*
 * Control Group subsystem type.
 * See Documentation/cgroups/cgroups.txt for details
 */

struct cgroup_subsys {
        struct cgroup_subsys_state *(*css_alloc)(struct cgroup *cgrp);
        int (*css_online)(struct cgroup *cgrp);
        void (*css_offline)(struct cgroup *cgrp);
        void (*css_free)(struct cgroup *cgrp);

        int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
        void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
        void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
        void (*fork)(struct task_struct *task);
        void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
                     struct task_struct *task);
        void (*bind)(struct cgroup *root);

        int subsys_id;
        int disabled;
        int early_init;
        /*
         * True if this subsys uses IDs.  An ID is not available before
         * cgroup_init() (not available at early_init time).
         */
        bool use_id;

        /*
         * If %false, this subsystem is properly hierarchical -
         * configuration, resource accounting and restriction on a parent
         * cgroup cover those of its children.  If %true, hierarchy support
         * is broken in some ways - some subsystems ignore hierarchy
         * completely while others are only implemented half-way.
         *
         * It's now disallowed to create nested cgroups if the subsystem is
         * broken and cgroup core will emit a warning message on such
         * cases.  Eventually, all subsystems will be made properly
         * hierarchical and this will go away.
         */
        bool broken_hierarchy;
        bool warned_broken_hierarchy;

#define MAX_CGROUP_TYPE_NAMELEN 32
        const char *name;

        /*
         * Link to parent, and list entry in parent's children.
         * Protected by cgroup_lock()
         */
        struct cgroupfs_root *root;
        struct list_head sibling;
        /* used when use_id == true */
        struct idr idr;
        spinlock_t id_lock;

        /* list of cftype_sets */
        struct list_head cftsets;

        /* base cftypes, automatically [de]registered with the subsys itself */
        struct cftype *base_cftypes;
        struct cftype_set base_cftset;

        /* should be defined only by modular subsystems */
        struct module *module;
};

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
#undef SUBSYS

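/*
 * Example (illustrative sketch, not part of the original header): the
 * skeleton of a hypothetical built-in "foo" controller.  foo_css_alloc(),
 * foo_css_free(), foo_files and foo_subsys_id are assumptions; a real
 * controller would normally embed the css in a larger, subsystem-specific
 * structure rather than allocate a bare css.
 *
 *      static struct cgroup_subsys_state *foo_css_alloc(struct cgroup *cgrp)
 *      {
 *              struct cgroup_subsys_state *css;
 *
 *              css = kzalloc(sizeof(*css), GFP_KERNEL);
 *              if (!css)
 *                      return ERR_PTR(-ENOMEM);
 *              return css;
 *      }
 *
 *      static void foo_css_free(struct cgroup *cgrp)
 *      {
 *              kfree(cgrp->subsys[foo_subsys_id]);
 *      }
 *
 *      struct cgroup_subsys foo_subsys = {
 *              .name           = "foo",
 *              .subsys_id      = foo_subsys_id,
 *              .css_alloc      = foo_css_alloc,
 *              .css_free       = foo_css_free,
 *              .base_cftypes   = foo_files,
 *      };
 */
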
/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @subsys_id: the subsystem of interest
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @subsys_id.
 */
static inline struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
                                                     int subsys_id)
{
        return cgrp->subsys[subsys_id];
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside a proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
#define task_css_set_check(task, __c) \
        rcu_dereference_check((task)->cgroups, \
                lockdep_is_held(&(task)->alloc_lock) || \
                lockdep_is_held(&cgroup_mutex) || (__c))
#else
#define task_css_set_check(task, __c) \
        rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c) \
        task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
        return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
                                                   int subsys_id)
{
        return task_css_check(task, subsys_id, false);
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
                                         int subsys_id)
{
        return task_css(task, subsys_id)->cgroup;
}

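/*
 * Example (illustrative sketch, not part of the original header): finding
 * the cgroup a task belongs to for one subsystem.  "my_subsys_id" is a
 * placeholder for a real cgroup_subsys_id value.
 *
 *      struct cgroup *cgrp;
 *
 *      rcu_read_lock();
 *      cgrp = task_cgroup(task, my_subsys_id);
 *      ... cgrp is only guaranteed to stay valid inside this RCU section
 *          unless a reference is taken, e.g. css_tryget() on its css ...
 *      rcu_read_unlock();
 */
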
/**
 * cgroup_from_id - lookup cgroup by id
 * @ss: cgroup subsys to be looked into
 * @id: the cgroup id
 *
 * Returns the cgroup if there's a valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
static inline struct cgroup *cgroup_from_id(struct cgroup_subsys *ss, int id)
{
#ifdef CONFIG_PROVE_RCU
        rcu_lockdep_assert(rcu_read_lock_held() ||
                           lockdep_is_held(&cgroup_mutex),
                           "cgroup_from_id() needs proper protection");
#endif
        return idr_find(&ss->root->cgroup_idr, id);
}

struct cgroup *cgroup_next_sibling(struct cgroup *pos);

/**
 * cgroup_for_each_child - iterate through children of a cgroup
 * @pos: the cgroup * to use as the loop cursor
 * @cgrp: cgroup whose children to walk
 *
 * Walk @cgrp's children.  Must be called under rcu_read_lock().  A child
 * cgroup which hasn't finished ->css_online() or already has finished
 * ->css_offline() may show up during traversal and it's each subsystem's
 * responsibility to verify that each @pos is alive.
 *
 * If a subsystem synchronizes against the parent in its ->css_online() and
 * before starting iterating, a cgroup which finished ->css_online() is
 * guaranteed to be visible in the future iterations.
 *
 * It is allowed to temporarily drop the RCU read lock during iteration.
 * The caller is responsible for ensuring that @pos remains accessible
 * until the start of the next iteration by, for example, bumping the css
 * refcnt.
 */
#define cgroup_for_each_child(pos, cgrp) \
        for ((pos) = list_first_or_null_rcu(&(cgrp)->children, \
                                            struct cgroup, sibling); \
             (pos); (pos) = cgroup_next_sibling((pos)))

struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
                                          struct cgroup *cgroup);
struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);

/**
 * cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose descendants to walk
 *
 * Walk @cgroup's descendants.  Must be called under rcu_read_lock().  A
 * descendant cgroup which hasn't finished ->css_online() or already has
 * finished ->css_offline() may show up during traversal and it's each
 * subsystem's responsibility to verify that each @pos is alive.
 *
 * If a subsystem synchronizes against the parent in its ->css_online() and
 * before starting iterating, and synchronizes against @pos on each
 * iteration, any descendant cgroup which finished ->css_online() is
 * guaranteed to be visible in the future iterations.
 *
 * In other words, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@cgrp)
 * {
 *      Lock @cgrp->parent and @cgrp;
 *      Inherit state from @cgrp->parent;
 *      Unlock both.
 * }
 *
 * my_update_state(@cgrp)
 * {
 *      Lock @cgrp;
 *      Update @cgrp's state;
 *      Unlock @cgrp;
 *
 *      cgroup_for_each_descendant_pre(@pos, @cgrp) {
 *              Lock @pos;
 *              Verify @pos is alive and inherit state from @pos->parent;
 *              Unlock @pos;
 *      }
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any cgroup after the latest update to its
 * parent.
 *
 * If checking the parent's state requires locking the parent, each
 * inheriting iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop the RCU read lock during iteration.
 * The caller is responsible for ensuring that @pos remains accessible
 * until the start of the next iteration by, for example, bumping the css
 * refcnt.
 */
#define cgroup_for_each_descendant_pre(pos, cgroup) \
        for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos); \
             pos = cgroup_next_descendant_pre((pos), (cgroup)))

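/*
 * Example (illustrative sketch, not part of the original header): pushing a
 * configuration change down the hierarchy following the pattern described
 * above.  "foo_lock", "foo_is_alive" and "foo_inherit" are made-up helpers.
 *
 *      struct cgroup *pos;
 *
 *      rcu_read_lock();
 *      cgroup_for_each_descendant_pre(pos, cgrp) {
 *              spin_lock(&foo_lock);
 *              if (foo_is_alive(pos))
 *                      foo_inherit(pos, pos->parent);
 *              spin_unlock(&foo_lock);
 *      }
 *      rcu_read_unlock();
 */
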
struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
                                           struct cgroup *cgroup);

/**
 * cgroup_for_each_descendant_post - post-order walk of a cgroup's descendants
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose descendants to walk
 *
 * Similar to cgroup_for_each_descendant_pre() but performs post-order
 * traversal instead.  Note that the walk visibility guarantee described in
 * the pre-order walk doesn't apply the same to post-order walks.
 */
#define cgroup_for_each_descendant_post(pos, cgroup) \
        for (pos = cgroup_next_descendant_post(NULL, (cgroup)); (pos); \
             pos = cgroup_next_descendant_post((pos), (cgroup)))

/* A cgroup_iter should be treated as an opaque object */
struct cgroup_iter {
        struct list_head *cset_link;
        struct list_head *task;
};

/*
 * To iterate across the tasks in a cgroup:
 *
 * 1) call cgroup_iter_start() to initialize an iterator
 *
 * 2) call cgroup_iter_next() to retrieve member tasks until it
 *    returns NULL or until you want to end the iteration
 *
 * 3) call cgroup_iter_end() to destroy the iterator.
 *
 * Or, call cgroup_scan_tasks() to iterate through every task in a
 * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
 * the test_task() callback, but not while calling the process_task()
 * callback.
 */
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
                                     struct cgroup_iter *it);
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

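/*
 * Example (illustrative sketch, not part of the original header): counting
 * the tasks in a cgroup with the iterator API above, following the three
 * steps documented before the declarations.
 *
 *      struct cgroup_iter it;
 *      struct task_struct *task;
 *      int count = 0;
 *
 *      cgroup_iter_start(cgrp, &it);
 *      while ((task = cgroup_iter_next(cgrp, &it)))
 *              count++;
 *      cgroup_iter_end(cgrp, &it);
 */
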
/*
 * A CSS ID is an ID for cgroup_subsys_state structs under a subsys.  This
 * only works if cgroup_subsys.use_id == true.  It can be used for lookup
 * and scanning.  A CSS ID is assigned automatically at cgroup allocation
 * (create) and removed when the subsys calls free_css_id().  This is
 * because the lifetime of cgroup_subsys_state is the subsystem's
 * responsibility.
 *
 * Lookup and scanning functions should be called under rcu_read_lock().
 * Taking cgroup_mutex is not necessary for the following calls.
 * But the css returned by these routines can be "not populated yet" or
 * "being destroyed".  The caller should check the css and cgroup's status.
 */

/*
 * Typically called at ->destroy(), or wherever the subsys frees the
 * cgroup_subsys_state.
 */
void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);

/* Find a cgroup_subsys_state which has the given ID */

struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);

/* Returns true if root is an ancestor of cg */
bool css_is_ancestor(struct cgroup_subsys_state *cg,
                     const struct cgroup_subsys_state *root);

/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);

#else /* !CONFIG_CGROUPS */

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p, int callbacks) {}

static inline int cgroupstats_build(struct cgroupstats *stats,
                                    struct dentry *dentry)
{
        return -EINVAL;
}

/* No cgroups - nothing to do */
static inline int cgroup_attach_task_all(struct task_struct *from,
                                         struct task_struct *t)
{
        return 0;
}

#endif /* !CONFIG_CGROUPS */

#endif /* _LINUX_CGROUP_H */