]>
Commit | Line | Data |
---|---|---|
ddbcc7e8 PM |
1 | #ifndef _LINUX_CGROUP_H |
2 | #define _LINUX_CGROUP_H | |
3 | /* | |
4 | * cgroup interface | |
5 | * | |
6 | * Copyright (C) 2003 BULL SA | |
7 | * Copyright (C) 2004-2006 Silicon Graphics, Inc. | |
8 | * | |
9 | */ | |
10 | ||
11 | #include <linux/sched.h> | |
ddbcc7e8 PM |
12 | #include <linux/cpumask.h> |
13 | #include <linux/nodemask.h> | |
14 | #include <linux/rcupdate.h> | |
eb6fd504 | 15 | #include <linux/rculist.h> |
846c7bb0 | 16 | #include <linux/cgroupstats.h> |
31a7df01 | 17 | #include <linux/prio_heap.h> |
cc31edce | 18 | #include <linux/rwsem.h> |
38460b48 | 19 | #include <linux/idr.h> |
48ddbe19 | 20 | #include <linux/workqueue.h> |
03b1cde6 | 21 | #include <linux/xattr.h> |
25a7e684 | 22 | #include <linux/fs.h> |
d3daf28d | 23 | #include <linux/percpu-refcount.h> |
ddbcc7e8 PM |
24 | |
25 | #ifdef CONFIG_CGROUPS | |
26 | ||
27 | struct cgroupfs_root; | |
28 | struct cgroup_subsys; | |
29 | struct inode; | |
84eea842 | 30 | struct cgroup; |
38460b48 | 31 | struct css_id; |
a27bb332 | 32 | struct eventfd_ctx; |
ddbcc7e8 PM |
33 | |
/* cgroup core entry points, implemented in kernel/cgroup.c */
extern int cgroup_init_early(void);
extern int cgroup_init(void);
extern void cgroup_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
extern void cgroup_exit(struct task_struct *p, int run_callbacks);
extern int cgroupstats_build(struct cgroupstats *stats,
				struct dentry *dentry);
/* load/unload hooks for modular subsystems */
extern int cgroup_load_subsys(struct cgroup_subsys *ss);
extern void cgroup_unload_subsys(struct cgroup_subsys *ss);

/* /proc/<pid>/cgroup show function */
extern int proc_cgroup_show(struct seq_file *, void *);
a424316c | 45 | |
7d8e0bf5 LZ |
/*
 * Define the enumeration of all cgroup subsystems.
 *
 * We define ids for builtin subsystems and then modular ones.  Each
 * inclusion of cgroup_subsys.h expands SUBSYS() once per subsystem that
 * matches the current IS_SUBSYS_ENABLED() definition.
 */
#define SUBSYS(_x) _x ## _subsys_id,
enum cgroup_subsys_id {
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
	CGROUP_BUILTIN_SUBSYS_COUNT,

	/* rewind by one so the modular ids continue the sequence */
	__CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1,

#define IS_SUBSYS_ENABLED(option) IS_MODULE(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
66 | ||
ddbcc7e8 PM |
/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
	/* the cgroup that this css is attached to */
	struct cgroup *cgroup;

	/* the cgroup subsystem that this css is attached to */
	struct cgroup_subsys *ss;

	/* reference count - access via css_[try]get() and css_put() */
	struct percpu_ref refcnt;

	/* the parent css */
	struct cgroup_subsys_state *parent;

	unsigned long flags;		/* CSS_* bitmasks below */
	/* ID for this css, if possible */
	struct css_id __rcu *id;

	/* percpu_ref killing and RCU release */
	struct rcu_head rcu_head;
	struct work_struct destroy_work;
};
89 | ||
/*
 * bits in struct cgroup_subsys_state flags field - note these are
 * bitmasks tested directly, unlike the CGRP_* bit numbers below.
 */
enum {
	CSS_ROOT = (1 << 0), /* this CSS is the root of the subsystem */
	CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
};
95 | ||
5de0107e TH |
96 | /** |
97 | * css_get - obtain a reference on the specified css | |
98 | * @css: target css | |
99 | * | |
100 | * The caller must already have a reference. | |
ddbcc7e8 | 101 | */ |
ddbcc7e8 PM |
102 | static inline void css_get(struct cgroup_subsys_state *css) |
103 | { | |
104 | /* We don't need to reference count the root state */ | |
38b53aba | 105 | if (!(css->flags & CSS_ROOT)) |
d3daf28d | 106 | percpu_ref_get(&css->refcnt); |
ddbcc7e8 | 107 | } |
e7c5ec91 | 108 | |
5de0107e TH |
109 | /** |
110 | * css_tryget - try to obtain a reference on the specified css | |
111 | * @css: target css | |
112 | * | |
113 | * Obtain a reference on @css if it's alive. The caller naturally needs to | |
114 | * ensure that @css is accessible but doesn't have to be holding a | |
115 | * reference on it - IOW, RCU protected access is good enough for this | |
116 | * function. Returns %true if a reference count was successfully obtained; | |
117 | * %false otherwise. | |
118 | */ | |
e7c5ec91 PM |
119 | static inline bool css_tryget(struct cgroup_subsys_state *css) |
120 | { | |
38b53aba | 121 | if (css->flags & CSS_ROOT) |
e7c5ec91 | 122 | return true; |
d3daf28d | 123 | return percpu_ref_tryget(&css->refcnt); |
e7c5ec91 PM |
124 | } |
125 | ||
5de0107e TH |
126 | /** |
127 | * css_put - put a css reference | |
128 | * @css: target css | |
129 | * | |
130 | * Put a reference obtained via css_get() and css_tryget(). | |
131 | */ | |
ddbcc7e8 PM |
132 | static inline void css_put(struct cgroup_subsys_state *css) |
133 | { | |
38b53aba | 134 | if (!(css->flags & CSS_ROOT)) |
d3daf28d | 135 | percpu_ref_put(&css->refcnt); |
ddbcc7e8 PM |
136 | } |
137 | ||
/*
 * bits in struct cgroup flags field - these are bit numbers for use
 * with bitops on cgroup->flags ("unsigned long" so bitops work),
 * unlike the CSS_* bitmasks above.
 */
enum {
	/* Control Group is dead */
	CGRP_DEAD,
	/*
	 * Control Group has previously had a child cgroup or a task,
	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
	 */
	CGRP_RELEASABLE,
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
	/*
	 * Clone the parent's configuration when creating a new child
	 * cpuset cgroup.  For historical reasons, this option can be
	 * specified at mount time and thus is implemented here.
	 */
	CGRP_CPUSET_CLONE_CHILDREN,
	/* see the comment above CGRP_ROOT_SANE_BEHAVIOR for details */
	CGRP_SANE_BEHAVIOR,
};
158 | ||
/* RCU-managed copy of a cgroup's name; see cgroup->name below */
struct cgroup_name {
	struct rcu_head rcu_head;	/* for RCU-delayed freeing */
	char name[];			/* flexible array holding the name */
};
163 | ||
/* A single node in a cgroup hierarchy. */
struct cgroup {
	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * idr allocated in-hierarchy ID.
	 *
	 * The ID of the root cgroup is always 0, and a new cgroup
	 * will be assigned with a smallest available ID.
	 */
	int id;

	/* the number of attached css's */
	int nr_css;

	/*
	 * We link our 'sibling' struct into our parent's 'children'.
	 * Our children link their 'sibling' into our 'children'.
	 */
	struct list_head sibling;	/* my parent's children */
	struct list_head children;	/* my children */
	struct list_head files;		/* my files */

	struct cgroup *parent;		/* my parent */
	struct dentry *dentry;		/* cgroup fs entry, RCU protected */

	/*
	 * Monotonically increasing unique serial number which defines a
	 * uniform order among all cgroups.  It's guaranteed that all
	 * ->children lists are in the ascending order of ->serial_nr.
	 * It's used to allow interrupting and resuming iterations.
	 */
	u64 serial_nr;

	/*
	 * This is a copy of dentry->d_name, and it's needed because
	 * we can't use dentry->d_name in cgroup_path().
	 *
	 * You must acquire rcu_read_lock() to access cgrp->name, and
	 * the only place that can change it is rename(), which is
	 * protected by parent dir's i_mutex.
	 *
	 * Normally you should use cgroup_name() wrapper rather than
	 * access it directly.
	 */
	struct cgroup_name __rcu *name;

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];

	struct cgroupfs_root *root;	/* hierarchy this cgroup belongs to */

	/*
	 * List of cgrp_cset_links pointing at css_sets with tasks in this
	 * cgroup.  Protected by css_set_lock.
	 */
	struct list_head cset_links;

	/*
	 * Linked list running through all cgroups that can
	 * potentially be reaped by the release agent.  Protected by
	 * release_list_lock
	 */
	struct list_head release_list;

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* dummy css with NULL ->ss, points back to this cgroup */
	struct cgroup_subsys_state dummy_css;

	/* For css percpu_ref killing and RCU-protected deletion */
	struct rcu_head rcu_head;
	struct work_struct destroy_work;

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	/* directory xattrs */
	struct simple_xattrs xattrs;
};
249 | ||
#define MAX_CGROUP_ROOT_NAMELEN 64

/* cgroupfs_root->flags */
enum {
	/*
	 * Unfortunately, cgroup core and various controllers are riddled
	 * with idiosyncrasies and pointless options.  The following flag,
	 * when set, will force sane behavior - some options are forced on,
	 * others are disallowed, and some controllers will change their
	 * hierarchical or other behaviors.
	 *
	 * The set of behaviors affected by this flag are still being
	 * determined and developed and the mount option for this flag is
	 * prefixed with __DEVEL__.  The prefix will be dropped once we
	 * reach the point where all behaviors are compatible with the
	 * planned unified hierarchy, which will automatically turn on this
	 * flag.
	 *
	 * The followings are the behaviors currently affected this flag.
	 *
	 * - Mount options "noprefix" and "clone_children" are disallowed.
	 *   Also, cgroupfs file cgroup.clone_children is not created.
	 *
	 * - When mounting an existing superblock, mount options should
	 *   match.
	 *
	 * - Remount is disallowed.
	 *
	 * - rename(2) is disallowed.
	 *
	 * - "tasks" is removed.  Everything should be at process
	 *   granularity.  Use "cgroup.procs" instead.
	 *
	 * - "release_agent" and "notify_on_release" are removed.
	 *   Replacement notification mechanism will be implemented.
	 *
	 * - cpuset: tasks will be kept in empty cpusets when hotplug happens
	 *   and take masks of ancestors with non-empty cpus/mems, instead of
	 *   being moved to an ancestor.
	 *
	 * - cpuset: a task can be moved into an empty cpuset, and again it
	 *   takes masks of ancestors.
	 *
	 * - memcg: use_hierarchy is on by default and the cgroup file for
	 *   the flag is not created.
	 *
	 * - blkcg: blk-throttle becomes properly hierarchical.
	 */
	CGRP_ROOT_SANE_BEHAVIOR = (1 << 0),

	CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
	CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */

	/* mount options live below bit 16 */
	CGRP_ROOT_OPTION_MASK = (1 << 16) - 1,

	CGRP_ROOT_SUBSYS_BOUND = (1 << 16), /* subsystems finished binding */
};
308 | ||
/*
 * A cgroupfs_root represents the root of a cgroup hierarchy, and may be
 * associated with a superblock to form an active hierarchy.  This is
 * internal to cgroup core.  Don't access directly from controllers.
 */
struct cgroupfs_root {
	struct super_block *sb;

	/* The bitmask of subsystems attached to this hierarchy */
	unsigned long subsys_mask;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* A list running through the attached subsystems */
	struct list_head subsys_list;

	/* The root cgroup for this hierarchy */
	struct cgroup top_cgroup;

	/* Tracks how many cgroups are currently defined in hierarchy.*/
	int number_of_cgroups;

	/* A list running through the active hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags (CGRP_ROOT_*) */
	unsigned long flags;

	/* IDs for cgroups in this hierarchy */
	struct idr cgroup_idr;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};
347 | ||
/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects.  This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */
struct css_set {
	/* Reference count */
	atomic_t refcount;

	/*
	 * List running through all cgroup groups in the same hash
	 * slot. Protected by css_set_lock
	 */
	struct hlist_node hlist;

	/*
	 * List running through all tasks using this cgroup
	 * group. Protected by css_set_lock
	 */
	struct list_head tasks;

	/*
	 * List of cgrp_cset_links pointing at cgroups referenced from this
	 * css_set.  Protected by css_set_lock.
	 */
	struct list_head cgrp_links;

	/*
	 * Set of subsystem states, one for each subsystem. This array
	 * is immutable after creation apart from the init_css_set
	 * during subsystem registration (at boot time) and modular subsystem
	 * loading/unloading.
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};
390 | ||
/*
 * cgroup_map_cb is an abstract callback API for reporting map-valued
 * control files; see cftype->read_map() below.
 */
struct cgroup_map_cb {
	/* emit one key/value pair into the output */
	int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value);
	void *state;	/* opaque state owned by the caller */
};
400 | ||
/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_dentry->d_fsdata
 */

/* cftype->flags */
enum {
	CFTYPE_ONLY_ON_ROOT = (1 << 0),	/* only create on root cgrp */
	CFTYPE_NOT_ON_ROOT = (1 << 1),	/* don't create on root cgrp */
	CFTYPE_INSANE = (1 << 2),	/* don't create if sane_behavior */
};

#define MAX_CFTYPE_NAME 64
417 | ||
/* One control file exposed by a subsystem (or cgroup core). */
struct cftype {
	/*
	 * By convention, the name should begin with the name of the
	 * subsystem, followed by a period.  Zero length string indicates
	 * end of cftype array.
	 */
	char name[MAX_CFTYPE_NAME];
	int private;	/* subsystem-private value, e.g. for trigger() muxing */
	/*
	 * If not 0, file mode is set to this value, otherwise it will
	 * be figured out automatically
	 */
	umode_t mode;

	/*
	 * If non-zero, defines the maximum length of string that can
	 * be passed to write_string; defaults to 64
	 */
	size_t max_write_len;

	/* CFTYPE_* flags */
	unsigned int flags;

	/*
	 * The subsys this file belongs to.  Initialized automatically
	 * during registration.  NULL for cgroup core files.
	 */
	struct cgroup_subsys *ss;

	int (*open)(struct inode *inode, struct file *file);
	ssize_t (*read)(struct cgroup_subsys_state *css, struct cftype *cft,
			struct file *file,
			char __user *buf, size_t nbytes, loff_t *ppos);
	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer. Use it in place of read()
	 */
	u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64()
	 */
	s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
	/*
	 * read_map() is used for defining a map of key/value
	 * pairs. It should call cb->fill(cb, key, value) for each
	 * entry. The key/value pairs (and their ordering) should not
	 * change between reboots.
	 */
	int (*read_map)(struct cgroup_subsys_state *css, struct cftype *cft,
			struct cgroup_map_cb *cb);
	/*
	 * read_seq_string() is used for outputting a simple sequence
	 * using seqfile.
	 */
	int (*read_seq_string)(struct cgroup_subsys_state *css,
			       struct cftype *cft, struct seq_file *m);

	ssize_t (*write)(struct cgroup_subsys_state *css, struct cftype *cft,
			 struct file *file,
			 const char __user *buf, size_t nbytes, loff_t *ppos);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace. Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 u64 val);
	/*
	 * write_s64() is a signed version of write_u64()
	 */
	int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 s64 val);

	/*
	 * write_string() is passed a nul-terminated kernelspace
	 * buffer of maximum length determined by max_write_len.
	 * Returns 0 or -ve error code.
	 */
	int (*write_string)(struct cgroup_subsys_state *css, struct cftype *cft,
			    const char *buffer);
	/*
	 * trigger() callback can be used to get some kick from the
	 * userspace, when the actual string written is not important
	 * at all. The private field can be used to determine the
	 * kick type for multiplexing.
	 */
	int (*trigger)(struct cgroup_subsys_state *css, unsigned int event);

	int (*release)(struct inode *inode, struct file *file);

	/*
	 * register_event() callback will be used to add new userspace
	 * waiter for changes related to the cftype. Implement it if
	 * you want to provide this functionality. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct cgroup_subsys_state *css,
			      struct cftype *cft, struct eventfd_ctx *eventfd,
			      const char *args);
	/*
	 * unregister_event() callback will be called when userspace
	 * closes the eventfd or on cgroup removing.
	 * This callback must be implemented, if you want provide
	 * notification functionality.
	 */
	void (*unregister_event)(struct cgroup_subsys_state *css,
				 struct cftype *cft,
				 struct eventfd_ctx *eventfd);
};
528 | ||
/*
 * cftype_sets describe cftypes belonging to a subsystem and are chained at
 * cgroup_subsys->cftsets.  Each cftset points to an array of cftypes
 * terminated by zero length name.
 */
struct cftype_set {
	struct list_head node;	/* chained at subsys->cftsets */
	struct cftype *cfts;	/* NUL-name-terminated array */
};
538 | ||
873fe09e TH |
539 | /* |
540 | * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details. This | |
541 | * function can be called as long as @cgrp is accessible. | |
542 | */ | |
543 | static inline bool cgroup_sane_behavior(const struct cgroup *cgrp) | |
544 | { | |
545 | return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR; | |
546 | } | |
547 | ||
/*
 * Return @cgrp's name.  Caller should hold rcu_read_lock(); see the
 * comment on struct cgroup->name.
 */
static inline const char *cgroup_name(const struct cgroup *cgrp)
{
	return rcu_dereference(cgrp->name)->name;
}
553 | ||
/* add/remove cftype arrays for a subsystem */
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);

bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);

/* format the hierarchy path of a cgroup/task into @buf */
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);

int cgroup_task_count(const struct cgroup *cgrp);
bbcb81d0 | 563 | |
/*
 * Control Group taskset, used to pass around set of tasks to cgroup_subsys
 * methods.
 */
struct cgroup_taskset;
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset,
						   int subsys_id);
int cgroup_taskset_size(struct cgroup_taskset *tset);

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @skip_css: skip if task's css matches this, %NULL to iterate through all
 * @tset: taskset to iterate
 */
#define cgroup_taskset_for_each(task, skip_css, tset)			\
	for ((task) = cgroup_taskset_first((tset)); (task);		\
	     (task) = cgroup_taskset_next((tset)))			\
		if (!(skip_css) ||					\
		    cgroup_taskset_cur_css((tset),			\
			(skip_css)->ss->subsys_id) != (skip_css))
/*
 * Control Group subsystem type.
 * See Documentation/cgroups/cgroups.txt for details
 */
struct cgroup_subsys {
	/* css lifetime callbacks, invoked by cgroup core */
	struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
	int (*css_online)(struct cgroup_subsys_state *css);
	void (*css_offline)(struct cgroup_subsys_state *css);
	void (*css_free)(struct cgroup_subsys_state *css);

	/* task migration callbacks */
	int (*can_attach)(struct cgroup_subsys_state *css,
			  struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup_subsys_state *css,
			      struct cgroup_taskset *tset);
	void (*attach)(struct cgroup_subsys_state *css,
		       struct cgroup_taskset *tset);
	void (*fork)(struct task_struct *task);
	void (*exit)(struct cgroup_subsys_state *css,
		     struct cgroup_subsys_state *old_css,
		     struct task_struct *task);
	void (*bind)(struct cgroup_subsys_state *root_css);

	int subsys_id;
	int disabled;
	int early_init;
	/*
	 * True if this subsys uses ID. ID is not available before cgroup_init()
	 * (not available in early_init time.)
	 */
	bool use_id;

	/*
	 * If %false, this subsystem is properly hierarchical -
	 * configuration, resource accounting and restriction on a parent
	 * cgroup cover those of its children.  If %true, hierarchy support
	 * is broken in some ways - some subsystems ignore hierarchy
	 * completely while others are only implemented half-way.
	 *
	 * It's now disallowed to create nested cgroups if the subsystem is
	 * broken and cgroup core will emit a warning message on such
	 * cases.  Eventually, all subsystems will be made properly
	 * hierarchical and this will go away.
	 */
	bool broken_hierarchy;
	bool warned_broken_hierarchy;

#define MAX_CGROUP_TYPE_NAMELEN 32
	const char *name;

	/*
	 * Link to parent, and list entry in parent's children.
	 * Protected by cgroup_lock()
	 */
	struct cgroupfs_root *root;
	struct list_head sibling;
	/* used when use_id == true */
	struct idr idr;
	spinlock_t id_lock;

	/* list of cftype_sets */
	struct list_head cftsets;

	/* base cftypes, automatically [de]registered with subsys itself */
	struct cftype *base_cftypes;
	struct cftype_set base_cftset;

	/* should be defined only by modular subsystems */
	struct module *module;
};
658 | ||
/* Declare the cgroup_subsys struct for every builtin subsystem. */
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
#undef SUBSYS
664 | ||
/**
 * css_parent - find the parent css
 * @css: the target cgroup_subsys_state
 *
 * Return the parent css of @css.  This function is guaranteed to return
 * non-NULL parent as long as @css isn't the root.
 */
static inline
struct cgroup_subsys_state *css_parent(struct cgroup_subsys_state *css)
{
	return css->parent;
}
677 | ||
/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
			      lockdep_is_held(&(task)->alloc_lock) ||	\
			      lockdep_is_held(&cgroup_mutex) || (__c))
#else
/* without PROVE_RCU the extra conditions are not checked */
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif
dc61b1d6 | 701 | |
/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]
713 | ||
/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}
724 | ||
/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}
737 | ||
8af01f56 TH |
738 | static inline struct cgroup *task_cgroup(struct task_struct *task, |
739 | int subsys_id) | |
ddbcc7e8 | 740 | { |
8af01f56 | 741 | return task_css(task, subsys_id)->cgroup; |
ddbcc7e8 PM |
742 | } |
743 | ||
/**
 * cgroup_from_id - lookup cgroup by id
 * @ss: cgroup subsys to be looked into
 * @id: the cgroup id
 *
 * Returns the cgroup if there's valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
static inline struct cgroup *cgroup_from_id(struct cgroup_subsys *ss, int id)
{
#ifdef CONFIG_PROVE_RCU
	/* lookup walks ss->root->cgroup_idr which is RCU/cgroup_mutex protected */
	rcu_lockdep_assert(rcu_read_lock_held() ||
			   lockdep_is_held(&cgroup_mutex),
			   "cgroup_from_id() needs proper protection");
#endif
	return idr_find(&ss->root->cgroup_idr, id);
}
761 | ||
492eb21b TH |
762 | struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos, |
763 | struct cgroup_subsys_state *parent); | |
53fa5261 | 764 | |
574bd9f7 | 765 | /** |
492eb21b TH |
766 | * css_for_each_child - iterate through children of a css |
767 | * @pos: the css * to use as the loop cursor | |
768 | * @parent: css whose children to walk | |
574bd9f7 | 769 | * |
492eb21b TH |
770 | * Walk @parent's children. Must be called under rcu_read_lock(). A child |
771 | * css which hasn't finished ->css_online() or already has finished | |
92fb9748 | 772 | * ->css_offline() may show up during traversal and it's each subsystem's |
574bd9f7 TH |
773 | * responsibility to verify that each @pos is alive. |
774 | * | |
92fb9748 | 775 | * If a subsystem synchronizes against the parent in its ->css_online() and |
492eb21b | 776 | * before starting iterating, a css which finished ->css_online() is |
92fb9748 | 777 | * guaranteed to be visible in the future iterations. |
75501a6d TH |
778 | * |
779 | * It is allowed to temporarily drop RCU read lock during iteration. The | |
780 | * caller is responsible for ensuring that @pos remains accessible until | |
781 | * the start of the next iteration by, for example, bumping the css refcnt. | |
574bd9f7 | 782 | */ |
492eb21b TH |
/* terminates when css_next_child() returns NULL (no more children) */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
574bd9f7 | 786 | |
492eb21b TH |
787 | struct cgroup_subsys_state * |
788 | css_next_descendant_pre(struct cgroup_subsys_state *pos, | |
789 | struct cgroup_subsys_state *css); | |
790 | ||
791 | struct cgroup_subsys_state * | |
792 | css_rightmost_descendant(struct cgroup_subsys_state *pos); | |
574bd9f7 TH |
793 | |
794 | /** | |
492eb21b TH |
795 | * css_for_each_descendant_pre - pre-order walk of a css's descendants |
796 | * @pos: the css * to use as the loop cursor | |
797 | * @root: css whose descendants to walk | |
574bd9f7 | 798 | * |
bd8815a6 TH |
799 | * Walk @root's descendants. @root is included in the iteration and the |
800 | * first node to be visited. Must be called under rcu_read_lock(). A | |
492eb21b | 801 | * descendant css which hasn't finished ->css_online() or already has |
92fb9748 | 802 | * finished ->css_offline() may show up during traversal and it's each |
574bd9f7 TH |
803 | * subsystem's responsibility to verify that each @pos is alive. |
804 | * | |
92fb9748 TH |
805 | * If a subsystem synchronizes against the parent in its ->css_online() and |
806 | * before starting iterating, and synchronizes against @pos on each | |
492eb21b | 807 | * iteration, any descendant css which finished ->css_online() is |
574bd9f7 TH |
808 | * guaranteed to be visible in the future iterations. |
809 | * | |
810 | * In other words, the following guarantees that a descendant can't escape | |
811 | * state updates of its ancestors. | |
812 | * | |
492eb21b | 813 | * my_online(@css) |
574bd9f7 | 814 | * { |
492eb21b TH |
815 | * Lock @css's parent and @css; |
816 | * Inherit state from the parent; | |
574bd9f7 TH |
817 | * Unlock both. |
818 | * } | |
819 | * | |
492eb21b | 820 | * my_update_state(@css) |
574bd9f7 | 821 | * { |
492eb21b | 822 | * css_for_each_descendant_pre(@pos, @css) { |
574bd9f7 | 823 | * Lock @pos; |
bd8815a6 TH |
824 | * if (@pos == @css) |
825 | * Update @css's state; | |
826 | * else | |
827 | * Verify @pos is alive and inherit state from its parent; | |
574bd9f7 TH |
828 | * Unlock @pos; |
829 | * } | |
830 | * } | |
831 | * | |
832 | * As long as the inheriting step, including checking the parent state, is | |
833 | * enclosed inside @pos locking, double-locking the parent isn't necessary | |
834 | * while inheriting. The state update to the parent is guaranteed to be | |
835 | * visible by walking order and, as long as inheriting operations to the | |
836 | * same @pos are atomic to each other, multiple updates racing each other | |
 * still result in the correct state. It's guaranteed that at least one
492eb21b | 838 | * inheritance happens for any css after the latest update to its parent. |
574bd9f7 TH |
839 | * |
840 | * If checking parent's state requires locking the parent, each inheriting | |
841 | * iteration should lock and unlock both @pos->parent and @pos. | |
842 | * | |
843 | * Alternatively, a subsystem may choose to use a single global lock to | |
92fb9748 | 844 | * synchronize ->css_online() and ->css_offline() against tree-walking |
574bd9f7 | 845 | * operations. |
75501a6d TH |
846 | * |
847 | * It is allowed to temporarily drop RCU read lock during iteration. The | |
848 | * caller is responsible for ensuring that @pos remains accessible until | |
849 | * the start of the next iteration by, for example, bumping the css refcnt. | |
574bd9f7 | 850 | */ |
492eb21b TH |
/* terminates when css_next_descendant_pre() returns NULL (walk complete) */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))
574bd9f7 | 854 | |
492eb21b TH |
855 | struct cgroup_subsys_state * |
856 | css_next_descendant_post(struct cgroup_subsys_state *pos, | |
857 | struct cgroup_subsys_state *css); | |
574bd9f7 TH |
858 | |
859 | /** | |
492eb21b TH |
860 | * css_for_each_descendant_post - post-order walk of a css's descendants |
861 | * @pos: the css * to use as the loop cursor | |
862 | * @css: css whose descendants to walk | |
574bd9f7 | 863 | * |
492eb21b | 864 | * Similar to css_for_each_descendant_pre() but performs post-order |
bd8815a6 TH |
865 | * traversal instead. @root is included in the iteration and the last |
866 | * node to be visited. Note that the walk visibility guarantee described | |
867 | * in pre-order walk doesn't apply the same to post-order walks. | |
574bd9f7 | 868 | */ |
492eb21b TH |
/* terminates when css_next_descendant_post() returns NULL (walk complete) */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))
574bd9f7 | 872 | |
72ec7029 TH |
/* A css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys_state *origin_css;	/* css the iteration started from */
	struct list_head *cset_link;	/* internal cursor -- see css_task_iter_start/next */
	struct list_head *task;		/* internal cursor -- see css_task_iter_start/next */
};
879 | ||
72ec7029 TH |
880 | void css_task_iter_start(struct cgroup_subsys_state *css, |
881 | struct css_task_iter *it); | |
882 | struct task_struct *css_task_iter_next(struct css_task_iter *it); | |
883 | void css_task_iter_end(struct css_task_iter *it); | |
e535837b | 884 | |
72ec7029 TH |
885 | int css_scan_tasks(struct cgroup_subsys_state *css, |
886 | bool (*test)(struct task_struct *, void *), | |
887 | void (*process)(struct task_struct *, void *), | |
888 | void *data, struct ptr_heap *heap); | |
e535837b | 889 | |
31583bb0 | 890 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); |
8cc99345 | 891 | int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); |
31583bb0 | 892 | |
38460b48 KH |
893 | /* |
894 | * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works | |
895 | * if cgroup_subsys.use_id == true. It can be used for looking up and scanning. | |
896 | * CSS ID is assigned at cgroup allocation (create) automatically | |
897 | * and removed when subsys calls free_css_id() function. This is because | |
898 | * the lifetime of cgroup_subsys_state is subsys's matter. | |
899 | * | |
900 | * Looking up and scanning function should be called under rcu_read_lock(). | |
6be96a5c | 901 | * Taking cgroup_mutex is not necessary for following calls. |
38460b48 KH |
902 | * But the css returned by this routine can be "not populated yet" or "being |
903 | * destroyed". The caller should check css and cgroup's status. | |
904 | */ | |
905 | ||
906 | /* | |
 * Typically called at ->destroy(), or somewhere the subsys frees
908 | * cgroup_subsys_state. | |
909 | */ | |
910 | void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css); | |
911 | ||
912 | /* Find a cgroup_subsys_state which has given ID */ | |
913 | ||
914 | struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id); | |
915 | ||
38460b48 KH |
916 | /* Returns true if root is ancestor of cg */ |
917 | bool css_is_ancestor(struct cgroup_subsys_state *cg, | |
0b7f569e | 918 | const struct cgroup_subsys_state *root); |
38460b48 KH |
919 | |
920 | /* Get id and depth of css */ | |
921 | unsigned short css_id(struct cgroup_subsys_state *css); | |
e5d1367f | 922 | struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id); |
38460b48 | 923 | |
ddbcc7e8 PM |
924 | #else /* !CONFIG_CGROUPS */ |
925 | ||
/* no-op stubs so callers need not #ifdef on CONFIG_CGROUPS */
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p, int callbacks) {}
ddbcc7e8 | 931 | |
846c7bb0 BS |
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry)
{
	/* cgroup stats only exist when cgroups are compiled in */
	return -EINVAL;
}
ddbcc7e8 | 937 | |
d7926ee3 | 938 | /* No cgroups - nothing to do */ |
31583bb0 MT |
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t)
{
	/* nothing to attach to without cgroups; report success */
	return 0;
}
d7926ee3 | 944 | |
ddbcc7e8 PM |
945 | #endif /* !CONFIG_CGROUPS */ |
946 | ||
947 | #endif /* _LINUX_CGROUP_H */ |