]>
Commit | Line | Data |
---|---|---|
ddbcc7e8 PM |
1 | #ifndef _LINUX_CGROUP_H |
2 | #define _LINUX_CGROUP_H | |
3 | /* | |
4 | * cgroup interface | |
5 | * | |
6 | * Copyright (C) 2003 BULL SA | |
7 | * Copyright (C) 2004-2006 Silicon Graphics, Inc. | |
8 | * | |
9 | */ | |
10 | ||
11 | #include <linux/sched.h> | |
ddbcc7e8 PM |
12 | #include <linux/cpumask.h> |
13 | #include <linux/nodemask.h> | |
14 | #include <linux/rcupdate.h> | |
eb6fd504 | 15 | #include <linux/rculist.h> |
846c7bb0 | 16 | #include <linux/cgroupstats.h> |
31a7df01 | 17 | #include <linux/prio_heap.h> |
cc31edce | 18 | #include <linux/rwsem.h> |
38460b48 | 19 | #include <linux/idr.h> |
48ddbe19 | 20 | #include <linux/workqueue.h> |
03b1cde6 | 21 | #include <linux/xattr.h> |
25a7e684 | 22 | #include <linux/fs.h> |
d3daf28d | 23 | #include <linux/percpu-refcount.h> |
ddbcc7e8 PM |
24 | |
25 | #ifdef CONFIG_CGROUPS | |
26 | ||
27 | struct cgroupfs_root; | |
28 | struct cgroup_subsys; | |
29 | struct inode; | |
84eea842 | 30 | struct cgroup; |
38460b48 | 31 | struct css_id; |
a27bb332 | 32 | struct eventfd_ctx; |
ddbcc7e8 PM |
33 | |
34 | extern int cgroup_init_early(void); | |
35 | extern int cgroup_init(void); | |
b4f48b63 | 36 | extern void cgroup_fork(struct task_struct *p); |
817929ec | 37 | extern void cgroup_post_fork(struct task_struct *p); |
b4f48b63 | 38 | extern void cgroup_exit(struct task_struct *p, int run_callbacks); |
846c7bb0 BS |
39 | extern int cgroupstats_build(struct cgroupstats *stats, |
40 | struct dentry *dentry); | |
e6a1105b | 41 | extern int cgroup_load_subsys(struct cgroup_subsys *ss); |
cf5d5941 | 42 | extern void cgroup_unload_subsys(struct cgroup_subsys *ss); |
ddbcc7e8 | 43 | |
8d8b97ba | 44 | extern int proc_cgroup_show(struct seq_file *, void *); |
a424316c | 45 | |
7d8e0bf5 LZ |
46 | /* |
47 | * Define the enumeration of all cgroup subsystems. | |
48 | * | |
49 | * We define ids for builtin subsystems and then modular ones. | |
50 | */ | |
817929ec PM |
51 | #define SUBSYS(_x) _x ## _subsys_id, |
52 | enum cgroup_subsys_id { | |
7d8e0bf5 | 53 | #define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option) |
817929ec | 54 | #include <linux/cgroup_subsys.h> |
7d8e0bf5 LZ |
55 | #undef IS_SUBSYS_ENABLED |
56 | CGROUP_BUILTIN_SUBSYS_COUNT, | |
57 | ||
58 | __CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1, | |
59 | ||
60 | #define IS_SUBSYS_ENABLED(option) IS_MODULE(option) | |
817929ec | 61 | #include <linux/cgroup_subsys.h> |
7d8e0bf5 | 62 | #undef IS_SUBSYS_ENABLED |
a6f00298 | 63 | CGROUP_SUBSYS_COUNT, |
817929ec PM |
64 | }; |
65 | #undef SUBSYS | |
66 | ||
ddbcc7e8 PM |
67 | /* Per-subsystem/per-cgroup state maintained by the system. */ |
68 | struct cgroup_subsys_state { | |
d20a390a PM |
69 | /* |
70 | * The cgroup that this subsystem is attached to. Useful | |
ddbcc7e8 | 71 | * for subsystems that want to know about the cgroup |
d20a390a PM |
72 | * hierarchy structure |
73 | */ | |
ddbcc7e8 PM |
74 | struct cgroup *cgroup; |
75 | ||
d3daf28d TH |
76 | /* reference count - access via css_[try]get() and css_put() */ |
77 | struct percpu_ref refcnt; | |
ddbcc7e8 PM |
78 | |
79 | unsigned long flags; | |
38460b48 | 80 | /* ID for this css, if possible */ |
2c392b8c | 81 | struct css_id __rcu *id; |
48ddbe19 TH |
82 | |
83 | /* Used to put @cgroup->dentry on the last css_put() */ | |
84 | struct work_struct dput_work; | |
ddbcc7e8 PM |
85 | }; |
86 | ||
87 | /* bits in struct cgroup_subsys_state flags field */ | |
88 | enum { | |
38b53aba | 89 | CSS_ROOT = (1 << 0), /* this CSS is the root of the subsystem */ |
92fb9748 | 90 | CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ |
ddbcc7e8 PM |
91 | }; |
92 | ||
5de0107e TH |
93 | /** |
94 | * css_get - obtain a reference on the specified css | |
95 | * @css: target css | |
96 | * | |
97 | * The caller must already have a reference. | |
ddbcc7e8 | 98 | */ |
ddbcc7e8 PM |
99 | static inline void css_get(struct cgroup_subsys_state *css) |
100 | { | |
101 | /* We don't need to reference count the root state */ | |
38b53aba | 102 | if (!(css->flags & CSS_ROOT)) |
d3daf28d | 103 | percpu_ref_get(&css->refcnt); |
ddbcc7e8 | 104 | } |
e7c5ec91 | 105 | |
5de0107e TH |
106 | /** |
107 | * css_tryget - try to obtain a reference on the specified css | |
108 | * @css: target css | |
109 | * | |
110 | * Obtain a reference on @css if it's alive. The caller naturally needs to | |
111 | * ensure that @css is accessible but doesn't have to be holding a | |
112 | * reference on it - IOW, RCU protected access is good enough for this | |
113 | * function. Returns %true if a reference count was successfully obtained; | |
114 | * %false otherwise. | |
115 | */ | |
e7c5ec91 PM |
116 | static inline bool css_tryget(struct cgroup_subsys_state *css) |
117 | { | |
38b53aba | 118 | if (css->flags & CSS_ROOT) |
e7c5ec91 | 119 | return true; |
d3daf28d | 120 | return percpu_ref_tryget(&css->refcnt); |
e7c5ec91 PM |
121 | } |
122 | ||
5de0107e TH |
123 | /** |
124 | * css_put - put a css reference | |
125 | * @css: target css | |
126 | * | |
127 | * Put a reference obtained via css_get() and css_tryget(). | |
128 | */ | |
ddbcc7e8 PM |
129 | static inline void css_put(struct cgroup_subsys_state *css) |
130 | { | |
38b53aba | 131 | if (!(css->flags & CSS_ROOT)) |
d3daf28d | 132 | percpu_ref_put(&css->refcnt); |
ddbcc7e8 PM |
133 | } |
134 | ||
3116f0e3 PM |
135 | /* bits in struct cgroup flags field */ |
136 | enum { | |
137 | /* Control Group is dead */ | |
54766d4a | 138 | CGRP_DEAD, |
d20a390a PM |
139 | /* |
140 | * Control Group has previously had a child cgroup or a task, | |
141 | * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) | |
142 | */ | |
3116f0e3 PM |
143 | CGRP_RELEASABLE, |
144 | /* Control Group requires release notifications to userspace */ | |
145 | CGRP_NOTIFY_ON_RELEASE, | |
97978e6d | 146 | /* |
2260e7fc TH |
147 | * Clone the parent's configuration when creating a new child |
148 | * cpuset cgroup. For historical reasons, this option can be | |
149 | * specified at mount time and thus is implemented here. | |
97978e6d | 150 | */ |
2260e7fc | 151 | CGRP_CPUSET_CLONE_CHILDREN, |
873fe09e TH |
152 | /* see the comment above CGRP_ROOT_SANE_BEHAVIOR for details */ |
153 | CGRP_SANE_BEHAVIOR, | |
3116f0e3 PM |
154 | }; |
155 | ||
65dff759 LZ |
156 | struct cgroup_name { |
157 | struct rcu_head rcu_head; | |
158 | char name[]; | |
3116f0e3 PM |
159 | }; |
160 | ||
ddbcc7e8 PM |
161 | struct cgroup { |
162 | unsigned long flags; /* "unsigned long" so bitops work */ | |
163 | ||
0a950f65 TH |
164 | int id; /* ida allocated in-hierarchy ID */ |
165 | ||
ddbcc7e8 PM |
166 | /* |
167 | * We link our 'sibling' struct into our parent's 'children'. | |
168 | * Our children link their 'sibling' into our 'children'. | |
169 | */ | |
170 | struct list_head sibling; /* my parent's children */ | |
171 | struct list_head children; /* my children */ | |
05ef1d7c | 172 | struct list_head files; /* my files */ |
ddbcc7e8 | 173 | |
d20a390a | 174 | struct cgroup *parent; /* my parent */ |
febfcef6 | 175 | struct dentry *dentry; /* cgroup fs entry, RCU protected */ |
ddbcc7e8 | 176 | |
53fa5261 TH |
177 | /* |
178 | * Monotonically increasing unique serial number which defines a | |
179 | * uniform order among all cgroups. It's guaranteed that all | |
180 | * ->children lists are in the ascending order of ->serial_nr. | |
181 | * It's used to allow interrupting and resuming iterations. | |
182 | */ | |
183 | u64 serial_nr; | |
184 | ||
65dff759 LZ |
185 | /* |
186 | * This is a copy of dentry->d_name, and it's needed because | |
187 | * we can't use dentry->d_name in cgroup_path(). | |
188 | * | |
189 | * You must acquire rcu_read_lock() to access cgrp->name, and | |
190 | * the only place that can change it is rename(), which is | |
191 | * protected by parent dir's i_mutex. | |
192 | * | |
193 | * Normally you should use cgroup_name() wrapper rather than | |
194 | * access it directly. | |
195 | */ | |
196 | struct cgroup_name __rcu *name; | |
197 | ||
ddbcc7e8 PM |
198 | /* Private pointers for each registered subsystem */ |
199 | struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; | |
200 | ||
201 | struct cgroupfs_root *root; | |
817929ec PM |
202 | |
203 | /* | |
69d0206c TH |
204 | * List of cgrp_cset_links pointing at css_sets with tasks in this |
205 | * cgroup. Protected by css_set_lock. | |
817929ec | 206 | */ |
69d0206c | 207 | struct list_head cset_links; |
81a6a5cd PM |
208 | |
209 | /* | |
210 | * Linked list running through all cgroups that can | |
211 | * potentially be reaped by the release agent. Protected by | |
212 | * release_list_lock | |
213 | */ | |
214 | struct list_head release_list; | |
cc31edce | 215 | |
72a8cb30 BB |
216 | /* |
217 | * list of pidlists, up to two for each namespace (one for procs, one | |
218 | * for tasks); created on demand. | |
219 | */ | |
220 | struct list_head pidlists; | |
221 | struct mutex pidlist_mutex; | |
a47295e6 | 222 | |
d3daf28d | 223 | /* For css percpu_ref killing and RCU-protected deletion */ |
a47295e6 | 224 | struct rcu_head rcu_head; |
ea15f8cc | 225 | struct work_struct destroy_work; |
d3daf28d | 226 | atomic_t css_kill_cnt; |
0dea1168 | 227 | |
25985edc | 228 | /* List of events which userspace want to receive */ |
0dea1168 KS |
229 | struct list_head event_list; |
230 | spinlock_t event_list_lock; | |
03b1cde6 AR |
231 | |
232 | /* directory xattrs */ | |
233 | struct simple_xattrs xattrs; | |
817929ec PM |
234 | }; |
235 | ||
25a7e684 TH |
236 | #define MAX_CGROUP_ROOT_NAMELEN 64 |
237 | ||
238 | /* cgroupfs_root->flags */ | |
239 | enum { | |
873fe09e TH |
240 | /* |
241 | * Unfortunately, cgroup core and various controllers are riddled | |
242 | * with idiosyncrasies and pointless options. The following flag, | |
243 | * when set, will force sane behavior - some options are forced on, | |
244 | * others are disallowed, and some controllers will change their | |
245 | * hierarchical or other behaviors. | |
246 | * | |
247 | * The set of behaviors affected by this flag are still being | |
248 | * determined and developed and the mount option for this flag is | |
249 | * prefixed with __DEVEL__. The prefix will be dropped once we | |
250 | * reach the point where all behaviors are compatible with the | |
251 | * planned unified hierarchy, which will automatically turn on this | |
252 | * flag. | |
253 | * | |
254 | * The followings are the behaviors currently affected this flag. | |
255 | * | |
256 | * - Mount options "noprefix" and "clone_children" are disallowed. | |
257 | * Also, cgroupfs file cgroup.clone_children is not created. | |
258 | * | |
259 | * - When mounting an existing superblock, mount options should | |
260 | * match. | |
261 | * | |
262 | * - Remount is disallowed. | |
263 | * | |
f63674fd TH |
264 | * - "tasks" is removed. Everything should be at process |
265 | * granularity. Use "cgroup.procs" instead. | |
f00baae7 | 266 | * |
f63674fd TH |
267 | * - "release_agent" and "notify_on_release" are removed. |
268 | * Replacement notification mechanism will be implemented. | |
873fe09e | 269 | * |
6db8e85c TH |
270 | * - rename(2) is disallowed. |
271 | * | |
f63674fd TH |
272 | * - memcg: use_hierarchy is on by default and the cgroup file for |
273 | * the flag is not created. | |
873fe09e TH |
274 | */ |
275 | CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), | |
276 | ||
25a7e684 TH |
277 | CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ |
278 | CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ | |
1672d040 | 279 | CGRP_ROOT_SUBSYS_BOUND = (1 << 3), /* subsystems finished binding */ |
25a7e684 TH |
280 | }; |
281 | ||
282 | /* | |
283 | * A cgroupfs_root represents the root of a cgroup hierarchy, and may be | |
284 | * associated with a superblock to form an active hierarchy. This is | |
285 | * internal to cgroup core. Don't access directly from controllers. | |
286 | */ | |
287 | struct cgroupfs_root { | |
288 | struct super_block *sb; | |
289 | ||
a8a648c4 | 290 | /* The bitmask of subsystems attached to this hierarchy */ |
25a7e684 TH |
291 | unsigned long subsys_mask; |
292 | ||
293 | /* Unique id for this hierarchy. */ | |
294 | int hierarchy_id; | |
295 | ||
25a7e684 TH |
296 | /* A list running through the attached subsystems */ |
297 | struct list_head subsys_list; | |
298 | ||
299 | /* The root cgroup for this hierarchy */ | |
300 | struct cgroup top_cgroup; | |
301 | ||
302 | /* Tracks how many cgroups are currently defined in hierarchy.*/ | |
303 | int number_of_cgroups; | |
304 | ||
305 | /* A list running through the active hierarchies */ | |
306 | struct list_head root_list; | |
307 | ||
25a7e684 TH |
308 | /* Hierarchy-specific flags */ |
309 | unsigned long flags; | |
310 | ||
311 | /* IDs for cgroups in this hierarchy */ | |
312 | struct ida cgroup_ida; | |
313 | ||
314 | /* The path to use for release notifications. */ | |
315 | char release_agent_path[PATH_MAX]; | |
316 | ||
317 | /* The name for this hierarchy - may be empty */ | |
318 | char name[MAX_CGROUP_ROOT_NAMELEN]; | |
319 | }; | |
320 | ||
d20a390a PM |
321 | /* |
322 | * A css_set is a structure holding pointers to a set of | |
817929ec PM |
323 | * cgroup_subsys_state objects. This saves space in the task struct |
324 | * object and speeds up fork()/exit(), since a single inc/dec and a | |
d20a390a PM |
325 | * list_add()/del() can bump the reference count on the entire cgroup |
326 | * set for a task. | |
817929ec PM |
327 | */ |
328 | ||
329 | struct css_set { | |
330 | ||
331 | /* Reference count */ | |
146aa1bd | 332 | atomic_t refcount; |
817929ec | 333 | |
472b1053 LZ |
334 | /* |
335 | * List running through all cgroup groups in the same hash | |
336 | * slot. Protected by css_set_lock | |
337 | */ | |
338 | struct hlist_node hlist; | |
339 | ||
817929ec PM |
340 | /* |
341 | * List running through all tasks using this cgroup | |
342 | * group. Protected by css_set_lock | |
343 | */ | |
344 | struct list_head tasks; | |
345 | ||
346 | /* | |
69d0206c TH |
347 | * List of cgrp_cset_links pointing at cgroups referenced from this |
348 | * css_set. Protected by css_set_lock. | |
817929ec | 349 | */ |
69d0206c | 350 | struct list_head cgrp_links; |
817929ec PM |
351 | |
352 | /* | |
353 | * Set of subsystem states, one for each subsystem. This array | |
354 | * is immutable after creation apart from the init_css_set | |
cf5d5941 BB |
355 | * during subsystem registration (at boot time) and modular subsystem |
356 | * loading/unloading. | |
817929ec PM |
357 | */ |
358 | struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; | |
c378369d BB |
359 | |
360 | /* For RCU-protected deletion */ | |
361 | struct rcu_head rcu_head; | |
ddbcc7e8 PM |
362 | }; |
363 | ||
91796569 PM |
364 | /* |
365 | * cgroup_map_cb is an abstract callback API for reporting map-valued | |
366 | * control files | |
367 | */ | |
368 | ||
369 | struct cgroup_map_cb { | |
370 | int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value); | |
371 | void *state; | |
372 | }; | |
373 | ||
d20a390a PM |
374 | /* |
375 | * struct cftype: handler definitions for cgroup control files | |
ddbcc7e8 PM |
376 | * |
377 | * When reading/writing to a file: | |
a043e3b2 | 378 | * - the cgroup to use is file->f_dentry->d_parent->d_fsdata |
ddbcc7e8 PM |
379 | * - the 'cftype' of the file is file->f_dentry->d_fsdata |
380 | */ | |
381 | ||
8e3f6541 | 382 | /* cftype->flags */ |
02c402d9 TH |
383 | enum { |
384 | CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cg */ | |
385 | CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cg */ | |
386 | CFTYPE_INSANE = (1 << 2), /* don't create if sane_behavior */ | |
387 | }; | |
8e3f6541 TH |
388 | |
389 | #define MAX_CFTYPE_NAME 64 | |
390 | ||
ddbcc7e8 | 391 | struct cftype { |
d20a390a PM |
392 | /* |
393 | * By convention, the name should begin with the name of the | |
8e3f6541 TH |
394 | * subsystem, followed by a period. Zero length string indicates |
395 | * end of cftype array. | |
d20a390a | 396 | */ |
ddbcc7e8 PM |
397 | char name[MAX_CFTYPE_NAME]; |
398 | int private; | |
099fca32 LZ |
399 | /* |
400 | * If not 0, file mode is set to this value, otherwise it will | |
401 | * be figured out automatically | |
402 | */ | |
a5e7ed32 | 403 | umode_t mode; |
db3b1497 PM |
404 | |
405 | /* | |
406 | * If non-zero, defines the maximum length of string that can | |
407 | * be passed to write_string; defaults to 64 | |
408 | */ | |
409 | size_t max_write_len; | |
410 | ||
8e3f6541 TH |
411 | /* CFTYPE_* flags */ |
412 | unsigned int flags; | |
413 | ||
ce16b49d PM |
414 | int (*open)(struct inode *inode, struct file *file); |
415 | ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft, | |
416 | struct file *file, | |
417 | char __user *buf, size_t nbytes, loff_t *ppos); | |
ddbcc7e8 | 418 | /* |
f4c753b7 | 419 | * read_u64() is a shortcut for the common case of returning a |
ddbcc7e8 PM |
420 | * single integer. Use it in place of read() |
421 | */ | |
ce16b49d | 422 | u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft); |
e73d2c61 PM |
423 | /* |
424 | * read_s64() is a signed version of read_u64() | |
425 | */ | |
ce16b49d | 426 | s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft); |
91796569 PM |
427 | /* |
428 | * read_map() is used for defining a map of key/value | |
429 | * pairs. It should call cb->fill(cb, key, value) for each | |
430 | * entry. The key/value pairs (and their ordering) should not | |
431 | * change between reboots. | |
432 | */ | |
03c78cbe | 433 | int (*read_map)(struct cgroup *cgrp, struct cftype *cft, |
ce16b49d | 434 | struct cgroup_map_cb *cb); |
29486df3 SH |
435 | /* |
436 | * read_seq_string() is used for outputting a simple sequence | |
437 | * using seqfile. | |
438 | */ | |
03c78cbe | 439 | int (*read_seq_string)(struct cgroup *cgrp, struct cftype *cft, |
ce16b49d | 440 | struct seq_file *m); |
91796569 | 441 | |
ce16b49d PM |
442 | ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft, |
443 | struct file *file, | |
444 | const char __user *buf, size_t nbytes, loff_t *ppos); | |
355e0c48 PM |
445 | |
446 | /* | |
f4c753b7 | 447 | * write_u64() is a shortcut for the common case of accepting |
355e0c48 PM |
448 | * a single integer (as parsed by simple_strtoull) from |
449 | * userspace. Use in place of write(); return 0 or error. | |
450 | */ | |
ce16b49d | 451 | int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val); |
e73d2c61 PM |
452 | /* |
453 | * write_s64() is a signed version of write_u64() | |
454 | */ | |
ce16b49d | 455 | int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val); |
355e0c48 | 456 | |
db3b1497 PM |
457 | /* |
458 | * write_string() is passed a nul-terminated kernelspace | |
459 | * buffer of maximum length determined by max_write_len. | |
460 | * Returns 0 or -ve error code. | |
461 | */ | |
462 | int (*write_string)(struct cgroup *cgrp, struct cftype *cft, | |
463 | const char *buffer); | |
d447ea2f PE |
464 | /* |
465 | * trigger() callback can be used to get some kick from the | |
466 | * userspace, when the actual string written is not important | |
467 | * at all. The private field can be used to determine the | |
468 | * kick type for multiplexing. | |
469 | */ | |
470 | int (*trigger)(struct cgroup *cgrp, unsigned int event); | |
471 | ||
ce16b49d | 472 | int (*release)(struct inode *inode, struct file *file); |
0dea1168 KS |
473 | |
474 | /* | |
475 | * register_event() callback will be used to add new userspace | |
476 | * waiter for changes related to the cftype. Implement it if | |
477 | * you want to provide this functionality. Use eventfd_signal() | |
478 | * on eventfd to send notification to userspace. | |
479 | */ | |
480 | int (*register_event)(struct cgroup *cgrp, struct cftype *cft, | |
481 | struct eventfd_ctx *eventfd, const char *args); | |
482 | /* | |
483 | * unregister_event() callback will be called when userspace | |
484 | * closes the eventfd or on cgroup removing. | |
485 | * This callback must be implemented, if you want provide | |
486 | * notification functionality. | |
0dea1168 | 487 | */ |
907860ed | 488 | void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft, |
0dea1168 | 489 | struct eventfd_ctx *eventfd); |
ddbcc7e8 PM |
490 | }; |
491 | ||
8e3f6541 TH |
492 | /* |
493 | * cftype_sets describe cftypes belonging to a subsystem and are chained at | |
494 | * cgroup_subsys->cftsets. Each cftset points to an array of cftypes | |
495 | * terminated by zero length name. | |
496 | */ | |
497 | struct cftype_set { | |
498 | struct list_head node; /* chained at subsys->cftsets */ | |
03b1cde6 | 499 | struct cftype *cfts; |
8e3f6541 TH |
500 | }; |
501 | ||
31a7df01 CW |
502 | struct cgroup_scanner { |
503 | struct cgroup *cg; | |
504 | int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan); | |
505 | void (*process_task)(struct task_struct *p, | |
506 | struct cgroup_scanner *scan); | |
507 | struct ptr_heap *heap; | |
bd1a8ab7 | 508 | void *data; |
31a7df01 CW |
509 | }; |
510 | ||
873fe09e TH |
511 | /* |
512 | * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details. This | |
513 | * function can be called as long as @cgrp is accessible. | |
514 | */ | |
515 | static inline bool cgroup_sane_behavior(const struct cgroup *cgrp) | |
516 | { | |
517 | return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR; | |
518 | } | |
519 | ||
65dff759 LZ |
520 | /* Caller should hold rcu_read_lock() */ |
521 | static inline const char *cgroup_name(const struct cgroup *cgrp) | |
522 | { | |
523 | return rcu_dereference(cgrp->name)->name; | |
524 | } | |
525 | ||
03b1cde6 AR |
526 | int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); |
527 | int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); | |
8e3f6541 | 528 | |
78574cf9 | 529 | bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor); |
ddbcc7e8 | 530 | |
ffd2d883 | 531 | int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen); |
857a2beb TH |
532 | int task_cgroup_path_from_hierarchy(struct task_struct *task, int hierarchy_id, |
533 | char *buf, size_t buflen); | |
ddbcc7e8 | 534 | |
ffd2d883 | 535 | int cgroup_task_count(const struct cgroup *cgrp); |
bbcb81d0 | 536 | |
2f7ee569 TH |
537 | /* |
538 | * Control Group taskset, used to pass around set of tasks to cgroup_subsys | |
539 | * methods. | |
540 | */ | |
541 | struct cgroup_taskset; | |
542 | struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset); | |
543 | struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset); | |
544 | struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset); | |
545 | int cgroup_taskset_size(struct cgroup_taskset *tset); | |
546 | ||
547 | /** | |
548 | * cgroup_taskset_for_each - iterate cgroup_taskset | |
549 | * @task: the loop cursor | |
550 | * @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all | |
551 | * @tset: taskset to iterate | |
552 | */ | |
553 | #define cgroup_taskset_for_each(task, skip_cgrp, tset) \ | |
554 | for ((task) = cgroup_taskset_first((tset)); (task); \ | |
555 | (task) = cgroup_taskset_next((tset))) \ | |
556 | if (!(skip_cgrp) || \ | |
557 | cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp)) | |
558 | ||
21acb9ca TLSC |
559 | /* |
560 | * Control Group subsystem type. | |
561 | * See Documentation/cgroups/cgroups.txt for details | |
562 | */ | |
ddbcc7e8 PM |
563 | |
564 | struct cgroup_subsys { | |
92fb9748 TH |
565 | struct cgroup_subsys_state *(*css_alloc)(struct cgroup *cgrp); |
566 | int (*css_online)(struct cgroup *cgrp); | |
567 | void (*css_offline)(struct cgroup *cgrp); | |
568 | void (*css_free)(struct cgroup *cgrp); | |
569 | ||
761b3ef5 LZ |
570 | int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset); |
571 | void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset); | |
572 | void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset); | |
573 | void (*fork)(struct task_struct *task); | |
574 | void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp, | |
575 | struct task_struct *task); | |
761b3ef5 | 576 | void (*bind)(struct cgroup *root); |
e5991371 | 577 | |
ddbcc7e8 | 578 | int subsys_id; |
8bab8dde | 579 | int disabled; |
ddbcc7e8 | 580 | int early_init; |
38460b48 KH |
581 | /* |
582 | * True if this subsys uses ID. ID is not available before cgroup_init() | |
583 | * (not available in early_init time.) | |
584 | */ | |
585 | bool use_id; | |
48ddbe19 | 586 | |
8c7f6edb TH |
587 | /* |
588 | * If %false, this subsystem is properly hierarchical - | |
589 | * configuration, resource accounting and restriction on a parent | |
590 | * cgroup cover those of its children. If %true, hierarchy support | |
591 | * is broken in some ways - some subsystems ignore hierarchy | |
592 | * completely while others are only implemented half-way. | |
593 | * | |
594 | * It's now disallowed to create nested cgroups if the subsystem is | |
595 | * broken and cgroup core will emit a warning message on such | |
596 | * cases. Eventually, all subsystems will be made properly | |
597 | * hierarchical and this will go away. | |
598 | */ | |
599 | bool broken_hierarchy; | |
600 | bool warned_broken_hierarchy; | |
601 | ||
ddbcc7e8 PM |
602 | #define MAX_CGROUP_TYPE_NAMELEN 32 |
603 | const char *name; | |
604 | ||
999cd8a4 PM |
605 | /* |
606 | * Link to parent, and list entry in parent's children. | |
6be96a5c | 607 | * Protected by cgroup_lock() |
999cd8a4 PM |
608 | */ |
609 | struct cgroupfs_root *root; | |
ddbcc7e8 | 610 | struct list_head sibling; |
38460b48 KH |
611 | /* used when use_id == true */ |
612 | struct idr idr; | |
42aee6c4 | 613 | spinlock_t id_lock; |
e6a1105b | 614 | |
8e3f6541 TH |
615 | /* list of cftype_sets */ |
616 | struct list_head cftsets; | |
617 | ||
618 | /* base cftypes, automatically [de]registered with subsys itself */ | |
619 | struct cftype *base_cftypes; | |
620 | struct cftype_set base_cftset; | |
621 | ||
e6a1105b BB |
622 | /* should be defined only by modular subsystems */ |
623 | struct module *module; | |
ddbcc7e8 PM |
624 | }; |
625 | ||
626 | #define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys; | |
5fc0b025 | 627 | #define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option) |
ddbcc7e8 | 628 | #include <linux/cgroup_subsys.h> |
5fc0b025 | 629 | #undef IS_SUBSYS_ENABLED |
ddbcc7e8 PM |
630 | #undef SUBSYS |
631 | ||
632 | static inline struct cgroup_subsys_state *cgroup_subsys_state( | |
ffd2d883 | 633 | struct cgroup *cgrp, int subsys_id) |
ddbcc7e8 | 634 | { |
ffd2d883 | 635 | return cgrp->subsys[subsys_id]; |
ddbcc7e8 PM |
636 | } |
637 | ||
dc61b1d6 PZ |
638 | /* |
639 | * function to get the cgroup_subsys_state which allows for extra | |
640 | * rcu_dereference_check() conditions, such as locks used during the | |
641 | * cgroup_subsys::attach() methods. | |
642 | */ | |
2219449a TH |
643 | #ifdef CONFIG_PROVE_RCU |
644 | extern struct mutex cgroup_mutex; | |
dc61b1d6 | 645 | #define task_subsys_state_check(task, subsys_id, __c) \ |
2219449a TH |
646 | rcu_dereference_check((task)->cgroups->subsys[(subsys_id)], \ |
647 | lockdep_is_held(&(task)->alloc_lock) || \ | |
648 | lockdep_is_held(&cgroup_mutex) || (__c)) | |
649 | #else | |
dc61b1d6 | 650 | #define task_subsys_state_check(task, subsys_id, __c) \ |
2219449a TH |
651 | rcu_dereference((task)->cgroups->subsys[(subsys_id)]) |
652 | #endif | |
dc61b1d6 PZ |
653 | |
654 | static inline struct cgroup_subsys_state * | |
655 | task_subsys_state(struct task_struct *task, int subsys_id) | |
ddbcc7e8 | 656 | { |
dc61b1d6 | 657 | return task_subsys_state_check(task, subsys_id, false); |
ddbcc7e8 PM |
658 | } |
659 | ||
660 | static inline struct cgroup* task_cgroup(struct task_struct *task, | |
661 | int subsys_id) | |
662 | { | |
663 | return task_subsys_state(task, subsys_id)->cgroup; | |
664 | } | |
665 | ||
53fa5261 TH |
666 | struct cgroup *cgroup_next_sibling(struct cgroup *pos); |
667 | ||
574bd9f7 TH |
668 | /** |
669 | * cgroup_for_each_child - iterate through children of a cgroup | |
670 | * @pos: the cgroup * to use as the loop cursor | |
75501a6d | 671 | * @cgrp: cgroup whose children to walk |
574bd9f7 | 672 | * |
75501a6d | 673 | * Walk @cgrp's children. Must be called under rcu_read_lock(). A child |
92fb9748 TH |
674 | * cgroup which hasn't finished ->css_online() or already has finished |
675 | * ->css_offline() may show up during traversal and it's each subsystem's | |
574bd9f7 TH |
676 | * responsibility to verify that each @pos is alive. |
677 | * | |
92fb9748 TH |
678 | * If a subsystem synchronizes against the parent in its ->css_online() and |
679 | * before starting iterating, a cgroup which finished ->css_online() is | |
680 | * guaranteed to be visible in the future iterations. | |
75501a6d TH |
681 | * |
682 | * It is allowed to temporarily drop RCU read lock during iteration. The | |
683 | * caller is responsible for ensuring that @pos remains accessible until | |
684 | * the start of the next iteration by, for example, bumping the css refcnt. | |
574bd9f7 | 685 | */ |
75501a6d TH |
686 | #define cgroup_for_each_child(pos, cgrp) \ |
687 | for ((pos) = list_first_or_null_rcu(&(cgrp)->children, \ | |
688 | struct cgroup, sibling); \ | |
689 | (pos); (pos) = cgroup_next_sibling((pos))) | |
574bd9f7 TH |
690 | |
691 | struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos, | |
692 | struct cgroup *cgroup); | |
12a9d2fe | 693 | struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos); |
574bd9f7 TH |
694 | |
695 | /** | |
696 | * cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants | |
697 | * @pos: the cgroup * to use as the loop cursor | |
698 | * @cgroup: cgroup whose descendants to walk | |
699 | * | |
700 | * Walk @cgroup's descendants. Must be called under rcu_read_lock(). A | |
92fb9748 TH |
701 | * descendant cgroup which hasn't finished ->css_online() or already has |
702 | * finished ->css_offline() may show up during traversal and it's each | |
574bd9f7 TH |
703 | * subsystem's responsibility to verify that each @pos is alive. |
704 | * | |
92fb9748 TH |
705 | * If a subsystem synchronizes against the parent in its ->css_online() and |
706 | * before starting iterating, and synchronizes against @pos on each | |
7805d000 | 707 | * iteration, any descendant cgroup which finished ->css_online() is |
574bd9f7 TH |
708 | * guaranteed to be visible in the future iterations. |
709 | * | |
710 | * In other words, the following guarantees that a descendant can't escape | |
711 | * state updates of its ancestors. | |
712 | * | |
92fb9748 | 713 | * my_online(@cgrp) |
574bd9f7 TH |
714 | * { |
715 | * Lock @cgrp->parent and @cgrp; | |
716 | * Inherit state from @cgrp->parent; | |
717 | * Unlock both. | |
718 | * } | |
719 | * | |
720 | * my_update_state(@cgrp) | |
721 | * { | |
722 | * Lock @cgrp; | |
723 | * Update @cgrp's state; | |
724 | * Unlock @cgrp; | |
725 | * | |
726 | * cgroup_for_each_descendant_pre(@pos, @cgrp) { | |
727 | * Lock @pos; | |
728 | * Verify @pos is alive and inherit state from @pos->parent; | |
729 | * Unlock @pos; | |
730 | * } | |
731 | * } | |
732 | * | |
733 | * As long as the inheriting step, including checking the parent state, is | |
734 | * enclosed inside @pos locking, double-locking the parent isn't necessary | |
735 | * while inheriting. The state update to the parent is guaranteed to be | |
736 | * visible by walking order and, as long as inheriting operations to the | |
737 | * same @pos are atomic to each other, multiple updates racing each other | |
738 | * still result in the correct state. It's guaranateed that at least one | |
739 | * inheritance happens for any cgroup after the latest update to its | |
740 | * parent. | |
741 | * | |
742 | * If checking parent's state requires locking the parent, each inheriting | |
743 | * iteration should lock and unlock both @pos->parent and @pos. | |
744 | * | |
745 | * Alternatively, a subsystem may choose to use a single global lock to | |
92fb9748 | 746 | * synchronize ->css_online() and ->css_offline() against tree-walking |
574bd9f7 | 747 | * operations. |
75501a6d TH |
748 | * |
749 | * It is allowed to temporarily drop RCU read lock during iteration. The | |
750 | * caller is responsible for ensuring that @pos remains accessible until | |
751 | * the start of the next iteration by, for example, bumping the css refcnt. | |
574bd9f7 TH |
752 | */ |
/*
 * cgroup_for_each_descendant_pre - pre-order walk of @cgroup's descendants
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose descendants to walk
 *
 * Iterates by repeatedly calling cgroup_next_descendant_pre() until it
 * returns NULL.  See the preceding comment block for the online/offline
 * visibility guarantees and the caller's duty to verify that @pos is alive.
 */
#define cgroup_for_each_descendant_pre(pos, cgroup)			\
	for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos);	\
	     pos = cgroup_next_descendant_pre((pos), (cgroup)))
756 | ||
/*
 * cgroup_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (NULL to initiate traversal)
 * @cgroup: root of the subtree being walked
 *
 * Returns the next descendant of @cgroup in post-order, or NULL when the
 * walk is complete.  Used by cgroup_for_each_descendant_post().
 */
struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
					   struct cgroup *cgroup);
759 | ||
/**
 * cgroup_for_each_descendant_post - post-order walk of a cgroup's descendants
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose descendants to walk
 *
 * Similar to cgroup_for_each_descendant_pre() but performs post-order
 * traversal instead.  Note that the walk visibility guarantee described in
 * pre-order walk doesn't apply the same to post-order walks.
 */
#define cgroup_for_each_descendant_post(pos, cgroup)			\
	for (pos = cgroup_next_descendant_post(NULL, (cgroup)); (pos);	\
	     pos = cgroup_next_descendant_post((pos), (cgroup)))
772 | ||
817929ec PM |
/*
 * A cgroup_iter should be treated as an opaque object: the fields are
 * iteration cursors maintained by cgroup_iter_start()/cgroup_iter_next()/
 * cgroup_iter_end() and must not be touched by users.
 */
struct cgroup_iter {
	struct list_head *cset_link;	/* cursor position; managed by the iterator */
	struct list_head *task;		/* cursor position; managed by the iterator */
};
778 | ||
d20a390a PM |
/*
 * To iterate across the tasks in a cgroup:
 *
 * 1) call cgroup_iter_start() to initialize an iterator
 *
 * 2) call cgroup_iter_next() to retrieve member tasks until it
 *    returns NULL or until you want to end the iteration
 *
 * 3) call cgroup_iter_end() to destroy the iterator.
 *
 * Or, call cgroup_scan_tasks() to iterate through every task in a
 * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
 * the test_task() callback, but not while calling the process_task()
 * callback.
 */
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
				     struct cgroup_iter *it);
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
/* Named the second parameter @t for consistency with the CONFIG_CGROUPS=n stub. */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *t);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
31583bb0 | 801 | |
38460b48 KH |
/*
 * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
 * if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
 * CSS ID is assigned at cgroup allocation (create) automatically
 * and removed when subsys calls free_css_id() function. This is because
 * the lifetime of cgroup_subsys_state is subsys's matter.
 *
 * Looking up and scanning function should be called under rcu_read_lock().
 * Taking cgroup_mutex is not necessary for following calls.
 * But the css returned by this routine can be "not populated yet" or "being
 * destroyed". The caller should check css and cgroup's status.
 */

/*
 * Typically called at ->destroy(), or somewhere the subsys frees
 * cgroup_subsys_state.
 */
void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);

/* Find a cgroup_subsys_state which has given ID */

struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);

/* Returns true if root is ancestor of cg */
bool css_is_ancestor(struct cgroup_subsys_state *cg,
		     const struct cgroup_subsys_state *root);

/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
/* Resolve the css with subsys id @id backing the cgroupfs dir behind @f. */
struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
38460b48 | 832 | |
ddbcc7e8 PM |
833 | #else /* !CONFIG_CGROUPS */ |
834 | ||
/* Stub for CONFIG_CGROUPS=n: no early cgroup setup needed; always succeeds. */
static inline int cgroup_init_early(void)
{
	return 0;
}
/* Stub for CONFIG_CGROUPS=n: no cgroup infrastructure to initialize. */
static inline int cgroup_init(void)
{
	return 0;
}
/* Stub: with cgroups disabled, fork needs no cgroup bookkeeping. */
static inline void cgroup_fork(struct task_struct *p) {}
/* Stub: with cgroups disabled, there is no post-fork cgroup attachment. */
static inline void cgroup_post_fork(struct task_struct *p) {}
/*
 * Stub: with cgroups disabled, task exit needs no cgroup teardown.
 * Parameter renamed @callbacks -> @run_callbacks to match the
 * CONFIG_CGROUPS prototype of cgroup_exit().
 */
static inline void cgroup_exit(struct task_struct *p, int run_callbacks) {}
ddbcc7e8 | 840 | |
846c7bb0 BS |
/*
 * Stub for CONFIG_CGROUPS=n: there is no cgroup hierarchy, so cgroupstats
 * cannot be built; always reports -EINVAL.
 */
static inline int
cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	return -EINVAL;
}
ddbcc7e8 | 846 | |
d7926ee3 | 847 | /* No cgroups - nothing to do */ |
31583bb0 MT |
/*
 * Stub for CONFIG_CGROUPS=n: with no cgroup hierarchy there is nothing to
 * attach, so report success unconditionally.
 */
static inline int
cgroup_attach_task_all(struct task_struct *from, struct task_struct *t)
{
	return 0;
}
d7926ee3 | 853 | |
ddbcc7e8 PM |
854 | #endif /* !CONFIG_CGROUPS */ |
855 | ||
856 | #endif /* _LINUX_CGROUP_H */ |