/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values. The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN 1
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS (1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED (1U << 1)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
        struct cgroup_subsys *ss;
        unsigned int flags;

        struct list_head *cset_pos;
        struct list_head *cset_head;

        struct list_head *tcset_pos;
        struct list_head *tcset_head;

        struct list_head *task_pos;
        struct list_head *tasks_head;
        struct list_head *mg_tasks_head;

        struct css_set *cur_cset;
        struct css_set *cur_dcset;
        struct task_struct *cur_task;
        struct list_head iters_node;	/* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x) \
        extern struct static_key_true _x ## _cgrp_subsys_enabled_key; \
        extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss) \
        static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss) \
        static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
                                             struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
                                                       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p);
extern void cgroup_cancel_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
                                           struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
                                                    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
                                                     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
                                         struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
                                        struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
                         struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent) \
        for ((pos) = css_next_child(NULL, (parent)); (pos); \
             (pos) = css_next_child((pos), (parent)))

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants. @root is included in the iteration and the
 * first node to be visited. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *        Lock @css's parent and @css;
 *        Inherit state from the parent;
 *        Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *        css_for_each_descendant_pre(@pos, @css) {
 *                Lock @pos;
 *                if (@pos == @css)
 *                        Update @css's state;
 *                else
 *                        Verify @pos is alive and inherit state from its parent;
 *                Unlock @pos;
 *        }
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting. The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state. It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css) \
        for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
             (pos) = css_next_descendant_pre((pos), (css)))

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead. @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee described for the pre-order walk
 * doesn't carry over to post-order walks.
 */
#define css_for_each_descendant_post(pos, css) \
        for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
             (pos) = css_next_descendant_post((pos), (css)))

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset) \
        for ((task) = cgroup_taskset_first((tset), &(dst_css)); \
             (task); \
             (task) = cgroup_taskset_next((tset), &(dst_css)))

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \
        for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \
             (leader); \
             (leader) = cgroup_taskset_next((tset), &(dst_css))) \
                if ((leader) != (leader)->group_leader) \
                        ; \
                else

/*
 * Inline functions.
 */

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released. This function doesn't care whether @css is on or
 * offline. The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function. Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                return percpu_ref_tryget(&css->refcnt);
        return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online. The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function. Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                return percpu_ref_tryget_live(&css->refcnt);
        return true;
}

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline. In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal. cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed. If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
        return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() or css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() or css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
        css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
        return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
        css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive. This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
        rcu_dereference_check((task)->cgroups, \
                lockdep_is_held(&cgroup_mutex) || \
                lockdep_is_held(&css_set_lock) || \
                ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c) \
        rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c) \
        task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
        return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
                                                   int subsys_id)
{
        return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it. This function is guaranteed to return a
 * valid css.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
        struct cgroup_subsys_state *css;

        rcu_read_lock();
        while (true) {
                css = task_css(task, subsys_id);
                if (likely(css_tryget_online(css)))
                        break;
                cpu_relax();
        }
        rcu_read_unlock();
        return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
        return task_css_check(task, subsys_id, true) ==
                init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
                                         int subsys_id)
{
        return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
        return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
        struct cgroup_subsys_state *parent_css = cgrp->self.parent;

        if (parent_css)
                return container_of(parent_css, struct cgroup, self);
        return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
                                        struct cgroup *ancestor)
{
        if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
                return false;
        return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether the cgroup @task belongs to on the default hierarchy is a
 * descendant of @ancestor. It follows the same rules as
 * cgroup_is_descendant() and only applies to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
                                               struct cgroup *ancestor)
{
        struct css_set *cset = task_css_set(task);

        return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
        return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
                cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
        return cgrp->kn->id.ino;
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
        return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
        return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
        return of_css(seq->private);
}

/*
 * Name / path handling functions. All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
        return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
        return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
        pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
        pr_cont_kernfs_path(cgrp->kn);
}

static inline void cgroup_init_kthreadd(void)
{
        /*
         * kthreadd is inherited by all kthreads, keep it in the root so
         * that the new kthreads are guaranteed to stay in the root until
         * initialization is finished.
         */
        current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
        /*
         * This kthread finished initialization. The creator should have
         * set PF_NO_SETAFFINITY if this kthread should stay in the root.
         */
        current->no_cgroup_migration = 0;
}

static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
        return &cgrp->kn->id;
}

void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
                                char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
                                         struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
                                    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
        return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
                                               struct cgroup *ancestor)
{
        return true;
}

static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
                                              char *buf, size_t buflen) {}
#endif /* !CONFIG_CGROUPS */
692 | ||
693 | /* | |
694 | * Basic resource stats. | |
695 | */ | |
696 | #ifdef CONFIG_CGROUPS | |
697 | ||
698 | #ifdef CONFIG_CGROUP_CPUACCT | |
699 | void cpuacct_charge(struct task_struct *tsk, u64 cputime); | |
700 | void cpuacct_account_field(struct task_struct *tsk, int index, u64 val); | |
701 | #else | |
702 | static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} | |
703 | static inline void cpuacct_account_field(struct task_struct *tsk, int index, | |
704 | u64 val) {} | |
705 | #endif | |
706 | ||
707 | void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec); | |
708 | void __cgroup_account_cputime_field(struct cgroup *cgrp, | |
709 | enum cpu_usage_stat index, u64 delta_exec); | |
710 | ||
711 | static inline void cgroup_account_cputime(struct task_struct *task, | |
712 | u64 delta_exec) | |
713 | { | |
714 | struct cgroup *cgrp; | |
715 | ||
716 | cpuacct_charge(task, delta_exec); | |
717 | ||
718 | rcu_read_lock(); | |
719 | cgrp = task_dfl_cgroup(task); | |
720 | if (cgroup_parent(cgrp)) | |
721 | __cgroup_account_cputime(cgrp, delta_exec); | |
722 | rcu_read_unlock(); | |
723 | } | |
724 | ||
725 | static inline void cgroup_account_cputime_field(struct task_struct *task, | |
726 | enum cpu_usage_stat index, | |
727 | u64 delta_exec) | |
728 | { | |
729 | struct cgroup *cgrp; | |
730 | ||
731 | cpuacct_account_field(task, index, delta_exec); | |
732 | ||
733 | rcu_read_lock(); | |
734 | cgrp = task_dfl_cgroup(task); | |
735 | if (cgroup_parent(cgrp)) | |
736 | __cgroup_account_cputime_field(cgrp, index, delta_exec); | |
737 | rcu_read_unlock(); | |
738 | } | |
739 | ||
740 | #else /* CONFIG_CGROUPS */ | |
741 | ||
742 | static inline void cgroup_account_cputime(struct task_struct *task, | |
743 | u64 delta_exec) {} | |
744 | static inline void cgroup_account_cputime_field(struct task_struct *task, | |
745 | enum cpu_usage_stat index, | |
746 | u64 delta_exec) {} | |
747 | ||
748 | #endif /* CONFIG_CGROUPS */ | |
749 | ||
/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
        unsigned long v;

        /*
         * @skcd->val is 64bit but the following is safe on 32bit too as we
         * just need the lower ulong to be written and read atomically.
         */
        v = READ_ONCE(skcd->val);

        if (v & 1)
                return &cgrp_dfl_root.cgrp;

        return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
        return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else /* !CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */
790 | ||
791 | struct cgroup_namespace { | |
792 | refcount_t count; | |
793 | struct ns_common ns; | |
794 | struct user_namespace *user_ns; | |
795 | struct ucounts *ucounts; | |
796 | struct css_set *root_cset; | |
797 | }; | |
798 | ||
799 | extern struct cgroup_namespace init_cgroup_ns; | |
800 | ||
801 | #ifdef CONFIG_CGROUPS | |
802 | ||
803 | void free_cgroup_ns(struct cgroup_namespace *ns); | |
804 | ||
805 | struct cgroup_namespace *copy_cgroup_ns(unsigned long flags, | |
806 | struct user_namespace *user_ns, | |
807 | struct cgroup_namespace *old_ns); | |
808 | ||
809 | int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen, | |
810 | struct cgroup_namespace *ns); | |
811 | ||
812 | #else /* !CONFIG_CGROUPS */ | |
813 | ||
814 | static inline void free_cgroup_ns(struct cgroup_namespace *ns) { } | |
815 | static inline struct cgroup_namespace * | |
816 | copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns, | |
817 | struct cgroup_namespace *old_ns) | |
818 | { | |
819 | return old_ns; | |
820 | } | |
821 | ||
822 | #endif /* !CONFIG_CGROUPS */ | |
823 | ||
824 | static inline void get_cgroup_ns(struct cgroup_namespace *ns) | |
825 | { | |
826 | if (ns) | |
827 | refcount_inc(&ns->count); | |
828 | } | |
829 | ||
830 | static inline void put_cgroup_ns(struct cgroup_namespace *ns) | |
831 | { | |
832 | if (ns && refcount_dec_and_test(&ns->count)) | |
833 | free_cgroup_ns(ns); | |
834 | } | |
835 | ||
836 | #endif /* _LINUX_CGROUP_H */ |