/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values. The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS		(1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED		(1U << 1)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;
	struct list_head		*tasks_head;
	struct list_head		*mg_tasks_head;

	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *t);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
int cgroup_can_fork(struct task_struct *p);
void cgroup_cancel_fork(struct task_struct *p);
void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
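
/*
 * Example (an illustrative sketch, not part of the API above; my_css and
 * my_count are hypothetical): count every task attached to a css by
 * walking it with the start/next/end triplet. Passing 0 as @flags walks
 * all tasks rather than only threadgroup leaders.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *	int my_count = 0;
 *
 *	css_task_iter_start(my_css, 0, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		my_count++;
 *	css_task_iter_end(&it);
 */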

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
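
/*
 * Example (an illustrative sketch; my_parent_css is hypothetical): count
 * the online children of a css while holding the RCU read lock, as
 * css_for_each_child() requires.
 *
 *	struct cgroup_subsys_state *pos;
 *	int nr_online = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_child(pos, my_parent_css)
 *		if (pos->flags & CSS_ONLINE)
 *			nr_online++;
 *	rcu_read_unlock();
 */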

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants. @root is included in the iteration and the
 * first node to be visited. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting. The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state. It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead. @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described in pre-order
 * walk doesn't apply the same way to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))
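
/*
 * Example (an illustrative sketch; my_root_css and my_reset_state() are
 * hypothetical): a post-order walk visits children before their parent,
 * which suits bottom-up aggregation or teardown.
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, my_root_css)
 *		my_reset_state(pos);
 *	rcu_read_unlock();
 */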

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
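
/*
 * Example (an illustrative sketch): a typical ->attach() callback walks
 * the taskset and applies per-task state against each task's destination
 * css. my_attach() and my_apply() are hypothetical.
 *
 *	static void my_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			my_apply(css, task);
 *	}
 */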

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released. This function doesn't care whether @css is on or
 * offline. The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function. Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online. The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function. Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline. In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal. cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed. If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive. This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		lockdep_is_held(&cgroup_mutex) ||			\
		lockdep_is_held(&css_set_lock) ||			\
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif
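
/*
 * Example (an illustrative sketch; my_lock is a hypothetical subsystem
 * lock that is also held across attach): dereference @task's css_set
 * while holding my_lock instead of one of the default conditions.
 *
 *	struct css_set *cset;
 *
 *	cset = task_css_set_check(task, lockdep_is_held(&my_lock));
 */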

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it. This function is guaranteed to return a
 * valid css.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		if (likely(css_tryget_online(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}
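
/*
 * Example (an illustrative sketch; my_subsys_id and my_use() are
 * hypothetical): task_get_css() returns with a reference held, which the
 * caller must drop with css_put() when done.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = task_get_css(task, my_subsys_id);
 *	my_use(css);
 *	css_put(css);
 */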

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return cgrp->kn->id.ino;
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions. All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}
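
/*
 * Example (an illustrative sketch): print a cgroup's path from a stack
 * buffer. Like the kernfs counterpart, the returned length may exceed
 * @buflen, in which case the buffer holds a truncated path.
 *
 *	char buf[PATH_MAX];
 *	int len = cgroup_path(cgrp, buf, sizeof(buf));
 *
 *	if (len >= 0 && len < sizeof(buf))
 *		pr_info("cgroup: %s\n", buf);
 */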

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization. The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return &cgrp->kn->id;
}

void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
				char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
					      char *buf, size_t buflen) {}
#endif /* !CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 */
	v = READ_ONCE(skcd->val);

	if (v & 1)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}
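
/*
 * Example (an illustrative sketch): look up the cgroup a socket was
 * associated with at allocation time, e.g. from networking code. Assumes
 * @sk is a valid socket; sk_cgrp_data only exists with
 * CONFIG_SOCK_CGROUP_DATA.
 *
 *	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
 */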

#else	/* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif	/* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
	refcount_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}

#endif /* _LINUX_CGROUP_H */