/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS
/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000
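/*
 * Illustrative note (editorial sketch, not from the original source): DFL
 * is the geometric mean of MIN and MAX, sqrt(1 * 10000) = 100, so a weight
 * can express up to a 100x boost (10000 / 100) and a 100x penalty (100 / 1)
 * relative to the default.
 */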
/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS		(1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED		(1U << 1)
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;
	struct list_head		*tasks_head;
	struct list_head		*mg_tasks_head;

	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS
/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p);
extern void cgroup_cancel_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);
/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
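/*
 * Illustrative sketch of the iterator above (usage example;
 * handle_one_process() is a hypothetical helper): walk every threadgroup
 * leader attached to @css.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, CSS_TASK_ITER_PROCS, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		handle_one_process(task);
 *	css_task_iter_end(&it);
 */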
/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
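/*
 * Illustrative sketch (assumes the CSS_ONLINE flag from cgroup-defs.h):
 * count @parent's online children under the RCU read lock.
 *
 *	struct cgroup_subsys_state *child;
 *	int nr_online = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent)
 *		if (child->flags & CSS_ONLINE)
 *			nr_online++;
 *	rcu_read_unlock();
 */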
/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants.  @root is included in the iteration and the
 * first node to be visited.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))
/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead.  @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described in pre-order
 * walk doesn't apply the same to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))
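/*
 * Illustrative sketch (drain_per_css_cache() is hypothetical): post-order
 * is the natural shape for bottom-up teardown since every child is
 * visited before its parent.
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, css)
 *		drain_per_css_cache(pos);
 *	rcu_read_unlock();
 */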
/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
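/*
 * Illustrative sketch (my_ss_attach() and apply_limits() are
 * hypothetical): a subsystem's ->attach() callback typically walks the
 * taskset like this.
 *
 *	static void my_ss_attach(struct cgroup_taskset *tset)
 *	{
 *		struct cgroup_subsys_state *css;
 *		struct task_struct *task;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			apply_limits(css, task);
 *	}
 */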
/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else
/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}
/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}
/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released.  This function doesn't care whether @css is on or
 * offline.  The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function.  Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}
/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online.  The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function.  Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}
/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline.  In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal.  cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed.  If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}
/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}
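/*
 * Illustrative sketch (lookup_some_css() is hypothetical): the usual
 * pattern for taking a temporary reference from an RCU-protected pointer.
 *
 *	rcu_read_lock();
 *	css = lookup_some_css();
 *	if (css && !css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *
 *	if (css) {
 *		... use css; it can't be released under us ...
 *		css_put(css);
 *	}
 */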
/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}
static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}
/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
			      lockdep_is_held(&cgroup_mutex) ||		\
			      lockdep_is_held(&css_set_lock) ||		\
			      ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif
/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]
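/*
 * Illustrative sketch (my_cgrp_id and my_lock are hypothetical): a caller
 * holding a subsystem-private lock can pass it via @__c so lockdep
 * accepts the dereference.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = task_css_check(task, my_cgrp_id, lockdep_is_held(&my_lock));
 */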
/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}
/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it.  This function is guaranteed to return a
 * valid css.  The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here.  A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}
/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}
static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}
/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}
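/*
 * Illustrative sketch (limit_root is a hypothetical ancestor cgroup):
 * because each cgroup caches its ancestors' IDs indexed by level, the
 * test is O(1) - no walk up the tree is needed.
 *
 *	if (cgroup_is_descendant(task_dfl_cgroup(task), limit_root))
 *		...;
 */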
/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}
/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return cgrp->kn->id.ino;
}
/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}
/*
 * Name / path handling functions.  All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}
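/*
 * Illustrative sketch: format a cgroup's path into a caller-provided
 * buffer for a diagnostic message.
 *
 *	char buf[PATH_MAX];
 *
 *	if (cgroup_path(cgrp, buf, sizeof(buf)) >= 0)
 *		pr_info("cgroup: %s\n", buf);
 */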
static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization.  The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return &cgrp->kn->id;
}

void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
				char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
					      char *buf, size_t buflen) {}

#endif /* !CONFIG_CGROUPS */
/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUPS

#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}

#else /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */
/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 */
	v = READ_ONCE(skcd->val);

	if (v & 1)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else /* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */
struct cgroup_namespace {
	refcount_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);
#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}

#endif /* _LINUX_CGROUP_H */