/*
 * Debug controller for the cgroup core.
 *
 * WARNING: This controller is for cgroup core debugging only.
 * Its interfaces are unstable and subject to changes at any time.
 */
#include <linux/ctype.h>
#include <linux/slab.h>

#include "cgroup-internal.h"
13 static struct cgroup_subsys_state
*
14 debug_css_alloc(struct cgroup_subsys_state
*parent_css
)
16 struct cgroup_subsys_state
*css
= kzalloc(sizeof(*css
), GFP_KERNEL
);
19 return ERR_PTR(-ENOMEM
);
/* Free the css allocated by debug_css_alloc(). */
static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}
30 * debug_taskcount_read - return the number of tasks in a cgroup.
31 * @cgrp: the cgroup in question
33 static u64
debug_taskcount_read(struct cgroup_subsys_state
*css
,
36 return cgroup_task_count(css
->cgroup
);
39 static int current_css_set_read(struct seq_file
*seq
, void *v
)
42 struct cgroup_subsys
*ss
;
43 struct cgroup_subsys_state
*css
;
46 mutex_lock(&cgroup_mutex
);
47 spin_lock_irq(&css_set_lock
);
49 cset
= rcu_dereference(current
->cgroups
);
50 refcnt
= refcount_read(&cset
->refcount
);
51 seq_printf(seq
, "css_set %pK %d", cset
, refcnt
);
52 if (refcnt
> cset
->nr_tasks
)
53 seq_printf(seq
, " +%d", refcnt
- cset
->nr_tasks
);
57 * Print the css'es stored in the current css_set.
59 for_each_subsys(ss
, i
) {
60 css
= cset
->subsys
[ss
->id
];
63 seq_printf(seq
, "%2d: %-4s\t- %lx[%d]\n", ss
->id
, ss
->name
,
64 (unsigned long)css
, css
->id
);
67 spin_unlock_irq(&css_set_lock
);
68 mutex_unlock(&cgroup_mutex
);
72 static u64
current_css_set_refcount_read(struct cgroup_subsys_state
*css
,
78 count
= refcount_read(&task_css_set(current
)->refcount
);
83 static int current_css_set_cg_links_read(struct seq_file
*seq
, void *v
)
85 struct cgrp_cset_link
*link
;
89 name_buf
= kmalloc(NAME_MAX
+ 1, GFP_KERNEL
);
93 spin_lock_irq(&css_set_lock
);
95 cset
= rcu_dereference(current
->cgroups
);
96 list_for_each_entry(link
, &cset
->cgrp_links
, cgrp_link
) {
97 struct cgroup
*c
= link
->cgrp
;
99 cgroup_name(c
, name_buf
, NAME_MAX
+ 1);
100 seq_printf(seq
, "Root %d group %s\n",
101 c
->root
->hierarchy_id
, name_buf
);
104 spin_unlock_irq(&css_set_lock
);
109 #define MAX_TASKS_SHOWN_PER_CSS 25
110 static int cgroup_css_links_read(struct seq_file
*seq
, void *v
)
112 struct cgroup_subsys_state
*css
= seq_css(seq
);
113 struct cgrp_cset_link
*link
;
114 int dead_cnt
= 0, extra_refs
= 0;
116 spin_lock_irq(&css_set_lock
);
117 list_for_each_entry(link
, &css
->cgroup
->cset_links
, cset_link
) {
118 struct css_set
*cset
= link
->cset
;
119 struct task_struct
*task
;
121 int refcnt
= refcount_read(&cset
->refcount
);
123 seq_printf(seq
, " %d", refcnt
);
124 if (refcnt
- cset
->nr_tasks
> 0) {
125 int extra
= refcnt
- cset
->nr_tasks
;
127 seq_printf(seq
, " +%d", extra
);
129 * Take out the one additional reference in
132 if (cset
== &init_css_set
)
138 list_for_each_entry(task
, &cset
->tasks
, cg_list
) {
139 if (count
++ <= MAX_TASKS_SHOWN_PER_CSS
)
140 seq_printf(seq
, " task %d\n",
144 list_for_each_entry(task
, &cset
->mg_tasks
, cg_list
) {
145 if (count
++ <= MAX_TASKS_SHOWN_PER_CSS
)
146 seq_printf(seq
, " task %d\n",
149 /* show # of overflowed tasks */
150 if (count
> MAX_TASKS_SHOWN_PER_CSS
)
151 seq_printf(seq
, " ... (%d)\n",
152 count
- MAX_TASKS_SHOWN_PER_CSS
);
155 seq_puts(seq
, " [dead]\n");
159 WARN_ON(count
!= cset
->nr_tasks
);
161 spin_unlock_irq(&css_set_lock
);
163 if (!dead_cnt
&& !extra_refs
)
168 seq_printf(seq
, "extra references = %d\n", extra_refs
);
170 seq_printf(seq
, "dead css_sets = %d\n", dead_cnt
);
175 static int cgroup_subsys_states_read(struct seq_file
*seq
, void *v
)
177 struct cgroup
*cgrp
= seq_css(seq
)->cgroup
;
178 struct cgroup_subsys
*ss
;
179 struct cgroup_subsys_state
*css
;
183 mutex_lock(&cgroup_mutex
);
184 for_each_subsys(ss
, i
) {
185 css
= rcu_dereference_check(cgrp
->subsys
[ss
->id
], true);
191 /* Show the parent CSS if applicable*/
193 snprintf(pbuf
, sizeof(pbuf
) - 1, " P=%d",
195 seq_printf(seq
, "%2d: %-4s\t- %lx[%d] %d%s\n", ss
->id
, ss
->name
,
196 (unsigned long)css
, css
->id
,
197 atomic_read(&css
->online_cnt
), pbuf
);
199 mutex_unlock(&cgroup_mutex
);
203 static int cgroup_masks_read(struct seq_file
*seq
, void *v
)
205 struct cgroup
*cgrp
= seq_css(seq
)->cgroup
;
206 struct cgroup_subsys
*ss
;
212 { &cgrp
->subtree_control
, "subtree_control" },
213 { &cgrp
->subtree_ss_mask
, "subtree_ss_mask" },
216 mutex_lock(&cgroup_mutex
);
217 for (i
= 0; i
< ARRAY_SIZE(mask_list
); i
++) {
218 u16 mask
= *mask_list
[i
].mask
;
221 seq_printf(seq
, "%-17s: ", mask_list
[i
].name
);
222 for_each_subsys(ss
, j
) {
223 if (!(mask
& (1 << ss
->id
)))
227 seq_puts(seq
, ss
->name
);
233 mutex_unlock(&cgroup_mutex
);
237 static u64
releasable_read(struct cgroup_subsys_state
*css
, struct cftype
*cft
)
239 return (!cgroup_is_populated(css
->cgroup
) &&
240 !css_has_online_children(&css
->cgroup
->self
));
243 static struct cftype debug_files
[] = {
246 .read_u64
= debug_taskcount_read
,
250 .name
= "current_css_set",
251 .seq_show
= current_css_set_read
,
252 .flags
= CFTYPE_ONLY_ON_ROOT
,
256 .name
= "current_css_set_refcount",
257 .read_u64
= current_css_set_refcount_read
,
258 .flags
= CFTYPE_ONLY_ON_ROOT
,
262 .name
= "current_css_set_cg_links",
263 .seq_show
= current_css_set_cg_links_read
,
264 .flags
= CFTYPE_ONLY_ON_ROOT
,
268 .name
= "cgroup_css_links",
269 .seq_show
= cgroup_css_links_read
,
273 .name
= "cgroup_subsys_states",
274 .seq_show
= cgroup_subsys_states_read
,
278 .name
= "cgroup_masks",
279 .seq_show
= cgroup_masks_read
,
283 .name
= "releasable",
284 .read_u64
= releasable_read
,
290 struct cgroup_subsys debug_cgrp_subsys
= {
291 .css_alloc
= debug_css_alloc
,
292 .css_free
= debug_css_free
,
293 .legacy_cftypes
= debug_files
,
294 .dfl_cftypes
= debug_files
,