/*
 * Debug controller
 *
 * WARNING: This controller is for cgroup core debugging only.
 * Its interfaces are unstable and subject to changes at any time.
 */
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "cgroup-internal.h"

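/*
 * Example usage (illustrative only; exact paths and mount options depend
 * on the local setup and on CONFIG_CGROUP_DEBUG being enabled):
 *
 *	# v1: mount a dedicated debug hierarchy and read a file
 *	mount -t cgroup -o debug none /sys/fs/cgroup/debug
 *	cat /sys/fs/cgroup/debug/debug.taskcount
 *
 * With this controller the same debug.* files are also exposed on the
 * cgroup v2 (default) hierarchy once the controller is enabled there.
 */
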
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

        if (!css)
                return ERR_PTR(-ENOMEM);

        return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
        kfree(css);
}

/*
 * debug_taskcount_read - return the number of tasks in a cgroup.
 * @css: the css of the cgroup in question
 * @cft: the control file being read
 */
static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
                                struct cftype *cft)
{
        return cgroup_task_count(css->cgroup);
}

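/*
 * current_css_set_read - dump the css_set of the task reading this file.
 *
 * The first line identifies the css_set ("css_set <ptr> <refcount>", with
 * a " +N" suffix when the refcount exceeds the task count), followed by
 * one line per populated subsystem: "<id>: <name> - <css ptr>[<css id>]".
 */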
static int current_css_set_read(struct seq_file *seq, void *v)
{
        struct css_set *cset;
        struct cgroup_subsys *ss;
        struct cgroup_subsys_state *css;
        int i, refcnt;

        mutex_lock(&cgroup_mutex);
        spin_lock_irq(&css_set_lock);
        rcu_read_lock();
        cset = rcu_dereference(current->cgroups);
        refcnt = refcount_read(&cset->refcount);
        seq_printf(seq, "css_set %pK %d", cset, refcnt);
        if (refcnt > cset->nr_tasks)
                seq_printf(seq, " +%d", refcnt - cset->nr_tasks);
        seq_puts(seq, "\n");

        /*
         * Print the css'es stored in the current css_set.
         */
        for_each_subsys(ss, i) {
                css = cset->subsys[ss->id];
                if (!css)
                        continue;
                seq_printf(seq, "%2d: %-4s\t- %lx[%d]\n", ss->id, ss->name,
                           (unsigned long)css, css->id);
        }
        rcu_read_unlock();
        spin_unlock_irq(&css_set_lock);
        mutex_unlock(&cgroup_mutex);
        return 0;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
                                         struct cftype *cft)
{
        u64 count;

        rcu_read_lock();
        count = refcount_read(&task_css_set(current)->refcount);
        rcu_read_unlock();
        return count;
}

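/*
 * current_css_set_cg_links_read - for each hierarchy the current css_set
 * is linked into, print the hierarchy id and the name of the cgroup the
 * task belongs to there ("Root <id> group <name>").
 */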
static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
        struct cgrp_cset_link *link;
        struct css_set *cset;
        char *name_buf;

        name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
        if (!name_buf)
                return -ENOMEM;

        spin_lock_irq(&css_set_lock);
        rcu_read_lock();
        cset = rcu_dereference(current->cgroups);
        list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
                struct cgroup *c = link->cgrp;

                cgroup_name(c, name_buf, NAME_MAX + 1);
                seq_printf(seq, "Root %d group %s\n",
                           c->root->hierarchy_id, name_buf);
        }
        rcu_read_unlock();
        spin_unlock_irq(&css_set_lock);
        kfree(name_buf);
        return 0;
}

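/*
 * cgroup_css_links_read - walk every css_set linked to this cgroup and
 * print its refcount (plus any references beyond the task count), the
 * pids of its member and migrating tasks (capped by
 * MAX_TASKS_SHOWN_PER_CSS), and whether the css_set is dead.  Totals of
 * extra references and dead css_sets, if any, are appended at the end.
 */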
#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
        struct cgroup_subsys_state *css = seq_css(seq);
        struct cgrp_cset_link *link;
        int dead_cnt = 0, extra_refs = 0;

        spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
                struct css_set *cset = link->cset;
                struct task_struct *task;
                int count = 0;
                int refcnt = refcount_read(&cset->refcount);

                seq_printf(seq, " %d", refcnt);
                if (refcnt - cset->nr_tasks > 0) {
                        int extra = refcnt - cset->nr_tasks;

                        seq_printf(seq, " +%d", extra);
                        /*
                         * Take out the one additional reference in
                         * init_css_set.
                         */
                        if (cset == &init_css_set)
                                extra--;
                        extra_refs += extra;
                }
                seq_puts(seq, "\n");

                list_for_each_entry(task, &cset->tasks, cg_list) {
                        if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
                                seq_printf(seq, " task %d\n",
                                           task_pid_vnr(task));
                }

                list_for_each_entry(task, &cset->mg_tasks, cg_list) {
                        if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
                                seq_printf(seq, " task %d\n",
                                           task_pid_vnr(task));
                }
                /* show # of overflowed tasks */
                if (count > MAX_TASKS_SHOWN_PER_CSS)
                        seq_printf(seq, " ... (%d)\n",
                                   count - MAX_TASKS_SHOWN_PER_CSS);

                if (cset->dead) {
                        seq_puts(seq, " [dead]\n");
                        dead_cnt++;
                }

                WARN_ON(count != cset->nr_tasks);
        }
        spin_unlock_irq(&css_set_lock);

        if (!dead_cnt && !extra_refs)
                return 0;

        seq_puts(seq, "\n");
        if (extra_refs)
                seq_printf(seq, "extra references = %d\n", extra_refs);
        if (dead_cnt)
                seq_printf(seq, "dead css_sets = %d\n", dead_cnt);

        return 0;
}

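/*
 * cgroup_subsys_states_read - for each subsystem with a css attached to
 * this cgroup, print the css pointer, css id, online count and, when a
 * parent exists, the parent css id ("P=<id>").
 */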
static int cgroup_subsys_states_read(struct seq_file *seq, void *v)
{
        struct cgroup *cgrp = seq_css(seq)->cgroup;
        struct cgroup_subsys *ss;
        struct cgroup_subsys_state *css;
        char pbuf[16];
        int i;

        mutex_lock(&cgroup_mutex);
        for_each_subsys(ss, i) {
                css = rcu_dereference_check(cgrp->subsys[ss->id], true);
                if (!css)
                        continue;

                pbuf[0] = '\0';

                /* Show the parent CSS if applicable */
                if (css->parent)
                        snprintf(pbuf, sizeof(pbuf) - 1, " P=%d",
                                 css->parent->id);
                seq_printf(seq, "%2d: %-4s\t- %lx[%d] %d%s\n", ss->id, ss->name,
                           (unsigned long)css, css->id,
                           atomic_read(&css->online_cnt), pbuf);
        }
        mutex_unlock(&cgroup_mutex);
        return 0;
}

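/*
 * cgroup_masks_read - decode the cgroup's subtree_control and
 * subtree_ss_mask bitmasks into the names of the subsystems they enable.
 */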
static int cgroup_masks_read(struct seq_file *seq, void *v)
{
        struct cgroup *cgrp = seq_css(seq)->cgroup;
        struct cgroup_subsys *ss;
        int i, j;
        struct {
                u16 *mask;
                char *name;
        } mask_list[] = {
                { &cgrp->subtree_control, "subtree_control" },
                { &cgrp->subtree_ss_mask, "subtree_ss_mask" },
        };

        mutex_lock(&cgroup_mutex);
        for (i = 0; i < ARRAY_SIZE(mask_list); i++) {
                u16 mask = *mask_list[i].mask;
                bool first = true;

                seq_printf(seq, "%-17s: ", mask_list[i].name);
                for_each_subsys(ss, j) {
                        if (!(mask & (1 << ss->id)))
                                continue;
                        if (!first)
                                seq_puts(seq, ", ");
                        seq_puts(seq, ss->name);
                        first = false;
                }
                seq_putc(seq, '\n');
        }

        mutex_unlock(&cgroup_mutex);
        return 0;
}

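/*
 * releasable_read - return 1 when the cgroup is neither populated with
 * tasks nor has online child csses.
 */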
static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
        return (!cgroup_is_populated(css->cgroup) &&
                !css_has_online_children(&css->cgroup->self));
}

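/*
 * Control files created by the debug controller.  Entries flagged
 * CFTYPE_ONLY_ON_ROOT appear only in the root cgroup of a hierarchy.
 */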
static struct cftype debug_files[] = {
        {
                .name = "taskcount",
                .read_u64 = debug_taskcount_read,
        },

        {
                .name = "current_css_set",
                .seq_show = current_css_set_read,
                .flags = CFTYPE_ONLY_ON_ROOT,
        },

        {
                .name = "current_css_set_refcount",
                .read_u64 = current_css_set_refcount_read,
                .flags = CFTYPE_ONLY_ON_ROOT,
        },

        {
                .name = "current_css_set_cg_links",
                .seq_show = current_css_set_cg_links_read,
                .flags = CFTYPE_ONLY_ON_ROOT,
        },

        {
                .name = "cgroup_css_links",
                .seq_show = cgroup_css_links_read,
        },

        {
                .name = "cgroup_subsys_states",
                .seq_show = cgroup_subsys_states_read,
        },

        {
                .name = "cgroup_masks",
                .seq_show = cgroup_masks_read,
        },

        {
                .name = "releasable",
                .read_u64 = releasable_read,
        },

        { }     /* terminate */
};

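/*
 * The same cftypes are registered as both legacy_cftypes (cgroup v1) and
 * dfl_cftypes (cgroup v2), so the debug files are available regardless of
 * which hierarchy the controller is bound to.
 */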
struct cgroup_subsys debug_cgrp_subsys = {
        .css_alloc = debug_css_alloc,
        .css_free = debug_css_free,
        .legacy_cftypes = debug_files,
        .dfl_cftypes = debug_files,
};