GitHub Repository: torvalds/linux
Path: blob/master/kernel/cgroup/debug.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Debug controller
 *
 * WARNING: This controller is for cgroup core debugging only.
 * Its interfaces are unstable and subject to changes at any time.
 */
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "cgroup-internal.h"

static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

/*
 * debug_taskcount_read - return the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 */
static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

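/*
 * current_css_set_read - dump the css_set of the reading task.
 *
 * Prints the css_set pointer, its reference count (with any excess over the
 * task count shown as "+N") and the css attached for each subsystem.
 */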
static int current_css_set_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct css_set *cset;
	struct cgroup_subsys *ss;
	struct cgroup_subsys_state *css;
	int i, refcnt;

	if (!cgroup_kn_lock_live(of->kn, false))
		return -ENODEV;

	spin_lock_irq(&css_set_lock);
	cset = task_css_set(current);
	refcnt = refcount_read(&cset->refcount);
	seq_printf(seq, "css_set %pK %d", cset, refcnt);
	if (refcnt > cset->nr_tasks)
		seq_printf(seq, " +%d", refcnt - cset->nr_tasks);
	seq_puts(seq, "\n");

	/*
	 * Print the css'es stored in the current css_set.
	 */
	for_each_subsys(ss, i) {
		css = cset->subsys[ss->id];
		if (!css)
			continue;
		seq_printf(seq, "%2d: %-4s\t- %p[%d]\n", ss->id, ss->name,
			   css, css->id);
	}
	spin_unlock_irq(&css_set_lock);
	cgroup_kn_unlock(of->kn);
	return 0;
}

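/* Reference count of the reading task's css_set. */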
static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = refcount_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

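/*
 * current_css_set_cg_links_read - list the cgroups linked to the reading
 * task's css_set, one "Root <hierarchy_id> group <name>" line per link.
 */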
static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	spin_lock_irq(&css_set_lock);
	cset = task_css_set(current);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	spin_unlock_irq(&css_set_lock);
	kfree(name_buf);
	return 0;
}

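/*
 * cgroup_css_links_read - walk every css_set linked to this cgroup and print
 * its domain/threaded relationships, reference counts and member tasks
 * (capped at MAX_TASKS_SHOWN_PER_CSS per css_set).
 */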
#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;
	int dead_cnt = 0, extra_refs = 0, threaded_csets = 0;

	spin_lock_irq(&css_set_lock);

	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;
		int refcnt = refcount_read(&cset->refcount);

		/*
		 * Print out the proc_cset and threaded_cset relationship
		 * and highlight difference between refcount and task_count.
		 */
		seq_printf(seq, "css_set %pK", cset);
		if (rcu_dereference_protected(cset->dom_cset, 1) != cset) {
			threaded_csets++;
			seq_printf(seq, "=>%pK", cset->dom_cset);
		}
		if (!list_empty(&cset->threaded_csets)) {
			struct css_set *tcset;
			int idx = 0;

			list_for_each_entry(tcset, &cset->threaded_csets,
					    threaded_csets_node) {
				seq_puts(seq, idx ? "," : "<=");
				seq_printf(seq, "%pK", tcset);
				idx++;
			}
		} else {
			seq_printf(seq, " %d", refcnt);
			if (refcnt - cset->nr_tasks > 0) {
				int extra = refcnt - cset->nr_tasks;

				seq_printf(seq, " +%d", extra);
				/*
				 * Take out the one additional reference in
				 * init_css_set.
				 */
				if (cset == &init_css_set)
					extra--;
				extra_refs += extra;
			}
		}
		seq_puts(seq, "\n");

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
				seq_printf(seq, " task %d\n",
					   task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
				seq_printf(seq, " task %d\n",
					   task_pid_vnr(task));
		}
		/* show # of overflowed tasks */
		if (count > MAX_TASKS_SHOWN_PER_CSS)
			seq_printf(seq, " ... (%d)\n",
				   count - MAX_TASKS_SHOWN_PER_CSS);

		if (cset->dead) {
			seq_puts(seq, " [dead]\n");
			dead_cnt++;
		}

		WARN_ON(count != cset->nr_tasks);
	}
	spin_unlock_irq(&css_set_lock);

	if (!dead_cnt && !extra_refs && !threaded_csets)
		return 0;

	seq_puts(seq, "\n");
	if (threaded_csets)
		seq_printf(seq, "threaded css_sets = %d\n", threaded_csets);
	if (extra_refs)
		seq_printf(seq, "extra references = %d\n", extra_refs);
	if (dead_cnt)
		seq_printf(seq, "dead css_sets = %d\n", dead_cnt);

	return 0;
}

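/*
 * cgroup_subsys_states_read - print each css attached to this cgroup along
 * with its id, online count and, if any, its parent css id.
 */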
static int cgroup_subsys_states_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct cgroup *cgrp;
	struct cgroup_subsys *ss;
	struct cgroup_subsys_state *css;
	char pbuf[16];
	int i;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	for_each_subsys(ss, i) {
		css = rcu_dereference_check(cgrp->subsys[ss->id], true);
		if (!css)
			continue;

		pbuf[0] = '\0';

		/* Show the parent CSS if applicable */
		if (css->parent)
			snprintf(pbuf, sizeof(pbuf) - 1, " P=%d",
				 css->parent->id);
		seq_printf(seq, "%2d: %-4s\t- %p[%d] %d%s\n", ss->id, ss->name,
			   css, css->id,
			   atomic_read(&css->online_cnt), pbuf);
	}

	cgroup_kn_unlock(of->kn);
	return 0;
}

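/*
 * Helper for cgroup_masks_read() below: print the subsystems whose bits are
 * set in @mask as a comma-separated list following @name.
 */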
static void cgroup_masks_read_one(struct seq_file *seq, const char *name,
				  u16 mask)
{
	struct cgroup_subsys *ss;
	int ssid;
	bool first = true;

	seq_printf(seq, "%-17s: ", name);
	for_each_subsys(ss, ssid) {
		if (!(mask & (1 << ssid)))
			continue;
		if (!first)
			seq_puts(seq, ", ");
		seq_puts(seq, ss->name);
		first = false;
	}
	seq_putc(seq, '\n');
}

static int cgroup_masks_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct cgroup *cgrp;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	cgroup_masks_read_one(seq, "subtree_control", cgrp->subtree_control);
	cgroup_masks_read_one(seq, "subtree_ss_mask", cgrp->subtree_ss_mask);

	cgroup_kn_unlock(of->kn);
	return 0;
}

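/*
 * releasable_read - returns 1 if the cgroup is not populated and has no
 * online children, 0 otherwise.
 */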
static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return (!cgroup_is_populated(css->cgroup) &&
		!css_has_online_children(&css->cgroup->self));
}

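/* Interface files exposed on v1 (legacy) hierarchies. */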
static struct cftype debug_legacy_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.seq_show = current_css_set_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "cgroup_subsys_states",
		.seq_show = cgroup_subsys_states_read,
	},

	{
		.name = "cgroup_masks",
		.seq_show = cgroup_masks_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};

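/*
 * Interface files for the v2 (default) hierarchy; installed as dfl_cftypes
 * by enable_debug_cgroup() below.
 */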
static struct cftype debug_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.seq_show = current_css_set_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "csses",
		.seq_show = cgroup_subsys_states_read,
	},

	{
		.name = "masks",
		.seq_show = cgroup_masks_read,
	},

	{ }	/* terminate */
};

struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.legacy_cftypes = debug_legacy_files,
};

/*
 * On v2, debug is an implicit controller enabled by the "cgroup_debug" boot
 * parameter.
 */
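/*
 * Typical use (not enforced here): boot with "cgroup_debug" on the kernel
 * command line; the files above then appear in v2 cgroups with the usual
 * controller-name prefix, e.g. "debug.csses" and "debug.css_links".
 */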
void __init enable_debug_cgroup(void)
{
	debug_cgrp_subsys.dfl_cftypes = debug_files;
	debug_cgrp_subsys.implicit_on_dfl = true;
	debug_cgrp_subsys.threaded = true;
}