// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/math64.h>
#include <linux/ktime.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

#include "super.h"

#ifdef CONFIG_DEBUG_FS

#include "mds_client.h"
#include "metric.h"

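/*
 * Dump the cached MDS map: epoch, root rank, max_mds, the session
 * timeout/autoclose values, and the address and state of each possible
 * MDS rank.
 */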
static int mdsmap_show(struct seq_file *s, void *p)
{
	int i;
	struct ceph_fs_client *fsc = s->private;
	struct ceph_mdsmap *mdsmap;

	if (!fsc->mdsc || !fsc->mdsc->mdsmap)
		return 0;
	mdsmap = fsc->mdsc->mdsmap;
	seq_printf(s, "epoch %d\n", mdsmap->m_epoch);
	seq_printf(s, "root %d\n", mdsmap->m_root);
	seq_printf(s, "max_mds %d\n", mdsmap->m_max_mds);
	seq_printf(s, "session_timeout %d\n", mdsmap->m_session_timeout);
	seq_printf(s, "session_autoclose %d\n", mdsmap->m_session_autoclose);
	for (i = 0; i < mdsmap->possible_max_rank; i++) {
		struct ceph_entity_addr *addr = &mdsmap->m_info[i].addr;
		int state = mdsmap->m_info[i].state;
		seq_printf(s, "\tmds%d\t%s\t(%s)\n", i,
			   ceph_pr_addr(addr),
			   ceph_mds_state_name(state));
	}
	return 0;
}

/*
 * mdsc debugfs
 */
static int mdsc_show(struct seq_file *s, void *p)
{
	struct ceph_fs_client *fsc = s->private;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct rb_node *rp;
	int pathlen = 0;
	u64 pathbase;
	char *path;

	mutex_lock(&mdsc->mutex);
	for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) {
		req = rb_entry(rp, struct ceph_mds_request, r_node);

		if (req->r_request && req->r_session)
			seq_printf(s, "%lld\tmds%d\t", req->r_tid,
				   req->r_session->s_mds);
		else if (!req->r_request)
			seq_printf(s, "%lld\t(no request)\t", req->r_tid);
		else
			seq_printf(s, "%lld\t(no session)\t", req->r_tid);

		seq_printf(s, "%s", ceph_mds_op_name(req->r_op));

		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
			seq_puts(s, "\t(unsafe)");
		else
			seq_puts(s, "\t");

		if (req->r_inode) {
			seq_printf(s, " #%llx", ceph_ino(req->r_inode));
		} else if (req->r_dentry) {
			path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
						    &pathbase, 0);
			if (IS_ERR(path))
				path = NULL;
			spin_lock(&req->r_dentry->d_lock);
			seq_printf(s, " #%llx/%pd (%s)",
				   ceph_ino(d_inode(req->r_dentry->d_parent)),
				   req->r_dentry,
				   path ? path : "");
			spin_unlock(&req->r_dentry->d_lock);
			ceph_mdsc_free_path(path, pathlen);
		} else if (req->r_path1) {
			seq_printf(s, " #%llx/%s", req->r_ino1.ino,
				   req->r_path1);
		} else {
			seq_printf(s, " #%llx", req->r_ino1.ino);
		}

		if (req->r_old_dentry) {
			path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
						    &pathbase, 0);
			if (IS_ERR(path))
				path = NULL;
			spin_lock(&req->r_old_dentry->d_lock);
			seq_printf(s, " #%llx/%pd (%s)",
				   req->r_old_dentry_dir ?
				   ceph_ino(req->r_old_dentry_dir) : 0,
				   req->r_old_dentry,
				   path ? path : "");
			spin_unlock(&req->r_old_dentry->d_lock);
			ceph_mdsc_free_path(path, pathlen);
		} else if (req->r_path2 && req->r_op != CEPH_MDS_OP_SYMLINK) {
			if (req->r_ino2.ino)
				seq_printf(s, " #%llx/%s", req->r_ino2.ino,
					   req->r_path2);
			else
				seq_printf(s, " %s", req->r_path2);
		}

		seq_puts(s, "\n");
	}
	mutex_unlock(&mdsc->mutex);

	return 0;
}

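/*
 * Print one latency metric line: request count plus average, minimum,
 * maximum and standard deviation latency in microseconds.  The stdev is
 * derived from the running sum of squared deviations (sq) over
 * (total - 1) samples.
 */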
#define CEPH_LAT_METRIC_SHOW(name, total, avg, min, max, sq) {		\
	s64 _total, _avg, _min, _max, _sq, _st;				\
	_avg = ktime_to_us(avg);					\
	_min = ktime_to_us(min == KTIME_MAX ? 0 : min);			\
	_max = ktime_to_us(max);					\
	_total = total - 1;						\
	_sq = _total > 0 ? DIV64_U64_ROUND_CLOSEST(sq, _total) : 0;	\
	_st = int_sqrt64(_sq);						\
	_st = ktime_to_us(_st);						\
	seq_printf(s, "%-14s%-12lld%-16lld%-16lld%-16lld%lld\n",	\
		   name, total, _avg, _min, _max, _st);			\
}

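/*
 * Print one I/O size metric line: request count plus average, minimum,
 * maximum and total size in bytes.  An untouched minimum (U64_MAX) is
 * shown as 0.
 */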
#define CEPH_SZ_METRIC_SHOW(name, total, avg, min, max, sum) {		\
	u64 _min = min == U64_MAX ? 0 : min;				\
	seq_printf(s, "%-14s%-12lld%-16llu%-16llu%-16llu%llu\n",	\
		   name, total, avg, _min, max, sum);			\
}

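/*
 * Dump the client-side performance counters: inode/file/cap totals,
 * read/write/metadata latencies, read/write sizes, and the dentry lease
 * and cap hit/miss counts.  Each latency/size block is sampled under its
 * metric lock so the printed values are self-consistent.
 */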
static int metric_show(struct seq_file *s, void *p)
{
	struct ceph_fs_client *fsc = s->private;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_client_metric *m = &mdsc->metric;
	int nr_caps = 0;
	s64 total, sum, avg, min, max, sq;
	u64 sum_sz, avg_sz, min_sz, max_sz;

	sum = percpu_counter_sum(&m->total_inodes);
	seq_printf(s, "item                               total\n");
	seq_printf(s, "------------------------------------------\n");
	seq_printf(s, "%-35s%lld / %lld\n", "opened files / total inodes",
		   atomic64_read(&m->opened_files), sum);
	seq_printf(s, "%-35s%lld / %lld\n", "pinned i_caps / total inodes",
		   atomic64_read(&m->total_caps), sum);
	seq_printf(s, "%-35s%lld / %lld\n", "opened inodes / total inodes",
		   percpu_counter_sum(&m->opened_inodes), sum);

	seq_printf(s, "\n");
	seq_printf(s, "item          total       avg_lat(us)     min_lat(us)     max_lat(us)     stdev(us)\n");
	seq_printf(s, "-----------------------------------------------------------------------------------\n");

	spin_lock(&m->read_metric_lock);
	total = m->total_reads;
	sum = m->read_latency_sum;
	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
	min = m->read_latency_min;
	max = m->read_latency_max;
	sq = m->read_latency_sq_sum;
	spin_unlock(&m->read_metric_lock);
	CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);

	spin_lock(&m->write_metric_lock);
	total = m->total_writes;
	sum = m->write_latency_sum;
	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
	min = m->write_latency_min;
	max = m->write_latency_max;
	sq = m->write_latency_sq_sum;
	spin_unlock(&m->write_metric_lock);
	CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);

	spin_lock(&m->metadata_metric_lock);
	total = m->total_metadatas;
	sum = m->metadata_latency_sum;
	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
	min = m->metadata_latency_min;
	max = m->metadata_latency_max;
	sq = m->metadata_latency_sq_sum;
	spin_unlock(&m->metadata_metric_lock);
	CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);

	seq_printf(s, "\n");
	seq_printf(s, "item          total       avg_sz(bytes)   min_sz(bytes)   max_sz(bytes)  total_sz(bytes)\n");
	seq_printf(s, "----------------------------------------------------------------------------------------\n");

	spin_lock(&m->read_metric_lock);
	total = m->total_reads;
	sum_sz = m->read_size_sum;
	avg_sz = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum_sz, total) : 0;
	min_sz = m->read_size_min;
	max_sz = m->read_size_max;
	spin_unlock(&m->read_metric_lock);
	CEPH_SZ_METRIC_SHOW("read", total, avg_sz, min_sz, max_sz, sum_sz);

	spin_lock(&m->write_metric_lock);
	total = m->total_writes;
	sum_sz = m->write_size_sum;
	avg_sz = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum_sz, total) : 0;
	min_sz = m->write_size_min;
	max_sz = m->write_size_max;
	spin_unlock(&m->write_metric_lock);
	CEPH_SZ_METRIC_SHOW("write", total, avg_sz, min_sz, max_sz, sum_sz);

	seq_printf(s, "\n");
	seq_printf(s, "item          total           miss            hit\n");
	seq_printf(s, "-------------------------------------------------\n");

	seq_printf(s, "%-14s%-16lld%-16lld%lld\n", "d_lease",
		   atomic64_read(&m->total_dentries),
		   percpu_counter_sum(&m->d_lease_mis),
		   percpu_counter_sum(&m->d_lease_hit));

	nr_caps = atomic64_read(&m->total_caps);
	seq_printf(s, "%-14s%-16d%-16lld%lld\n", "caps", nr_caps,
		   percpu_counter_sum(&m->i_caps_mis),
		   percpu_counter_sum(&m->i_caps_hit));

	return 0;
}

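/*
 * Per-cap callback for caps_show(): print one line with the inode number,
 * the MDS holding the cap, and the issued and implemented cap strings.
 */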
static int caps_show_cb(struct inode *inode, struct ceph_cap *cap, void *p)
{
	struct seq_file *s = p;

	seq_printf(s, "0x%-17llx%-3d%-17s%-17s\n", ceph_ino(inode),
		   cap->session->s_mds,
		   ceph_cap_string(cap->issued),
		   ceph_cap_string(cap->implemented));
	return 0;
}

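/*
 * Dump capability state: the reservation counters, every cap held across
 * all MDS sessions, and the tasks currently waiting for caps.  The mdsc
 * mutex is dropped while each session's caps are iterated under the
 * session mutex.
 */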
static int caps_show(struct seq_file *s, void *p)
{
	struct ceph_fs_client *fsc = s->private;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	int total, avail, used, reserved, min, i;
	struct cap_wait *cw;

	ceph_reservation_status(fsc, &total, &avail, &used, &reserved, &min);
	seq_printf(s, "total\t\t%d\n"
		   "avail\t\t%d\n"
		   "used\t\t%d\n"
		   "reserved\t%d\n"
		   "min\t\t%d\n\n",
		   total, avail, used, reserved, min);
	seq_printf(s, "ino              mds  issued           implemented\n");
	seq_printf(s, "--------------------------------------------------\n");

	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *session;

		session = __ceph_lookup_mds_session(mdsc, i);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		ceph_iterate_session_caps(session, caps_show_cb, s);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	seq_printf(s, "\n\nWaiters:\n--------\n");
	seq_printf(s, "tgid         ino                need             want\n");
	seq_printf(s, "-----------------------------------------------------\n");

	spin_lock(&mdsc->caps_list_lock);
	list_for_each_entry(cw, &mdsc->cap_wait_list, list) {
		seq_printf(s, "%-13d0x%-17llx%-17s%-17s\n", cw->tgid, cw->ino,
			   ceph_cap_string(cw->need),
			   ceph_cap_string(cw->want));
	}
	spin_unlock(&mdsc->caps_list_lock);

	return 0;
}

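/*
 * Dump the client's auth global_id, the entity name used for the mount
 * (-o name), and the rank and state of every open MDS session.
 */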
static int mds_sessions_show(struct seq_file *s, void *ptr)
{
	struct ceph_fs_client *fsc = s->private;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_auth_client *ac = fsc->client->monc.auth;
	struct ceph_options *opt = fsc->client->options;
	int mds;

	mutex_lock(&mdsc->mutex);

	/* The 'num' portion of an 'entity name' */
	seq_printf(s, "global_id %llu\n", ac->global_id);

	/* The -o name mount argument */
	seq_printf(s, "name \"%s\"\n", opt->name ? opt->name : "");

	/* The list of MDS session rank+state */
	for (mds = 0; mds < mdsc->max_sessions; mds++) {
		struct ceph_mds_session *session =
			__ceph_lookup_mds_session(mdsc, mds);
		if (!session) {
			continue;
		}
		mutex_unlock(&mdsc->mutex);
		seq_printf(s, "mds.%d %s\n",
			   session->s_mds,
			   ceph_session_state_name(session->s_state));

		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	return 0;
}

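/*
 * Dump the client instance (entity name and address) and whether this
 * client is currently blocklisted by the cluster.
 */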
static int status_show(struct seq_file *s, void *p)
{
	struct ceph_fs_client *fsc = s->private;
	struct ceph_entity_inst *inst = &fsc->client->msgr.inst;
	struct ceph_entity_addr *client_addr = ceph_client_addr(fsc->client);

	seq_printf(s, "instance: %s.%lld %s/%u\n", ENTITY_NAME(inst->name),
		   ceph_pr_addr(client_addr), le32_to_cpu(client_addr->nonce));
	seq_printf(s, "blocklisted: %s\n", fsc->blocklisted ? "true" : "false");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mdsmap);
DEFINE_SHOW_ATTRIBUTE(mdsc);
DEFINE_SHOW_ATTRIBUTE(caps);
DEFINE_SHOW_ATTRIBUTE(mds_sessions);
DEFINE_SHOW_ATTRIBUTE(metric);
DEFINE_SHOW_ATTRIBUTE(status);


/*
 * debugfs
 */
static int congestion_kb_set(void *data, u64 val)
{
	struct ceph_fs_client *fsc = (struct ceph_fs_client *)data;

	fsc->mount_options->congestion_kb = (int)val;
	return 0;
}

static int congestion_kb_get(void *data, u64 *val)
{
	struct ceph_fs_client *fsc = (struct ceph_fs_client *)data;

	*val = (u64)fsc->mount_options->congestion_kb;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get,
			congestion_kb_set, "%llu\n");


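/*
 * Remove this mount's debugfs entries.
 */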
void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
{
	dout("ceph_fs_debugfs_cleanup\n");
	debugfs_remove(fsc->debugfs_bdi);
	debugfs_remove(fsc->debugfs_congestion_kb);
	debugfs_remove(fsc->debugfs_mdsmap);
	debugfs_remove(fsc->debugfs_mds_sessions);
	debugfs_remove(fsc->debugfs_caps);
	debugfs_remove(fsc->debugfs_metric);
	debugfs_remove(fsc->debugfs_mdsc);
}

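/*
 * Create the per-mount debugfs entries under the ceph client's debugfs
 * directory: writeback_congestion_kb, a "bdi" symlink, and the mdsmap,
 * mds_sessions, mdsc, metrics, caps and status files.
 */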
void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
{
	char name[100];

	dout("ceph_fs_debugfs_init\n");
	fsc->debugfs_congestion_kb =
		debugfs_create_file("writeback_congestion_kb",
				    0600,
				    fsc->client->debugfs_dir,
				    fsc,
				    &congestion_kb_fops);

	snprintf(name, sizeof(name), "../../bdi/%s",
		 bdi_dev_name(fsc->sb->s_bdi));
	fsc->debugfs_bdi =
		debugfs_create_symlink("bdi",
				       fsc->client->debugfs_dir,
				       name);

	fsc->debugfs_mdsmap = debugfs_create_file("mdsmap",
						  0400,
						  fsc->client->debugfs_dir,
						  fsc,
						  &mdsmap_fops);

	fsc->debugfs_mds_sessions = debugfs_create_file("mds_sessions",
							0400,
							fsc->client->debugfs_dir,
							fsc,
							&mds_sessions_fops);

	fsc->debugfs_mdsc = debugfs_create_file("mdsc",
						0400,
						fsc->client->debugfs_dir,
						fsc,
						&mdsc_fops);

	fsc->debugfs_metric = debugfs_create_file("metrics",
						  0400,
						  fsc->client->debugfs_dir,
						  fsc,
						  &metric_fops);

	fsc->debugfs_caps = debugfs_create_file("caps",
						0400,
						fsc->client->debugfs_dir,
						fsc,
						&caps_fops);

	fsc->debugfs_status = debugfs_create_file("status",
						  0400,
						  fsc->client->debugfs_dir,
						  fsc,
						  &status_fops);
}


#else  /* CONFIG_DEBUG_FS */

void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
{
}

void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
{
}

#endif  /* CONFIG_DEBUG_FS */