#define pr_fmt(fmt) "drbd debugfs: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/jiffies.h>
#include <linux/list.h>

#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_debugfs.h"


/**********************************************************************
 * Whenever you change the file format, remember to bump the version. *
 **********************************************************************/

static struct dentry *drbd_debugfs_root;
static struct dentry *drbd_debugfs_version;
static struct dentry *drbd_debugfs_resources;
static struct dentry *drbd_debugfs_minors;

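/* Print "\t<age in ms>" if @valid, otherwise "\t-"; used for the optional
 * timestamp columns of the per-request output. */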
static void seq_print_age_or_dash(struct seq_file *m, bool valid, unsigned long dt)
{
        if (valid)
                seq_printf(m, "\t%d", jiffies_to_msecs(dt));
        else
                seq_printf(m, "\t-");
}

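/* Append the name matching the state bit, preceded by the current separator;
 * after the first printed name the separator becomes '|'. */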
static void __seq_print_rq_state_bit(struct seq_file *m,
        bool is_set, char *sep, const char *set_name, const char *unset_name)
{
        if (is_set && set_name) {
                seq_putc(m, *sep);
                seq_puts(m, set_name);
                *sep = '|';
        } else if (!is_set && unset_name) {
                seq_putc(m, *sep);
                seq_puts(m, unset_name);
                *sep = '|';
        }
}

static void seq_print_rq_state_bit(struct seq_file *m,
        bool is_set, char *sep, const char *set_name)
{
        __seq_print_rq_state_bit(m, is_set, sep, set_name, NULL);
}

/* pretty print enum drbd_req_state_bits req->rq_state */
static void seq_print_request_state(struct seq_file *m, struct drbd_request *req)
{
        unsigned int s = req->rq_state;
        char sep = ' ';
        seq_printf(m, "\t0x%08x", s);
        seq_printf(m, "\tmaster: %s", req->master_bio ? "pending" : "completed");

        /* RQ_WRITE ignored, already reported */
        seq_puts(m, "\tlocal:");
        seq_print_rq_state_bit(m, s & RQ_IN_ACT_LOG, &sep, "in-AL");
        seq_print_rq_state_bit(m, s & RQ_POSTPONED, &sep, "postponed");
        seq_print_rq_state_bit(m, s & RQ_COMPLETION_SUSP, &sep, "suspended");
        sep = ' ';
        seq_print_rq_state_bit(m, s & RQ_LOCAL_PENDING, &sep, "pending");
        seq_print_rq_state_bit(m, s & RQ_LOCAL_COMPLETED, &sep, "completed");
        seq_print_rq_state_bit(m, s & RQ_LOCAL_ABORTED, &sep, "aborted");
        seq_print_rq_state_bit(m, s & RQ_LOCAL_OK, &sep, "ok");
        if (sep == ' ')
                seq_puts(m, " -");

        /* for_each_connection ... */
        seq_printf(m, "\tnet:");
        sep = ' ';
        seq_print_rq_state_bit(m, s & RQ_NET_PENDING, &sep, "pending");
        seq_print_rq_state_bit(m, s & RQ_NET_QUEUED, &sep, "queued");
        seq_print_rq_state_bit(m, s & RQ_NET_SENT, &sep, "sent");
        seq_print_rq_state_bit(m, s & RQ_NET_DONE, &sep, "done");
        seq_print_rq_state_bit(m, s & RQ_NET_SIS, &sep, "sis");
        seq_print_rq_state_bit(m, s & RQ_NET_OK, &sep, "ok");
        if (sep == ' ')
                seq_puts(m, " -");

        seq_printf(m, " :");
        sep = ' ';
        seq_print_rq_state_bit(m, s & RQ_EXP_RECEIVE_ACK, &sep, "B");
        seq_print_rq_state_bit(m, s & RQ_EXP_WRITE_ACK, &sep, "C");
        seq_print_rq_state_bit(m, s & RQ_EXP_BARR_ACK, &sep, "barr");
        if (sep == ' ')
                seq_puts(m, " -");
        seq_printf(m, "\n");
}

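/* Print one request on a single line: epoch, sector, size, direction, the
 * timing columns, and the decoded request state.  The RQ_HDR_* fragments
 * defined below form the matching column header. */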
static void seq_print_one_request(struct seq_file *m, struct drbd_request *req, unsigned long now)
{
        /* change anything here, fixup header below! */
        unsigned int s = req->rq_state;

#define RQ_HDR_1 "epoch\tsector\tsize\trw"
        seq_printf(m, "0x%x\t%llu\t%u\t%s",
                req->epoch,
                (unsigned long long)req->i.sector, req->i.size >> 9,
                (s & RQ_WRITE) ? "W" : "R");

#define RQ_HDR_2 "\tstart\tin AL\tsubmit"
        seq_printf(m, "\t%d", jiffies_to_msecs(now - req->start_jif));
        seq_print_age_or_dash(m, s & RQ_IN_ACT_LOG, now - req->in_actlog_jif);
        seq_print_age_or_dash(m, s & RQ_LOCAL_PENDING, now - req->pre_submit_jif);

#define RQ_HDR_3 "\tsent\tacked\tdone"
        seq_print_age_or_dash(m, s & RQ_NET_SENT, now - req->pre_send_jif);
        seq_print_age_or_dash(m, (s & RQ_NET_SENT) && !(s & RQ_NET_PENDING), now - req->acked_jif);
        seq_print_age_or_dash(m, s & RQ_NET_DONE, now - req->net_done_jif);

#define RQ_HDR_4 "\tstate\n"
        seq_print_request_state(m, req);
}
#define RQ_HDR RQ_HDR_1 RQ_HDR_2 RQ_HDR_3 RQ_HDR_4

static void seq_print_minor_vnr_req(struct seq_file *m, struct drbd_request *req, unsigned long now)
{
        seq_printf(m, "%u\t%u\t", req->device->minor, req->device->vnr);
        seq_print_one_request(m, req, now);
}

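/* One line per device with a meta data IO buffer in use: minor, volume,
 * age since start, age since submit (or '-' if not yet submitted), and what
 * the buffer is currently used for. */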
static void seq_print_resource_pending_meta_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
        struct drbd_device *device;
        unsigned int i;

        seq_puts(m, "minor\tvnr\tstart\tsubmit\tintent\n");
        rcu_read_lock();
        idr_for_each_entry(&resource->devices, device, i) {
                struct drbd_md_io tmp;
                /* In theory this is racy,
                 * in the sense that there could have been a
                 * drbd_md_put_buffer(); drbd_md_get_buffer();
                 * between accessing these members here. */
                tmp = device->md_io;
                if (atomic_read(&tmp.in_use)) {
                        seq_printf(m, "%u\t%u\t%d\t",
                                device->minor, device->vnr,
                                jiffies_to_msecs(now - tmp.start_jif));
                        if (time_before(tmp.submit_jif, tmp.start_jif))
                                seq_puts(m, "-\t");
                        else
                                seq_printf(m, "%d\t", jiffies_to_msecs(now - tmp.submit_jif));
                        seq_printf(m, "%s\n", tmp.current_use);
                }
        }
        rcu_read_unlock();
}

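/* Per device: the number of application requests currently waiting for an
 * activity log transaction (ap_actlog_cnt), and the age of the oldest pending
 * write request, provided it has not yet made it into the AL. */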
static void seq_print_waiting_for_AL(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
        struct drbd_device *device;
        unsigned int i;

        seq_puts(m, "minor\tvnr\tage\t#waiting\n");
        rcu_read_lock();
        idr_for_each_entry(&resource->devices, device, i) {
                unsigned long jif;
                struct drbd_request *req;
                int n = atomic_read(&device->ap_actlog_cnt);
                if (n) {
                        spin_lock_irq(&device->resource->req_lock);
                        req = list_first_entry_or_null(&device->pending_master_completion[1],
                                struct drbd_request, req_pending_master_completion);
                        /* if the oldest request does not wait for the activity log
                         * it is not interesting for us here */
                        if (req && !(req->rq_state & RQ_IN_ACT_LOG))
                                jif = req->start_jif;
                        else
                                req = NULL;
                        spin_unlock_irq(&device->resource->req_lock);
                }
                if (n) {
                        seq_printf(m, "%u\t%u\t", device->minor, device->vnr);
                        if (req)
                                seq_printf(m, "%u\t", jiffies_to_msecs(now - jif));
                        else
                                seq_puts(m, "-\t");
                        seq_printf(m, "%u\n", n);
                }
        }
        rcu_read_unlock();
}

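/* Report the oldest pending bitmap IO of one device, if any: direction (R/W),
 * age, and its in-flight count. */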
static void seq_print_device_bitmap_io(struct seq_file *m, struct drbd_device *device, unsigned long now)
{
        struct drbd_bm_aio_ctx *ctx;
        unsigned long start_jif;
        unsigned int in_flight;
        unsigned int flags;
        spin_lock_irq(&device->resource->req_lock);
        ctx = list_first_entry_or_null(&device->pending_bitmap_io, struct drbd_bm_aio_ctx, list);
        if (ctx && ctx->done)
                ctx = NULL;
        if (ctx) {
                start_jif = ctx->start_jif;
                in_flight = atomic_read(&ctx->in_flight);
                flags = ctx->flags;
        }
        spin_unlock_irq(&device->resource->req_lock);
        if (ctx) {
                seq_printf(m, "%u\t%u\t%c\t%u\t%u\n",
                        device->minor, device->vnr,
                        (flags & BM_AIO_READ) ? 'R' : 'W',
                        jiffies_to_msecs(now - start_jif),
                        in_flight);
        }
}

static void seq_print_resource_pending_bitmap_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
        struct drbd_device *device;
        unsigned int i;

        seq_puts(m, "minor\tvnr\trw\tage\t#in-flight\n");
        rcu_read_lock();
        idr_for_each_entry(&resource->devices, device, i) {
                seq_print_device_bitmap_io(m, device, now);
        }
        rcu_read_unlock();
}

/* pretty print enum peer_req->flags */
static void seq_print_peer_request_flags(struct seq_file *m, struct drbd_peer_request *peer_req)
{
        unsigned long f = peer_req->flags;
        char sep = ' ';

        __seq_print_rq_state_bit(m, f & EE_SUBMITTED, &sep, "submitted", "preparing");
        __seq_print_rq_state_bit(m, f & EE_APPLICATION, &sep, "application", "internal");
        seq_print_rq_state_bit(m, f & EE_CALL_AL_COMPLETE_IO, &sep, "in-AL");
        seq_print_rq_state_bit(m, f & EE_SEND_WRITE_ACK, &sep, "C");
        seq_print_rq_state_bit(m, f & EE_MAY_SET_IN_SYNC, &sep, "set-in-sync");

        if (f & EE_IS_TRIM)
                __seq_print_rq_state_bit(m, f & EE_IS_TRIM_USE_ZEROOUT, &sep, "zero-out", "trim");
        seq_print_rq_state_bit(m, f & EE_WRITE_SAME, &sep, "write-same");
        seq_putc(m, '\n');
}

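/* Dump entries from one ee list: print the first entry; if it is still only
 * "preparing", skip further preparing entries and also print the first
 * submitted one, then stop. */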
static void seq_print_peer_request(struct seq_file *m,
        struct drbd_device *device, struct list_head *lh,
        unsigned long now)
{
        bool reported_preparing = false;
        struct drbd_peer_request *peer_req;
        list_for_each_entry(peer_req, lh, w.list) {
                if (reported_preparing && !(peer_req->flags & EE_SUBMITTED))
                        continue;

                if (device)
                        seq_printf(m, "%u\t%u\t", device->minor, device->vnr);

                seq_printf(m, "%llu\t%u\t%c\t%u\t",
                        (unsigned long long)peer_req->i.sector, peer_req->i.size >> 9,
                        (peer_req->flags & EE_WRITE) ? 'W' : 'R',
                        jiffies_to_msecs(now - peer_req->submit_jif));
                seq_print_peer_request_flags(m, peer_req);
                if (peer_req->flags & EE_SUBMITTED)
                        break;
                else
                        reported_preparing = true;
        }
}

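/* Peer requests of one device from the active_ee, read_ee and sync_ee lists,
 * plus a pseudo entry if a flush is still pending. */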
static void seq_print_device_peer_requests(struct seq_file *m,
        struct drbd_device *device, unsigned long now)
{
        seq_puts(m, "minor\tvnr\tsector\tsize\trw\tage\tflags\n");
        spin_lock_irq(&device->resource->req_lock);
        seq_print_peer_request(m, device, &device->active_ee, now);
        seq_print_peer_request(m, device, &device->read_ee, now);
        seq_print_peer_request(m, device, &device->sync_ee, now);
        spin_unlock_irq(&device->resource->req_lock);
        if (test_bit(FLUSH_PENDING, &device->flags)) {
                seq_printf(m, "%u\t%u\t-\t-\tF\t%u\tflush\n",
                        device->minor, device->vnr,
                        jiffies_to_msecs(now - device->flush_jif));
        }
}

static void seq_print_resource_pending_peer_requests(struct seq_file *m,
        struct drbd_resource *resource, unsigned long now)
{
        struct drbd_device *device;
        unsigned int i;

        rcu_read_lock();
        idr_for_each_entry(&resource->devices, device, i) {
                seq_print_device_peer_requests(m, device, now);
        }
        rcu_read_unlock();
}

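/* Walk the transfer log and print one representative request for each newly
 * seen combination of interesting state aspects (the bit mask built into
 * "tmp" below); every 512 requests the request lock is dropped briefly so
 * interrupts are not kept disabled for too long. */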
static void seq_print_resource_transfer_log_summary(struct seq_file *m,
        struct drbd_resource *resource,
        struct drbd_connection *connection,
        unsigned long now)
{
        struct drbd_request *req;
        unsigned int count = 0;
        unsigned int show_state = 0;

        seq_puts(m, "n\tdevice\tvnr\t" RQ_HDR);
        spin_lock_irq(&resource->req_lock);
        list_for_each_entry(req, &connection->transfer_log, tl_requests) {
                unsigned int tmp = 0;
                unsigned int s;
                ++count;

                /* don't disable irq "forever" */
                if (!(count & 0x1ff)) {
                        struct drbd_request *req_next;
                        kref_get(&req->kref);
                        spin_unlock_irq(&resource->req_lock);
                        cond_resched();
                        spin_lock_irq(&resource->req_lock);
                        req_next = list_next_entry(req, tl_requests);
                        if (kref_put(&req->kref, drbd_req_destroy))
                                req = req_next;
                        if (&req->tl_requests == &connection->transfer_log)
                                break;
                }

                s = req->rq_state;

                /* This is meant to summarize timing issues, to be able to tell
                 * local disk problems from network problems.
                 * Skip requests, if we have shown an even older request with
                 * similar aspects already. */
                if (req->master_bio == NULL)
                        tmp |= 1;
                if ((s & RQ_LOCAL_MASK) && (s & RQ_LOCAL_PENDING))
                        tmp |= 2;
                if (s & RQ_NET_MASK) {
                        if (!(s & RQ_NET_SENT))
                                tmp |= 4;
                        if (s & RQ_NET_PENDING)
                                tmp |= 8;
                        if (!(s & RQ_NET_DONE))
                                tmp |= 16;
                }
                if ((tmp & show_state) == tmp)
                        continue;
                show_state |= tmp;
                seq_printf(m, "%u\t", count);
                seq_print_minor_vnr_req(m, req, now);
                if (show_state == 0x1f)
                        break;
        }
        spin_unlock_irq(&resource->req_lock);
}

/* TODO: transfer_log and friends should be moved to resource */
static int in_flight_summary_show(struct seq_file *m, void *pos)
{
        struct drbd_resource *resource = m->private;
        struct drbd_connection *connection;
        unsigned long jif = jiffies;

        connection = first_connection(resource);
        /* This does not happen, actually.
         * But be robust and prepare for future code changes. */
        if (!connection || !kref_get_unless_zero(&connection->kref))
                return -ESTALE;

        /* BUMP me if you change the file format/content/presentation */
        seq_printf(m, "v: %u\n\n", 0);

        seq_puts(m, "oldest bitmap IO\n");
        seq_print_resource_pending_bitmap_io(m, resource, jif);
        seq_putc(m, '\n');

        seq_puts(m, "meta data IO\n");
        seq_print_resource_pending_meta_io(m, resource, jif);
        seq_putc(m, '\n');

        seq_puts(m, "socket buffer stats\n");
        /* for each connection ... once we have more than one */
        rcu_read_lock();
        if (connection->data.socket) {
                /* open coded SIOCINQ, the "relevant" part */
                struct tcp_sock *tp = tcp_sk(connection->data.socket->sk);
                int answ = tp->rcv_nxt - tp->copied_seq;
                seq_printf(m, "unread receive buffer: %u Byte\n", answ);
                /* open coded SIOCOUTQ, the "relevant" part */
                answ = tp->write_seq - tp->snd_una;
                seq_printf(m, "unacked send buffer: %u Byte\n", answ);
        }
        rcu_read_unlock();
        seq_putc(m, '\n');

        seq_puts(m, "oldest peer requests\n");
        seq_print_resource_pending_peer_requests(m, resource, jif);
        seq_putc(m, '\n');

        seq_puts(m, "application requests waiting for activity log\n");
        seq_print_waiting_for_AL(m, resource, jif);
        seq_putc(m, '\n');

        seq_puts(m, "oldest application requests\n");
        seq_print_resource_transfer_log_summary(m, resource, connection, jif);
        seq_putc(m, '\n');

        jif = jiffies - jif;
        if (jif)
                seq_printf(m, "generated in %d ms\n", jiffies_to_msecs(jif));
        kref_put(&connection->kref, drbd_destroy_connection);
        return 0;
}

/* make sure at *open* time that the respective object won't go away. */
static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, void *),
                void *data, struct kref *kref,
                void (*release)(struct kref *))
{
        struct dentry *parent;
        int ret = -ESTALE;

        /* Are we still linked,
         * or has debugfs_remove() already been called? */
        parent = file->f_path.dentry->d_parent;
        /* serialize with d_delete() */
        inode_lock(d_inode(parent));
        /* Make sure the object is still alive */
        if (simple_positive(file->f_path.dentry)
        && kref_get_unless_zero(kref))
                ret = 0;
        inode_unlock(d_inode(parent));
        if (!ret) {
                ret = single_open(file, show, data);
                if (ret)
                        kref_put(kref, release);
        }
        return ret;
}

static int in_flight_summary_open(struct inode *inode, struct file *file)
{
        struct drbd_resource *resource = inode->i_private;
        return drbd_single_open(file, in_flight_summary_show, resource,
                                &resource->kref, drbd_destroy_resource);
}

static int in_flight_summary_release(struct inode *inode, struct file *file)
{
        struct drbd_resource *resource = inode->i_private;
        kref_put(&resource->kref, drbd_destroy_resource);
        return single_release(inode, file);
}

static const struct file_operations in_flight_summary_fops = {
        .owner = THIS_MODULE,
        .open = in_flight_summary_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = in_flight_summary_release,
};

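/* Create resources/<name>/ with its "volumes" and "connections"
 * subdirectories and the "in_flight_summary" file; on any failure, tear down
 * whatever was created so far. */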
void drbd_debugfs_resource_add(struct drbd_resource *resource)
{
        struct dentry *dentry;
        if (!drbd_debugfs_resources)
                return;

        dentry = debugfs_create_dir(resource->name, drbd_debugfs_resources);
        if (IS_ERR_OR_NULL(dentry))
                goto fail;
        resource->debugfs_res = dentry;

        dentry = debugfs_create_dir("volumes", resource->debugfs_res);
        if (IS_ERR_OR_NULL(dentry))
                goto fail;
        resource->debugfs_res_volumes = dentry;

        dentry = debugfs_create_dir("connections", resource->debugfs_res);
        if (IS_ERR_OR_NULL(dentry))
                goto fail;
        resource->debugfs_res_connections = dentry;

        dentry = debugfs_create_file("in_flight_summary", S_IRUSR|S_IRGRP,
                        resource->debugfs_res, resource,
                        &in_flight_summary_fops);
        if (IS_ERR_OR_NULL(dentry))
                goto fail;
        resource->debugfs_res_in_flight_summary = dentry;
        return;

fail:
        drbd_debugfs_resource_cleanup(resource);
        drbd_err(resource, "failed to create debugfs dentry\n");
}

static void drbd_debugfs_remove(struct dentry **dp)
{
        debugfs_remove(*dp);
        *dp = NULL;
}

void drbd_debugfs_resource_cleanup(struct drbd_resource *resource)
{
        /* it is ok to call debugfs_remove(NULL) */
        drbd_debugfs_remove(&resource->debugfs_res_in_flight_summary);
        drbd_debugfs_remove(&resource->debugfs_res_connections);
        drbd_debugfs_remove(&resource->debugfs_res_volumes);
        drbd_debugfs_remove(&resource->debugfs_res);
}

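/* Print one callback history slot: sequence number, age, call site and the
 * callback address.  The slot is copied without locking and re-read until a
 * consistent snapshot is obtained; empty slots are skipped. */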
static void seq_print_one_timing_detail(struct seq_file *m,
        const struct drbd_thread_timing_details *tdp,
        unsigned long now)
{
        struct drbd_thread_timing_details td;
        /* No locking...
         * use temporary assignment to get at consistent data. */
        do {
                td = *tdp;
        } while (td.cb_nr != tdp->cb_nr);
        if (!td.cb_addr)
                return;
        seq_printf(m, "%u\t%d\t%s:%u\t%ps\n",
                        td.cb_nr,
                        jiffies_to_msecs(now - td.start_jif),
                        td.caller_fn, td.line,
                        td.cb_addr);
}

static void seq_print_timing_details(struct seq_file *m,
                const char *title,
                unsigned int cb_nr, struct drbd_thread_timing_details *tdp, unsigned long now)
{
        unsigned int start_idx;
        unsigned int i;

        seq_printf(m, "%s\n", title);
        /* If not much is going on, this will result in natural ordering.
         * If it is very busy, we will possibly skip events, or even see wrap
         * arounds, which could only be avoided with locking.
         */
        start_idx = cb_nr % DRBD_THREAD_DETAILS_HIST;
        for (i = start_idx; i < DRBD_THREAD_DETAILS_HIST; i++)
                seq_print_one_timing_detail(m, tdp+i, now);
        for (i = 0; i < start_idx; i++)
                seq_print_one_timing_detail(m, tdp+i, now);
}

static int callback_history_show(struct seq_file *m, void *ignored)
{
        struct drbd_connection *connection = m->private;
        unsigned long jif = jiffies;

        /* BUMP me if you change the file format/content/presentation */
        seq_printf(m, "v: %u\n\n", 0);

        seq_puts(m, "n\tage\tcallsite\tfn\n");
        seq_print_timing_details(m, "worker", connection->w_cb_nr, connection->w_timing_details, jif);
        seq_print_timing_details(m, "receiver", connection->r_cb_nr, connection->r_timing_details, jif);
        return 0;
}

static int callback_history_open(struct inode *inode, struct file *file)
{
        struct drbd_connection *connection = inode->i_private;
        return drbd_single_open(file, callback_history_show, connection,
                                &connection->kref, drbd_destroy_connection);
}

static int callback_history_release(struct inode *inode, struct file *file)
{
        struct drbd_connection *connection = inode->i_private;
        kref_put(&connection->kref, drbd_destroy_connection);
        return single_release(inode, file);
}

static const struct file_operations connection_callback_history_fops = {
        .owner = THIS_MODULE,
        .open = callback_history_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = callback_history_release,
};

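/* Show up to three "oldest" requests tracked on this connection: req_next,
 * req_ack_pending and req_not_net_done; duplicates are printed only once. */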
static int connection_oldest_requests_show(struct seq_file *m, void *ignored)
{
        struct drbd_connection *connection = m->private;
        unsigned long now = jiffies;
        struct drbd_request *r1, *r2;

        /* BUMP me if you change the file format/content/presentation */
        seq_printf(m, "v: %u\n\n", 0);

        spin_lock_irq(&connection->resource->req_lock);
        r1 = connection->req_next;
        if (r1)
                seq_print_minor_vnr_req(m, r1, now);
        r2 = connection->req_ack_pending;
        if (r2 && r2 != r1) {
                r1 = r2;
                seq_print_minor_vnr_req(m, r1, now);
        }
        r2 = connection->req_not_net_done;
        if (r2 && r2 != r1)
                seq_print_minor_vnr_req(m, r2, now);
        spin_unlock_irq(&connection->resource->req_lock);
        return 0;
}

static int connection_oldest_requests_open(struct inode *inode, struct file *file)
{
        struct drbd_connection *connection = inode->i_private;
        return drbd_single_open(file, connection_oldest_requests_show, connection,
                                &connection->kref, drbd_destroy_connection);
}

static int connection_oldest_requests_release(struct inode *inode, struct file *file)
{
        struct drbd_connection *connection = inode->i_private;
        kref_put(&connection->kref, drbd_destroy_connection);
        return single_release(inode, file);
}

static const struct file_operations connection_oldest_requests_fops = {
        .owner = THIS_MODULE,
        .open = connection_oldest_requests_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = connection_oldest_requests_release,
};

void drbd_debugfs_connection_add(struct drbd_connection *connection)
{
        struct dentry *conns_dir = connection->resource->debugfs_res_connections;
        struct dentry *dentry;
        if (!conns_dir)
                return;

        /* Once we enable multiple peers,
         * these connections will have descriptive names.
         * For now, it is just the one connection to the (only) "peer". */
        dentry = debugfs_create_dir("peer", conns_dir);
        if (IS_ERR_OR_NULL(dentry))
                goto fail;
        connection->debugfs_conn = dentry;

        dentry = debugfs_create_file("callback_history", S_IRUSR|S_IRGRP,
                        connection->debugfs_conn, connection,
                        &connection_callback_history_fops);
        if (IS_ERR_OR_NULL(dentry))
                goto fail;
        connection->debugfs_conn_callback_history = dentry;

        dentry = debugfs_create_file("oldest_requests", S_IRUSR|S_IRGRP,
                        connection->debugfs_conn, connection,
                        &connection_oldest_requests_fops);
        if (IS_ERR_OR_NULL(dentry))
                goto fail;
        connection->debugfs_conn_oldest_requests = dentry;
        return;

fail:
        drbd_debugfs_connection_cleanup(connection);
        drbd_err(connection, "failed to create debugfs dentry\n");
}

void drbd_debugfs_connection_cleanup(struct drbd_connection *connection)
{
        drbd_debugfs_remove(&connection->debugfs_conn_callback_history);
        drbd_debugfs_remove(&connection->debugfs_conn_oldest_requests);
        drbd_debugfs_remove(&connection->debugfs_conn);
}

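/* lc_seq_dump_details() callback for the resync extent LRU: print rs_left and
 * the NO_WRITES/LOCKED/PRIORITY flags of one bitmap extent. */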
static void resync_dump_detail(struct seq_file *m, struct lc_element *e)
{
        struct bm_extent *bme = lc_entry(e, struct bm_extent, lce);

        seq_printf(m, "%5d %s %s %s", bme->rs_left,
                test_bit(BME_NO_WRITES, &bme->flags) ? "NO_WRITES" : "---------",
                test_bit(BME_LOCKED, &bme->flags) ? "LOCKED" : "------",
                test_bit(BME_PRIORITY, &bme->flags) ? "PRIORITY" : "--------"
                );
}

static int device_resync_extents_show(struct seq_file *m, void *ignored)
{
        struct drbd_device *device = m->private;

        /* BUMP me if you change the file format/content/presentation */
        seq_printf(m, "v: %u\n\n", 0);

        if (get_ldev_if_state(device, D_FAILED)) {
                lc_seq_printf_stats(m, device->resync);
                lc_seq_dump_details(m, device->resync, "rs_left flags", resync_dump_detail);
                put_ldev(device);
        }
        return 0;
}

static int device_act_log_extents_show(struct seq_file *m, void *ignored)
{
        struct drbd_device *device = m->private;

        /* BUMP me if you change the file format/content/presentation */
        seq_printf(m, "v: %u\n\n", 0);

        if (get_ldev_if_state(device, D_FAILED)) {
                lc_seq_printf_stats(m, device->act_log);
                lc_seq_dump_details(m, device->act_log, "", NULL);
                put_ldev(device);
        }
        return 0;
}

static int device_oldest_requests_show(struct seq_file *m, void *ignored)
{
        struct drbd_device *device = m->private;
        struct drbd_resource *resource = device->resource;
        unsigned long now = jiffies;
        struct drbd_request *r1, *r2;
        int i;

        /* BUMP me if you change the file format/content/presentation */
        seq_printf(m, "v: %u\n\n", 0);

        seq_puts(m, RQ_HDR);
        spin_lock_irq(&resource->req_lock);
        /* WRITE, then READ */
        for (i = 1; i >= 0; --i) {
                r1 = list_first_entry_or_null(&device->pending_master_completion[i],
                        struct drbd_request, req_pending_master_completion);
                r2 = list_first_entry_or_null(&device->pending_completion[i],
                        struct drbd_request, req_pending_local);
                if (r1)
                        seq_print_one_request(m, r1, now);
                if (r2 && r2 != r1)
                        seq_print_one_request(m, r2, now);
        }
        spin_unlock_irq(&resource->req_lock);
        return 0;
}

static int device_data_gen_id_show(struct seq_file *m, void *ignored)
{
        struct drbd_device *device = m->private;
        struct drbd_md *md;
        enum drbd_uuid_index idx;

        if (!get_ldev_if_state(device, D_FAILED))
                return -ENODEV;

        md = &device->ldev->md;
        spin_lock_irq(&md->uuid_lock);
        for (idx = UI_CURRENT; idx <= UI_HISTORY_END; idx++) {
                seq_printf(m, "0x%016llX\n", md->uuid[idx]);
        }
        spin_unlock_irq(&md->uuid_lock);
        put_ldev(device);
        return 0;
}

static int device_ed_gen_id_show(struct seq_file *m, void *ignored)
{
        struct drbd_device *device = m->private;
        seq_printf(m, "0x%016llX\n", (unsigned long long)device->ed_uuid);
        return 0;
}

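/* Boilerplate: open/release functions and file_operations for each per-device
 * attribute; open pins the device via its kref, release drops it again. */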
#define drbd_debugfs_device_attr(name) \
static int device_ ## name ## _open(struct inode *inode, struct file *file) \
{ \
        struct drbd_device *device = inode->i_private; \
        return drbd_single_open(file, device_ ## name ## _show, device, \
                                &device->kref, drbd_destroy_device); \
} \
static int device_ ## name ## _release(struct inode *inode, struct file *file) \
{ \
        struct drbd_device *device = inode->i_private; \
        kref_put(&device->kref, drbd_destroy_device); \
        return single_release(inode, file); \
} \
static const struct file_operations device_ ## name ## _fops = { \
        .owner = THIS_MODULE, \
        .open = device_ ## name ## _open, \
        .read = seq_read, \
        .llseek = seq_lseek, \
        .release = device_ ## name ## _release, \
};

drbd_debugfs_device_attr(oldest_requests)
drbd_debugfs_device_attr(act_log_extents)
drbd_debugfs_device_attr(resync_extents)
drbd_debugfs_device_attr(data_gen_id)
drbd_debugfs_device_attr(ed_gen_id)

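/* Create <resource>/volumes/<vnr>/ with the per-device attribute files, plus
 * a minors/<minor> symlink pointing back at that volume directory. */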
void drbd_debugfs_device_add(struct drbd_device *device)
{
        struct dentry *vols_dir = device->resource->debugfs_res_volumes;
        char minor_buf[8]; /* MINORMASK, MINORBITS == 20; */
        char vnr_buf[8];   /* volume number vnr is even 16 bit only; */
        char *slink_name = NULL;

        struct dentry *dentry;
        if (!vols_dir || !drbd_debugfs_minors)
                return;

        snprintf(vnr_buf, sizeof(vnr_buf), "%u", device->vnr);
        dentry = debugfs_create_dir(vnr_buf, vols_dir);
        if (IS_ERR_OR_NULL(dentry))
                goto fail;
        device->debugfs_vol = dentry;

        snprintf(minor_buf, sizeof(minor_buf), "%u", device->minor);
        slink_name = kasprintf(GFP_KERNEL, "../resources/%s/volumes/%u",
                        device->resource->name, device->vnr);
        if (!slink_name)
                goto fail;
        dentry = debugfs_create_symlink(minor_buf, drbd_debugfs_minors, slink_name);
        kfree(slink_name);
        slink_name = NULL;
        if (IS_ERR_OR_NULL(dentry))
                goto fail;
        device->debugfs_minor = dentry;

#define DCF(name) do { \
        dentry = debugfs_create_file(#name, S_IRUSR|S_IRGRP, \
                        device->debugfs_vol, device, \
                        &device_ ## name ## _fops); \
        if (IS_ERR_OR_NULL(dentry)) \
                goto fail; \
        device->debugfs_vol_ ## name = dentry; \
        } while (0)

        DCF(oldest_requests);
        DCF(act_log_extents);
        DCF(resync_extents);
        DCF(data_gen_id);
        DCF(ed_gen_id);
#undef DCF
        return;

fail:
        drbd_debugfs_device_cleanup(device);
        drbd_err(device, "failed to create debugfs entries\n");
}

void drbd_debugfs_device_cleanup(struct drbd_device *device)
{
        drbd_debugfs_remove(&device->debugfs_minor);
        drbd_debugfs_remove(&device->debugfs_vol_oldest_requests);
        drbd_debugfs_remove(&device->debugfs_vol_act_log_extents);
        drbd_debugfs_remove(&device->debugfs_vol_resync_extents);
        drbd_debugfs_remove(&device->debugfs_vol_data_gen_id);
        drbd_debugfs_remove(&device->debugfs_vol_ed_gen_id);
        drbd_debugfs_remove(&device->debugfs_vol);
}

void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device)
{
        struct dentry *conn_dir = peer_device->connection->debugfs_conn;
        struct dentry *dentry;
        char vnr_buf[8];

        if (!conn_dir)
                return;

        snprintf(vnr_buf, sizeof(vnr_buf), "%u", peer_device->device->vnr);
        dentry = debugfs_create_dir(vnr_buf, conn_dir);
        if (IS_ERR_OR_NULL(dentry))
                goto fail;
        peer_device->debugfs_peer_dev = dentry;
        return;

fail:
        drbd_debugfs_peer_device_cleanup(peer_device);
        drbd_err(peer_device, "failed to create debugfs entries\n");
}

void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device)
{
        drbd_debugfs_remove(&peer_device->debugfs_peer_dev);
}

static int drbd_version_show(struct seq_file *m, void *ignored)
{
        seq_printf(m, "# %s\n", drbd_buildtag());
        seq_printf(m, "VERSION=%s\n", REL_VERSION);
        seq_printf(m, "API_VERSION=%u\n", API_VERSION);
        seq_printf(m, "PRO_VERSION_MIN=%u\n", PRO_VERSION_MIN);
        seq_printf(m, "PRO_VERSION_MAX=%u\n", PRO_VERSION_MAX);
        return 0;
}

static int drbd_version_open(struct inode *inode, struct file *file)
{
        return single_open(file, drbd_version_show, NULL);
}

static const struct file_operations drbd_version_fops = {
        .owner = THIS_MODULE,
        .open = drbd_version_open,
        .llseek = seq_lseek,
        .read = seq_read,
        .release = single_release,
};

/* not __exit, may be indirectly called
 * from the module-load-failure path as well. */
void drbd_debugfs_cleanup(void)
{
        drbd_debugfs_remove(&drbd_debugfs_resources);
        drbd_debugfs_remove(&drbd_debugfs_minors);
        drbd_debugfs_remove(&drbd_debugfs_version);
        drbd_debugfs_remove(&drbd_debugfs_root);
}

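/* Called from module init: create the debugfs hierarchy
 * drbd/{version,resources/,minors/}. */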
int __init drbd_debugfs_init(void)
{
        struct dentry *dentry;

        dentry = debugfs_create_dir("drbd", NULL);
        if (IS_ERR_OR_NULL(dentry))
                goto fail;
        drbd_debugfs_root = dentry;

        dentry = debugfs_create_file("version", 0444, drbd_debugfs_root, NULL, &drbd_version_fops);
        if (IS_ERR_OR_NULL(dentry))
                goto fail;
        drbd_debugfs_version = dentry;

        dentry = debugfs_create_dir("resources", drbd_debugfs_root);
        if (IS_ERR_OR_NULL(dentry))
                goto fail;
        drbd_debugfs_resources = dentry;

        dentry = debugfs_create_dir("minors", drbd_debugfs_root);
        if (IS_ERR_OR_NULL(dentry))
                goto fail;
        drbd_debugfs_minors = dentry;
        return 0;

fail:
        drbd_debugfs_cleanup();
        if (dentry)
                return PTR_ERR(dentry);
        else
                return -EINVAL;
}
951 }