block/blk-mq-debugfs.c (mirror_ubuntu-artful-kernel.git)
/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

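/*
 * Describes a single debugfs file: its name, its permission bits, and the
 * file_operations that back it.
 */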
struct blk_mq_debugfs_attr {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
};

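/*
 * Like seq_open(), but also copy the inode's private data (the object being
 * dumped) into seq_file->private so the seq_operations callbacks can use it.
 */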
static int blk_mq_debugfs_seq_open(struct inode *inode, struct file *file,
                                   const struct seq_operations *ops)
{
        struct seq_file *m;
        int ret;

        ret = seq_open(file, ops);
        if (!ret) {
                m = file->private_data;
                m->private = inode->i_private;
        }
        return ret;
}

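/*
 * Print the bits set in @flags, space separated.  Bits with an entry in
 * @flag_name are printed symbolically; any others are printed as raw bit
 * numbers.
 */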
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
                          const char *const *flag_name, int flag_name_count)
{
        bool sep = false;
        int i;

        for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
                if (!(flags & BIT(i)))
                        continue;
                if (sep)
                        seq_puts(m, " ");
                sep = true;
                if (i < flag_name_count && flag_name[i])
                        seq_puts(m, flag_name[i]);
                else
                        seq_printf(m, "%d", i);
        }
        seq_puts(m, "\n");
        return 0;
}

static const char *const blk_queue_flag_name[] = {
        [QUEUE_FLAG_QUEUED] = "QUEUED",
        [QUEUE_FLAG_STOPPED] = "STOPPED",
        [QUEUE_FLAG_SYNCFULL] = "SYNCFULL",
        [QUEUE_FLAG_ASYNCFULL] = "ASYNCFULL",
        [QUEUE_FLAG_DYING] = "DYING",
        [QUEUE_FLAG_BYPASS] = "BYPASS",
        [QUEUE_FLAG_BIDI] = "BIDI",
        [QUEUE_FLAG_NOMERGES] = "NOMERGES",
        [QUEUE_FLAG_SAME_COMP] = "SAME_COMP",
        [QUEUE_FLAG_FAIL_IO] = "FAIL_IO",
        [QUEUE_FLAG_STACKABLE] = "STACKABLE",
        [QUEUE_FLAG_NONROT] = "NONROT",
        [QUEUE_FLAG_IO_STAT] = "IO_STAT",
        [QUEUE_FLAG_DISCARD] = "DISCARD",
        [QUEUE_FLAG_NOXMERGES] = "NOXMERGES",
        [QUEUE_FLAG_ADD_RANDOM] = "ADD_RANDOM",
        [QUEUE_FLAG_SECERASE] = "SECERASE",
        [QUEUE_FLAG_SAME_FORCE] = "SAME_FORCE",
        [QUEUE_FLAG_DEAD] = "DEAD",
        [QUEUE_FLAG_INIT_DONE] = "INIT_DONE",
        [QUEUE_FLAG_NO_SG_MERGE] = "NO_SG_MERGE",
        [QUEUE_FLAG_POLL] = "POLL",
        [QUEUE_FLAG_WC] = "WC",
        [QUEUE_FLAG_FUA] = "FUA",
        [QUEUE_FLAG_FLUSH_NQ] = "FLUSH_NQ",
        [QUEUE_FLAG_DAX] = "DAX",
        [QUEUE_FLAG_STATS] = "STATS",
        [QUEUE_FLAG_POLL_STATS] = "POLL_STATS",
        [QUEUE_FLAG_REGISTERED] = "REGISTERED",
};

static int blk_queue_flags_show(struct seq_file *m, void *v)
{
        struct request_queue *q = m->private;

        blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
                       ARRAY_SIZE(blk_queue_flag_name));
        return 0;
}

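/*
 * The queue "state" file is also writable: "run" asynchronously runs all
 * hardware queues, "start" restarts any stopped hardware queues.  Any other
 * string is rejected with -EINVAL.
 */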
static ssize_t blk_queue_flags_store(struct file *file, const char __user *ubuf,
                                     size_t len, loff_t *offp)
{
        struct request_queue *q = file_inode(file)->i_private;
        char op[16] = { }, *s;

        len = min(len, sizeof(op) - 1);
        if (copy_from_user(op, ubuf, len))
                return -EFAULT;
        s = op;
        strsep(&s, " \t\n"); /* strip trailing whitespace */
        if (strcmp(op, "run") == 0) {
                blk_mq_run_hw_queues(q, true);
        } else if (strcmp(op, "start") == 0) {
                blk_mq_start_stopped_hw_queues(q, true);
        } else {
                pr_err("%s: unsupported operation %s. Use either 'run' or 'start'\n",
                       __func__, op);
                return -EINVAL;
        }
        return len;
}

static int blk_queue_flags_open(struct inode *inode, struct file *file)
{
        return single_open(file, blk_queue_flags_show, inode->i_private);
}

static const struct file_operations blk_queue_flags_fops = {
        .open = blk_queue_flags_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = blk_queue_flags_store,
};

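/*
 * Per-queue polling statistics, one read line and one write line per request
 * size bucket.  Bucket sizes start at 512 bytes and double with each bucket.
 */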
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
        if (stat->nr_samples) {
                seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
                           stat->nr_samples, stat->mean, stat->min, stat->max);
        } else {
                seq_puts(m, "samples=0");
        }
}

static int queue_poll_stat_show(struct seq_file *m, void *v)
{
        struct request_queue *q = m->private;
        int bucket;

        for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
                seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket));
                print_stat(m, &q->poll_stat[2*bucket]);
                seq_puts(m, "\n");

                seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
                print_stat(m, &q->poll_stat[2*bucket+1]);
                seq_puts(m, "\n");
        }
        return 0;
}

static int queue_poll_stat_open(struct inode *inode, struct file *file)
{
        return single_open(file, queue_poll_stat_show, inode->i_private);
}

static const struct file_operations queue_poll_stat_fops = {
        .open = queue_poll_stat_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static const char *const hctx_state_name[] = {
        [BLK_MQ_S_STOPPED] = "STOPPED",
        [BLK_MQ_S_TAG_ACTIVE] = "TAG_ACTIVE",
        [BLK_MQ_S_SCHED_RESTART] = "SCHED_RESTART",
        [BLK_MQ_S_TAG_WAITING] = "TAG_WAITING",
};

static int hctx_state_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        blk_flags_show(m, hctx->state, hctx_state_name,
                       ARRAY_SIZE(hctx_state_name));
        return 0;
}

static int hctx_state_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_state_show, inode->i_private);
}

static const struct file_operations hctx_state_fops = {
        .open = hctx_state_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static const char *const alloc_policy_name[] = {
        [BLK_TAG_ALLOC_FIFO] = "fifo",
        [BLK_TAG_ALLOC_RR] = "rr",
};

static const char *const hctx_flag_name[] = {
        [ilog2(BLK_MQ_F_SHOULD_MERGE)] = "SHOULD_MERGE",
        [ilog2(BLK_MQ_F_TAG_SHARED)] = "TAG_SHARED",
        [ilog2(BLK_MQ_F_SG_MERGE)] = "SG_MERGE",
        [ilog2(BLK_MQ_F_BLOCKING)] = "BLOCKING",
        [ilog2(BLK_MQ_F_NO_SCHED)] = "NO_SCHED",
};

static int hctx_flags_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;
        const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

        seq_puts(m, "alloc_policy=");
        if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
            alloc_policy_name[alloc_policy])
                seq_puts(m, alloc_policy_name[alloc_policy]);
        else
                seq_printf(m, "%d", alloc_policy);
        seq_puts(m, " ");
        blk_flags_show(m,
                       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
                       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
        return 0;
}

static int hctx_flags_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_flags_show, inode->i_private);
}

static const struct file_operations hctx_flags_fops = {
        .open = hctx_flags_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

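/*
 * Dump a single request: its address, command flags, request flags, driver
 * tag and scheduler ("internal") tag.  Shared by the per-hctx "dispatch" and
 * per-ctx "rq_list" files.
 */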
static int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
        struct request *rq = list_entry_rq(v);

        seq_printf(m, "%p {.cmd_flags=0x%x, .rq_flags=0x%x, .tag=%d, .internal_tag=%d}\n",
                   rq, rq->cmd_flags, (__force unsigned int)rq->rq_flags,
                   rq->tag, rq->internal_tag);
        return 0;
}

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_lock(&hctx->lock);
        return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
        __releases(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
        .start = hctx_dispatch_start,
        .next = hctx_dispatch_next,
        .stop = hctx_dispatch_stop,
        .show = blk_mq_debugfs_rq_show,
};

static int hctx_dispatch_open(struct inode *inode, struct file *file)
{
        return blk_mq_debugfs_seq_open(inode, file, &hctx_dispatch_seq_ops);
}

static const struct file_operations hctx_dispatch_fops = {
        .open = hctx_dispatch_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static int hctx_ctx_map_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        sbitmap_bitmap_show(&hctx->ctx_map, m);
        return 0;
}

static int hctx_ctx_map_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_ctx_map_show, inode->i_private);
}

static const struct file_operations hctx_ctx_map_fops = {
        .open = hctx_ctx_map_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

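/*
 * The tag dumps below take q->sysfs_lock (interruptibly) so that hctx->tags
 * and hctx->sched_tags cannot change while they are being printed.
 */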
static void blk_mq_debugfs_tags_show(struct seq_file *m,
                                     struct blk_mq_tags *tags)
{
        seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
        seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
        seq_printf(m, "active_queues=%d\n",
                   atomic_read(&tags->active_queues));

        seq_puts(m, "\nbitmap_tags:\n");
        sbitmap_queue_show(&tags->bitmap_tags, m);

        if (tags->nr_reserved_tags) {
                seq_puts(m, "\nbreserved_tags:\n");
                sbitmap_queue_show(&tags->breserved_tags, m);
        }
}

static int hctx_tags_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                blk_mq_debugfs_tags_show(m, hctx->tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_tags_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_tags_show, inode->i_private);
}

static const struct file_operations hctx_tags_fops = {
        .open = hctx_tags_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int hctx_tags_bitmap_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_tags_bitmap_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_tags_bitmap_show, inode->i_private);
}

static const struct file_operations hctx_tags_bitmap_fops = {
        .open = hctx_tags_bitmap_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int hctx_sched_tags_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                blk_mq_debugfs_tags_show(m, hctx->sched_tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_sched_tags_show, inode->i_private);
}

static const struct file_operations hctx_sched_tags_fops = {
        .open = hctx_sched_tags_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int hctx_sched_tags_bitmap_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_bitmap_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_sched_tags_bitmap_show, inode->i_private);
}

static const struct file_operations hctx_sched_tags_bitmap_fops = {
        .open = hctx_sched_tags_bitmap_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

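/*
 * Per-hctx polling counters.  Writing anything to the "io_poll" file resets
 * them; the same reset-on-write convention is used by the "dispatched",
 * "queued" and "run" counter files below.
 */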
static int hctx_io_poll_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        seq_printf(m, "considered=%lu\n", hctx->poll_considered);
        seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
        seq_printf(m, "success=%lu\n", hctx->poll_success);
        return 0;
}

static int hctx_io_poll_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_io_poll_show, inode->i_private);
}

static ssize_t hctx_io_poll_write(struct file *file, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        struct blk_mq_hw_ctx *hctx = m->private;

        hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
        return count;
}

static const struct file_operations hctx_io_poll_fops = {
        .open = hctx_io_poll_open,
        .read = seq_read,
        .write = hctx_io_poll_write,
        .llseek = seq_lseek,
        .release = single_release,
};

static int hctx_dispatched_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;
        int i;

        seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
                unsigned int d = 1U << (i - 1);

                seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
        }

        seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
        return 0;
}

static int hctx_dispatched_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_dispatched_show, inode->i_private);
}

static ssize_t hctx_dispatched_write(struct file *file, const char __user *buf,
                                     size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        struct blk_mq_hw_ctx *hctx = m->private;
        int i;

        for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
                hctx->dispatched[i] = 0;
        return count;
}

static const struct file_operations hctx_dispatched_fops = {
        .open = hctx_dispatched_open,
        .read = seq_read,
        .write = hctx_dispatched_write,
        .llseek = seq_lseek,
        .release = single_release,
};

static int hctx_queued_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        seq_printf(m, "%lu\n", hctx->queued);
        return 0;
}

static int hctx_queued_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_queued_show, inode->i_private);
}

static ssize_t hctx_queued_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        struct blk_mq_hw_ctx *hctx = m->private;

        hctx->queued = 0;
        return count;
}

static const struct file_operations hctx_queued_fops = {
        .open = hctx_queued_open,
        .read = seq_read,
        .write = hctx_queued_write,
        .llseek = seq_lseek,
        .release = single_release,
};

static int hctx_run_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        seq_printf(m, "%lu\n", hctx->run);
        return 0;
}

static int hctx_run_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_run_show, inode->i_private);
}

static ssize_t hctx_run_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        struct blk_mq_hw_ctx *hctx = m->private;

        hctx->run = 0;
        return count;
}

static const struct file_operations hctx_run_fops = {
        .open = hctx_run_open,
        .read = seq_read,
        .write = hctx_run_write,
        .llseek = seq_lseek,
        .release = single_release,
};

static int hctx_active_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
        return 0;
}

static int hctx_active_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_active_show, inode->i_private);
}

static const struct file_operations hctx_active_fops = {
        .open = hctx_active_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

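/*
 * Per-software-context (per-CPU) files.  The "rq_list" dump walks
 * ctx->rq_list under ctx->lock, using the same seq_operations pattern as the
 * per-hctx "dispatch" file.
 */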
static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
        __acquires(&ctx->lock)
{
        struct blk_mq_ctx *ctx = m->private;

        spin_lock(&ctx->lock);
        return seq_list_start(&ctx->rq_list, *pos);
}

static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_ctx *ctx = m->private;

        return seq_list_next(v, &ctx->rq_list, pos);
}

static void ctx_rq_list_stop(struct seq_file *m, void *v)
        __releases(&ctx->lock)
{
        struct blk_mq_ctx *ctx = m->private;

        spin_unlock(&ctx->lock);
}

static const struct seq_operations ctx_rq_list_seq_ops = {
        .start = ctx_rq_list_start,
        .next = ctx_rq_list_next,
        .stop = ctx_rq_list_stop,
        .show = blk_mq_debugfs_rq_show,
};

static int ctx_rq_list_open(struct inode *inode, struct file *file)
{
        return blk_mq_debugfs_seq_open(inode, file, &ctx_rq_list_seq_ops);
}

static const struct file_operations ctx_rq_list_fops = {
        .open = ctx_rq_list_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static int ctx_dispatched_show(struct seq_file *m, void *v)
{
        struct blk_mq_ctx *ctx = m->private;

        seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
        return 0;
}

static int ctx_dispatched_open(struct inode *inode, struct file *file)
{
        return single_open(file, ctx_dispatched_show, inode->i_private);
}

static ssize_t ctx_dispatched_write(struct file *file, const char __user *buf,
                                    size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        struct blk_mq_ctx *ctx = m->private;

        ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
        return count;
}

static const struct file_operations ctx_dispatched_fops = {
        .open = ctx_dispatched_open,
        .read = seq_read,
        .write = ctx_dispatched_write,
        .llseek = seq_lseek,
        .release = single_release,
};

static int ctx_merged_show(struct seq_file *m, void *v)
{
        struct blk_mq_ctx *ctx = m->private;

        seq_printf(m, "%lu\n", ctx->rq_merged);
        return 0;
}

static int ctx_merged_open(struct inode *inode, struct file *file)
{
        return single_open(file, ctx_merged_show, inode->i_private);
}

static ssize_t ctx_merged_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        struct blk_mq_ctx *ctx = m->private;

        ctx->rq_merged = 0;
        return count;
}

static const struct file_operations ctx_merged_fops = {
        .open = ctx_merged_open,
        .read = seq_read,
        .write = ctx_merged_write,
        .llseek = seq_lseek,
        .release = single_release,
};

static int ctx_completed_show(struct seq_file *m, void *v)
{
        struct blk_mq_ctx *ctx = m->private;

        seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
        return 0;
}

static int ctx_completed_open(struct inode *inode, struct file *file)
{
        return single_open(file, ctx_completed_show, inode->i_private);
}

static ssize_t ctx_completed_write(struct file *file, const char __user *buf,
                                   size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        struct blk_mq_ctx *ctx = m->private;

        ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
        return count;
}

static const struct file_operations ctx_completed_fops = {
        .open = ctx_completed_open,
        .read = seq_read,
        .write = ctx_completed_write,
        .llseek = seq_lseek,
        .release = single_release,
};

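/*
 * The debugfs files created for each queue, hardware context and software
 * context.  Read-only dumps are 0400; files with a write handler (command or
 * reset-on-write) are 0600.
 */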
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
        {"poll_stat", 0400, &queue_poll_stat_fops},
        {"state", 0600, &blk_queue_flags_fops},
        {},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
        {"state", 0400, &hctx_state_fops},
        {"flags", 0400, &hctx_flags_fops},
        {"dispatch", 0400, &hctx_dispatch_fops},
        {"ctx_map", 0400, &hctx_ctx_map_fops},
        {"tags", 0400, &hctx_tags_fops},
        {"tags_bitmap", 0400, &hctx_tags_bitmap_fops},
        {"sched_tags", 0400, &hctx_sched_tags_fops},
        {"sched_tags_bitmap", 0400, &hctx_sched_tags_bitmap_fops},
        {"io_poll", 0600, &hctx_io_poll_fops},
        {"dispatched", 0600, &hctx_dispatched_fops},
        {"queued", 0600, &hctx_queued_fops},
        {"run", 0600, &hctx_run_fops},
        {"active", 0400, &hctx_active_fops},
        {},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
        {"rq_list", 0400, &ctx_rq_list_fops},
        {"dispatched", 0600, &ctx_dispatched_fops},
        {"merged", 0600, &ctx_merged_fops},
        {"completed", 0600, &ctx_completed_fops},
        {},
};

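/*
 * Registration builds the hierarchy under blk_debugfs_root: a directory named
 * after the queue's parent kobject (normally the disk) for the queue itself,
 * an mq/ subdirectory for the blk-mq files, one numbered directory per
 * hardware queue and a cpu<N>/ directory per software context.
 * Unregistration removes the whole tree recursively.
 */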
int blk_mq_debugfs_register(struct request_queue *q)
{
        if (!blk_debugfs_root)
                return -ENOENT;

        q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
                                            blk_debugfs_root);
        if (!q->debugfs_dir)
                goto err;

        if (blk_mq_debugfs_register_mq(q))
                goto err;

        return 0;

err:
        blk_mq_debugfs_unregister(q);
        return -ENOMEM;
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
        debugfs_remove_recursive(q->debugfs_dir);
        q->mq_debugfs_dir = NULL;
        q->debugfs_dir = NULL;
}

static bool debugfs_create_files(struct dentry *parent, void *data,
                                 const struct blk_mq_debugfs_attr *attr)
{
        for (; attr->name; attr++) {
                if (!debugfs_create_file(attr->name, attr->mode, parent,
                                         data, attr->fops))
                        return false;
        }
        return true;
}

static int blk_mq_debugfs_register_ctx(struct request_queue *q,
                                       struct blk_mq_ctx *ctx,
                                       struct dentry *hctx_dir)
{
        struct dentry *ctx_dir;
        char name[20];

        snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
        ctx_dir = debugfs_create_dir(name, hctx_dir);
        if (!ctx_dir)
                return -ENOMEM;

        if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
                return -ENOMEM;

        return 0;
}

static int blk_mq_debugfs_register_hctx(struct request_queue *q,
                                        struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        struct dentry *hctx_dir;
        char name[20];
        int i;

        snprintf(name, sizeof(name), "%u", hctx->queue_num);
        hctx_dir = debugfs_create_dir(name, q->mq_debugfs_dir);
        if (!hctx_dir)
                return -ENOMEM;

        if (!debugfs_create_files(hctx_dir, hctx, blk_mq_debugfs_hctx_attrs))
                return -ENOMEM;

        hctx_for_each_ctx(hctx, ctx, i) {
                if (blk_mq_debugfs_register_ctx(q, ctx, hctx_dir))
                        return -ENOMEM;
        }

        return 0;
}

int blk_mq_debugfs_register_mq(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!q->debugfs_dir)
                return -ENOENT;

        q->mq_debugfs_dir = debugfs_create_dir("mq", q->debugfs_dir);
        if (!q->mq_debugfs_dir)
                goto err;

        if (!debugfs_create_files(q->mq_debugfs_dir, q, blk_mq_debugfs_queue_attrs))
                goto err;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (blk_mq_debugfs_register_hctx(q, hctx))
                        goto err;
        }

        return 0;

err:
        blk_mq_debugfs_unregister_mq(q);
        return -ENOMEM;
}

void blk_mq_debugfs_unregister_mq(struct request_queue *q)
{
        debugfs_remove_recursive(q->mq_debugfs_dir);
        q->mq_debugfs_dir = NULL;
}