/* block/blk-mq-debugfs.c — blk-mq debugfs attributes
 * (mirrored source; includes commit "blk-mq-debugfs: don't open code strstrip()")
 */
1 /*
2 * Copyright (C) 2017 Facebook
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <https://www.gnu.org/licenses/>.
15 */
16
17 #include <linux/kernel.h>
18 #include <linux/blkdev.h>
19 #include <linux/debugfs.h>
20
21 #include <linux/blk-mq.h>
22 #include "blk.h"
23 #include "blk-mq.h"
24 #include "blk-mq-tag.h"
25
/*
 * Describes one debugfs file: its name, permission bits, and file
 * operations.  Tables of these are terminated by an entry with a NULL
 * name (see debugfs_create_files()).
 */
struct blk_mq_debugfs_attr {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
};
31
32 static int blk_mq_debugfs_seq_open(struct inode *inode, struct file *file,
33 const struct seq_operations *ops)
34 {
35 struct seq_file *m;
36 int ret;
37
38 ret = seq_open(file, ops);
39 if (!ret) {
40 m = file->private_data;
41 m->private = inode->i_private;
42 }
43 return ret;
44 }
45
46 static int blk_flags_show(struct seq_file *m, const unsigned long flags,
47 const char *const *flag_name, int flag_name_count)
48 {
49 bool sep = false;
50 int i;
51
52 for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
53 if (!(flags & BIT(i)))
54 continue;
55 if (sep)
56 seq_puts(m, "|");
57 sep = true;
58 if (i < flag_name_count && flag_name[i])
59 seq_puts(m, flag_name[i]);
60 else
61 seq_printf(m, "%d", i);
62 }
63 return 0;
64 }
65
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
/* Human-readable names for request_queue->queue_flags bits, indexed by bit. */
static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(QUEUED),
        QUEUE_FLAG_NAME(STOPPED),
        QUEUE_FLAG_NAME(SYNCFULL),
        QUEUE_FLAG_NAME(ASYNCFULL),
        QUEUE_FLAG_NAME(DYING),
        QUEUE_FLAG_NAME(BYPASS),
        QUEUE_FLAG_NAME(BIDI),
        QUEUE_FLAG_NAME(NOMERGES),
        QUEUE_FLAG_NAME(SAME_COMP),
        QUEUE_FLAG_NAME(FAIL_IO),
        QUEUE_FLAG_NAME(STACKABLE),
        QUEUE_FLAG_NAME(NONROT),
        QUEUE_FLAG_NAME(IO_STAT),
        QUEUE_FLAG_NAME(DISCARD),
        QUEUE_FLAG_NAME(NOXMERGES),
        QUEUE_FLAG_NAME(ADD_RANDOM),
        QUEUE_FLAG_NAME(SECERASE),
        QUEUE_FLAG_NAME(SAME_FORCE),
        QUEUE_FLAG_NAME(DEAD),
        QUEUE_FLAG_NAME(INIT_DONE),
        QUEUE_FLAG_NAME(NO_SG_MERGE),
        QUEUE_FLAG_NAME(POLL),
        QUEUE_FLAG_NAME(WC),
        QUEUE_FLAG_NAME(FUA),
        QUEUE_FLAG_NAME(FLUSH_NQ),
        QUEUE_FLAG_NAME(DAX),
        QUEUE_FLAG_NAME(STATS),
        QUEUE_FLAG_NAME(POLL_STATS),
        QUEUE_FLAG_NAME(REGISTERED),
};
#undef QUEUE_FLAG_NAME
99
100 static int blk_queue_flags_show(struct seq_file *m, void *v)
101 {
102 struct request_queue *q = m->private;
103
104 blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
105 ARRAY_SIZE(blk_queue_flag_name));
106 seq_puts(m, "\n");
107 return 0;
108 }
109
110 static ssize_t blk_queue_flags_store(struct file *file, const char __user *buf,
111 size_t count, loff_t *ppos)
112 {
113 struct request_queue *q = file_inode(file)->i_private;
114 char opbuf[16] = { }, *op;
115
116 if (count >= sizeof(opbuf)) {
117 pr_err("%s: operation too long\n", __func__);
118 goto inval;
119 }
120
121 if (copy_from_user(opbuf, buf, count))
122 return -EFAULT;
123 op = strstrip(opbuf);
124 if (strcmp(op, "run") == 0) {
125 blk_mq_run_hw_queues(q, true);
126 } else if (strcmp(op, "start") == 0) {
127 blk_mq_start_stopped_hw_queues(q, true);
128 } else {
129 pr_err("%s: unsupported operation '%s'\n", __func__, op);
130 inval:
131 pr_err("%s: use either 'run' or 'start'\n", __func__);
132 return -EINVAL;
133 }
134 return count;
135 }
136
/* single_open() wrapper passing the request_queue through to the show op. */
static int blk_queue_flags_open(struct inode *inode, struct file *file)
{
        return single_open(file, blk_queue_flags_show, inode->i_private);
}

/* "state" is both readable (flag dump) and writable ("run"/"start"). */
static const struct file_operations blk_queue_flags_fops = {
        .open = blk_queue_flags_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = blk_queue_flags_store,
};
149
150 static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
151 {
152 if (stat->nr_samples) {
153 seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
154 stat->nr_samples, stat->mean, stat->min, stat->max);
155 } else {
156 seq_puts(m, "samples=0");
157 }
158 }
159
160 static int queue_poll_stat_show(struct seq_file *m, void *v)
161 {
162 struct request_queue *q = m->private;
163 int bucket;
164
165 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
166 seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket));
167 print_stat(m, &q->poll_stat[2*bucket]);
168 seq_puts(m, "\n");
169
170 seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
171 print_stat(m, &q->poll_stat[2*bucket+1]);
172 seq_puts(m, "\n");
173 }
174 return 0;
175 }
176
/* single_open() wrapper passing the request_queue through to the show op. */
static int queue_poll_stat_open(struct inode *inode, struct file *file)
{
        return single_open(file, queue_poll_stat_show, inode->i_private);
}

static const struct file_operations queue_poll_stat_fops = {
        .open = queue_poll_stat_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
188
#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
/* Names for blk_mq_hw_ctx->state bits, indexed by bit number. */
static const char *const hctx_state_name[] = {
        HCTX_STATE_NAME(STOPPED),
        HCTX_STATE_NAME(TAG_ACTIVE),
        HCTX_STATE_NAME(SCHED_RESTART),
        HCTX_STATE_NAME(TAG_WAITING),
        HCTX_STATE_NAME(START_ON_RUN),
};
#undef HCTX_STATE_NAME
197 #undef HCTX_STATE_NAME
198
199 static int hctx_state_show(struct seq_file *m, void *v)
200 {
201 struct blk_mq_hw_ctx *hctx = m->private;
202
203 blk_flags_show(m, hctx->state, hctx_state_name,
204 ARRAY_SIZE(hctx_state_name));
205 seq_puts(m, "\n");
206 return 0;
207 }
208
/* single_open() wrapper passing the hctx through to the show op. */
static int hctx_state_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_state_show, inode->i_private);
}

static const struct file_operations hctx_state_fops = {
        .open = hctx_state_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
220
#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
/* Names for the tag allocation policy encoded in the hctx flags. */
static const char *const alloc_policy_name[] = {
        BLK_TAG_ALLOC_NAME(FIFO),
        BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
/* Names for blk_mq_hw_ctx->flags bits (excluding the alloc policy field). */
static const char *const hctx_flag_name[] = {
        HCTX_FLAG_NAME(SHOULD_MERGE),
        HCTX_FLAG_NAME(TAG_SHARED),
        HCTX_FLAG_NAME(SG_MERGE),
        HCTX_FLAG_NAME(BLOCKING),
        HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME
237
/*
 * Show the hctx flags: the tag alloc policy by name (or number if
 * unnamed), followed by the remaining flag bits.
 */
static int hctx_flags_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;
        const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

        seq_puts(m, "alloc_policy=");
        if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
            alloc_policy_name[alloc_policy])
                seq_puts(m, alloc_policy_name[alloc_policy]);
        else
                seq_printf(m, "%d", alloc_policy);
        seq_puts(m, " ");
        /* XOR clears the alloc-policy bits so only true flags are listed */
        blk_flags_show(m,
                       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
                       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
        seq_puts(m, "\n");
        return 0;
}
256
/* single_open() wrapper passing the hctx through to the show op. */
static int hctx_flags_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_flags_show, inode->i_private);
}

static const struct file_operations hctx_flags_fops = {
        .open = hctx_flags_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
268
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
/* Names for REQ_OP_* request operations, indexed by op number. */
static const char *const op_name[] = {
        REQ_OP_NAME(READ),
        REQ_OP_NAME(WRITE),
        REQ_OP_NAME(FLUSH),
        REQ_OP_NAME(DISCARD),
        REQ_OP_NAME(ZONE_REPORT),
        REQ_OP_NAME(SECURE_ERASE),
        REQ_OP_NAME(ZONE_RESET),
        REQ_OP_NAME(WRITE_SAME),
        REQ_OP_NAME(WRITE_ZEROES),
        REQ_OP_NAME(SCSI_IN),
        REQ_OP_NAME(SCSI_OUT),
        REQ_OP_NAME(DRV_IN),
        REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME
286
#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
/* Names for the REQ_* command flag bits above the op field. */
static const char *const cmd_flag_name[] = {
        CMD_FLAG_NAME(FAILFAST_DEV),
        CMD_FLAG_NAME(FAILFAST_TRANSPORT),
        CMD_FLAG_NAME(FAILFAST_DRIVER),
        CMD_FLAG_NAME(SYNC),
        CMD_FLAG_NAME(META),
        CMD_FLAG_NAME(PRIO),
        CMD_FLAG_NAME(NOMERGE),
        CMD_FLAG_NAME(IDLE),
        CMD_FLAG_NAME(INTEGRITY),
        CMD_FLAG_NAME(FUA),
        CMD_FLAG_NAME(PREFLUSH),
        CMD_FLAG_NAME(RAHEAD),
        CMD_FLAG_NAME(BACKGROUND),
        CMD_FLAG_NAME(NOUNMAP),
};
#undef CMD_FLAG_NAME
305
#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
/* Names for request->rq_flags bits, indexed by bit number. */
static const char *const rqf_name[] = {
        RQF_NAME(SORTED),
        RQF_NAME(STARTED),
        RQF_NAME(QUEUED),
        RQF_NAME(SOFTBARRIER),
        RQF_NAME(FLUSH_SEQ),
        RQF_NAME(MIXED_MERGE),
        RQF_NAME(MQ_INFLIGHT),
        RQF_NAME(DONTPREP),
        RQF_NAME(PREEMPT),
        RQF_NAME(COPY_USER),
        RQF_NAME(FAILED),
        RQF_NAME(QUIET),
        RQF_NAME(ELVPRIV),
        RQF_NAME(IO_STAT),
        RQF_NAME(ALLOCED),
        RQF_NAME(PM),
        RQF_NAME(HASHED),
        RQF_NAME(STATS),
        RQF_NAME(SPECIAL_PAYLOAD),
};
#undef RQF_NAME
329
/*
 * seq_file ->show for one request: prints its address, op, cmd_flags,
 * rq_flags, tags, and any driver-specific detail via mq_ops->show_rq.
 */
static int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
        struct request *rq = list_entry_rq(v);
        const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
        const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

        seq_printf(m, "%p {.op=", rq);
        if (op < ARRAY_SIZE(op_name) && op_name[op])
                seq_printf(m, "%s", op_name[op]);
        else
                seq_printf(m, "%d", op);
        seq_puts(m, ", .cmd_flags=");
        /* op bits are masked out; only the flag bits are listed */
        blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
                       ARRAY_SIZE(cmd_flag_name));
        seq_puts(m, ", .rq_flags=");
        blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
                       ARRAY_SIZE(rqf_name));
        seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
                   rq->internal_tag);
        if (mq_ops->show_rq)
                mq_ops->show_rq(m, rq);
        seq_puts(m, "}\n");
        return 0;
}
354
/*
 * seq_file iterator over hctx->dispatch.  The hctx lock is held from
 * ->start to ->stop so the list cannot change mid-walk; the
 * __acquires/__releases annotations keep sparse happy about the
 * cross-function locking.
 */
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_lock(&hctx->lock);
        return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
        __releases(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
        .start = hctx_dispatch_start,
        .next = hctx_dispatch_next,
        .stop = hctx_dispatch_stop,
        .show = blk_mq_debugfs_rq_show,
};
385
/* Open the dispatch list as a full seq_file (multi-record, not single_open). */
static int hctx_dispatch_open(struct inode *inode, struct file *file)
{
        return blk_mq_debugfs_seq_open(inode, file, &hctx_dispatch_seq_ops);
}

static const struct file_operations hctx_dispatch_fops = {
        .open = hctx_dispatch_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};
397
/* Show the bitmap of software queues with pending work for this hctx. */
static int hctx_ctx_map_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        sbitmap_bitmap_show(&hctx->ctx_map, m);
        return 0;
}

static int hctx_ctx_map_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_ctx_map_show, inode->i_private);
}

static const struct file_operations hctx_ctx_map_fops = {
        .open = hctx_ctx_map_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
417
/*
 * Dump a blk_mq_tags set: counters plus the normal and (if present)
 * reserved tag bitmaps.  Caller is responsible for keeping @tags alive
 * (see the sysfs_lock use in the callers).
 */
static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
        seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
        seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
        seq_printf(m, "active_queues=%d\n",
                   atomic_read(&tags->active_queues));

        seq_puts(m, "\nbitmap_tags:\n");
        sbitmap_queue_show(&tags->bitmap_tags, m);

        if (tags->nr_reserved_tags) {
                seq_puts(m, "\nbreserved_tags:\n");
                sbitmap_queue_show(&tags->breserved_tags, m);
        }
}
434
/*
 * Show the tag set of this hctx.  sysfs_lock is taken interruptibly so
 * a blocked reader can be signalled; presumably it also keeps
 * hctx->tags stable against concurrent queue updates — verify.
 */
static int hctx_tags_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                blk_mq_debugfs_tags_show(m, hctx->tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_tags_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_tags_show, inode->i_private);
}

static const struct file_operations hctx_tags_fops = {
        .open = hctx_tags_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
463
/* Show the raw allocation bitmap underlying hctx->tags (see hctx_tags_show). */
static int hctx_tags_bitmap_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_tags_bitmap_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_tags_bitmap_show, inode->i_private);
}

static const struct file_operations hctx_tags_bitmap_fops = {
        .open = hctx_tags_bitmap_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
492
/* Show the I/O scheduler's tag set for this hctx (see hctx_tags_show). */
static int hctx_sched_tags_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                blk_mq_debugfs_tags_show(m, hctx->sched_tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_sched_tags_show, inode->i_private);
}

static const struct file_operations hctx_sched_tags_fops = {
        .open = hctx_sched_tags_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
521
/* Show the raw bitmap underlying the scheduler tags (see hctx_tags_show). */
static int hctx_sched_tags_bitmap_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_bitmap_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_sched_tags_bitmap_show, inode->i_private);
}

static const struct file_operations hctx_sched_tags_bitmap_fops = {
        .open = hctx_sched_tags_bitmap_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
550
/* Show the hctx polling counters (considered/invoked/success). */
static int hctx_io_poll_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        seq_printf(m, "considered=%lu\n", hctx->poll_considered);
        seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
        seq_printf(m, "success=%lu\n", hctx->poll_success);
        return 0;
}

static int hctx_io_poll_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_io_poll_show, inode->i_private);
}

/* Any write resets the polling counters; the data written is ignored. */
static ssize_t hctx_io_poll_write(struct file *file, const char __user *buf,
				  size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        struct blk_mq_hw_ctx *hctx = m->private;

        hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
        return count;
}

static const struct file_operations hctx_io_poll_fops = {
        .open = hctx_io_poll_open,
        .read = seq_read,
        .write = hctx_io_poll_write,
        .llseek = seq_lseek,
        .release = single_release,
};
583
/*
 * Histogram of dispatch batch sizes.  Bucket 0 counts empty dispatches,
 * bucket i counts batches of up to 1 << (i - 1) requests, and the last
 * bucket is open-ended (printed with a '+' suffix).
 */
static int hctx_dispatched_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;
        int i;

        seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
                unsigned int d = 1U << (i - 1);

                seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
        }

        /* final open-ended bucket: i == BLK_MQ_MAX_DISPATCH_ORDER - 1 here */
        seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
        return 0;
}

static int hctx_dispatched_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_dispatched_show, inode->i_private);
}

/* Any write zeroes the whole histogram; the data written is ignored. */
static ssize_t hctx_dispatched_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        struct blk_mq_hw_ctx *hctx = m->private;
        int i;

        for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
                hctx->dispatched[i] = 0;
        return count;
}

static const struct file_operations hctx_dispatched_fops = {
        .open = hctx_dispatched_open,
        .read = seq_read,
        .write = hctx_dispatched_write,
        .llseek = seq_lseek,
        .release = single_release,
};
625
/* Show the count of requests queued to this hctx; a write resets it. */
static int hctx_queued_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        seq_printf(m, "%lu\n", hctx->queued);
        return 0;
}

static int hctx_queued_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_queued_show, inode->i_private);
}

static ssize_t hctx_queued_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        struct blk_mq_hw_ctx *hctx = m->private;

        hctx->queued = 0;
        return count;
}

static const struct file_operations hctx_queued_fops = {
        .open = hctx_queued_open,
        .read = seq_read,
        .write = hctx_queued_write,
        .llseek = seq_lseek,
        .release = single_release,
};
656
/* Show how many times this hctx has been run; a write resets the count. */
static int hctx_run_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        seq_printf(m, "%lu\n", hctx->run);
        return 0;
}

static int hctx_run_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_run_show, inode->i_private);
}

static ssize_t hctx_run_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        struct blk_mq_hw_ctx *hctx = m->private;

        hctx->run = 0;
        return count;
}

static const struct file_operations hctx_run_fops = {
        .open = hctx_run_open,
        .read = seq_read,
        .write = hctx_run_write,
        .llseek = seq_lseek,
        .release = single_release,
};
687
/* Show the number of requests currently in flight on this hctx (read-only). */
static int hctx_active_show(struct seq_file *m, void *v)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
        return 0;
}

static int hctx_active_open(struct inode *inode, struct file *file)
{
        return single_open(file, hctx_active_show, inode->i_private);
}

static const struct file_operations hctx_active_fops = {
        .open = hctx_active_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
707
/*
 * seq_file iterator over a software queue's rq_list, printed with
 * blk_mq_debugfs_rq_show().  The ctx lock is held from ->start to
 * ->stop, mirroring the hctx dispatch iterator above.
 */
static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
        __acquires(&ctx->lock)
{
        struct blk_mq_ctx *ctx = m->private;

        spin_lock(&ctx->lock);
        return seq_list_start(&ctx->rq_list, *pos);
}

static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_ctx *ctx = m->private;

        return seq_list_next(v, &ctx->rq_list, pos);
}

static void ctx_rq_list_stop(struct seq_file *m, void *v)
        __releases(&ctx->lock)
{
        struct blk_mq_ctx *ctx = m->private;

        spin_unlock(&ctx->lock);
}

static const struct seq_operations ctx_rq_list_seq_ops = {
        .start = ctx_rq_list_start,
        .next = ctx_rq_list_next,
        .stop = ctx_rq_list_stop,
        .show = blk_mq_debugfs_rq_show,
};

static int ctx_rq_list_open(struct inode *inode, struct file *file)
{
        return blk_mq_debugfs_seq_open(inode, file, &ctx_rq_list_seq_ops);
}

static const struct file_operations ctx_rq_list_fops = {
        .open = ctx_rq_list_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};
750
/*
 * Show per-ctx dispatch counters, sync ([1]) before async ([0]);
 * a write resets both.
 */
static int ctx_dispatched_show(struct seq_file *m, void *v)
{
        struct blk_mq_ctx *ctx = m->private;

        seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
        return 0;
}

static int ctx_dispatched_open(struct inode *inode, struct file *file)
{
        return single_open(file, ctx_dispatched_show, inode->i_private);
}

static ssize_t ctx_dispatched_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        struct blk_mq_ctx *ctx = m->private;

        ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
        return count;
}

static const struct file_operations ctx_dispatched_fops = {
        .open = ctx_dispatched_open,
        .read = seq_read,
        .write = ctx_dispatched_write,
        .llseek = seq_lseek,
        .release = single_release,
};
781
/* Show the per-ctx merged-request counter; a write resets it. */
static int ctx_merged_show(struct seq_file *m, void *v)
{
        struct blk_mq_ctx *ctx = m->private;

        seq_printf(m, "%lu\n", ctx->rq_merged);
        return 0;
}

static int ctx_merged_open(struct inode *inode, struct file *file)
{
        return single_open(file, ctx_merged_show, inode->i_private);
}

static ssize_t ctx_merged_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        struct blk_mq_ctx *ctx = m->private;

        ctx->rq_merged = 0;
        return count;
}

static const struct file_operations ctx_merged_fops = {
        .open = ctx_merged_open,
        .read = seq_read,
        .write = ctx_merged_write,
        .llseek = seq_lseek,
        .release = single_release,
};
812
/*
 * Show per-ctx completion counters, sync ([1]) before async ([0]);
 * a write resets both.
 */
static int ctx_completed_show(struct seq_file *m, void *v)
{
        struct blk_mq_ctx *ctx = m->private;

        seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
        return 0;
}

static int ctx_completed_open(struct inode *inode, struct file *file)
{
        return single_open(file, ctx_completed_show, inode->i_private);
}

static ssize_t ctx_completed_write(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        struct blk_mq_ctx *ctx = m->private;

        ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
        return count;
}

static const struct file_operations ctx_completed_fops = {
        .open = ctx_completed_open,
        .read = seq_read,
        .write = ctx_completed_write,
        .llseek = seq_lseek,
        .release = single_release,
};
843
/* Per-queue attributes, created directly under <debugfs>/block/<dev>/mq. */
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
        {"poll_stat", 0400, &queue_poll_stat_fops},
        {"state", 0600, &blk_queue_flags_fops},
        {},
};

/* Per-hardware-context attributes, created under mq/<hctx-number>/. */
static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
        {"state", 0400, &hctx_state_fops},
        {"flags", 0400, &hctx_flags_fops},
        {"dispatch", 0400, &hctx_dispatch_fops},
        {"ctx_map", 0400, &hctx_ctx_map_fops},
        {"tags", 0400, &hctx_tags_fops},
        {"tags_bitmap", 0400, &hctx_tags_bitmap_fops},
        {"sched_tags", 0400, &hctx_sched_tags_fops},
        {"sched_tags_bitmap", 0400, &hctx_sched_tags_bitmap_fops},
        {"io_poll", 0600, &hctx_io_poll_fops},
        {"dispatched", 0600, &hctx_dispatched_fops},
        {"queued", 0600, &hctx_queued_fops},
        {"run", 0600, &hctx_run_fops},
        {"active", 0400, &hctx_active_fops},
        {},
};

/* Per-software-context attributes, created under mq/<hctx>/cpu<N>/. */
static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
        {"rq_list", 0400, &ctx_rq_list_fops},
        {"dispatched", 0600, &ctx_dispatched_fops},
        {"merged", 0600, &ctx_merged_fops},
        {"completed", 0600, &ctx_completed_fops},
        {},
};
874
/*
 * Create the debugfs directory for @q (named after its parent kobject,
 * i.e. the disk) and populate the mq hierarchy beneath it.  On any
 * failure, everything created so far is torn down and -ENOMEM is
 * returned; -ENOENT means the debugfs root itself is missing.
 */
int blk_mq_debugfs_register(struct request_queue *q)
{
        if (!blk_debugfs_root)
                return -ENOENT;

        q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
                                            blk_debugfs_root);
        if (!q->debugfs_dir)
                goto err;

        if (blk_mq_debugfs_register_mq(q))
                goto err;

        return 0;

err:
        blk_mq_debugfs_unregister(q);
        return -ENOMEM;
}
894
/* Remove the whole debugfs tree for @q and clear the cached dentries. */
void blk_mq_debugfs_unregister(struct request_queue *q)
{
        debugfs_remove_recursive(q->debugfs_dir);
        q->mq_debugfs_dir = NULL;
        q->debugfs_dir = NULL;
}
901
902 static bool debugfs_create_files(struct dentry *parent, void *data,
903 const struct blk_mq_debugfs_attr *attr)
904 {
905 for (; attr->name; attr++) {
906 if (!debugfs_create_file(attr->name, attr->mode, parent,
907 data, attr->fops))
908 return false;
909 }
910 return true;
911 }
912
/*
 * Create the "cpu<N>" directory for one software context under its
 * hctx directory and populate it with the ctx attributes.
 */
static int blk_mq_debugfs_register_ctx(struct request_queue *q,
				       struct blk_mq_ctx *ctx,
				       struct dentry *hctx_dir)
{
        struct dentry *ctx_dir;
        char name[20];

        snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
        ctx_dir = debugfs_create_dir(name, hctx_dir);
        if (!ctx_dir)
                return -ENOMEM;

        if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
                return -ENOMEM;

        return 0;
}
930
/*
 * Create the numbered directory for one hardware context under mq/,
 * populate its attributes, and recurse into each mapped software
 * context.  Partial creations are cleaned up by the caller's error
 * path (blk_mq_debugfs_unregister_mq()).
 */
static int blk_mq_debugfs_register_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        struct dentry *hctx_dir;
        char name[20];
        int i;

        snprintf(name, sizeof(name), "%u", hctx->queue_num);
        hctx_dir = debugfs_create_dir(name, q->mq_debugfs_dir);
        if (!hctx_dir)
                return -ENOMEM;

        if (!debugfs_create_files(hctx_dir, hctx, blk_mq_debugfs_hctx_attrs))
                return -ENOMEM;

        hctx_for_each_ctx(hctx, ctx, i) {
                if (blk_mq_debugfs_register_ctx(q, ctx, hctx_dir))
                        return -ENOMEM;
        }

        return 0;
}
954
/*
 * Create the "mq" directory under the queue's debugfs directory and
 * populate the queue, hctx, and ctx attribute hierarchy.  On failure
 * the partially built "mq" subtree is removed and -ENOMEM returned;
 * -ENOENT means blk_mq_debugfs_register() has not run for @q.
 */
int blk_mq_debugfs_register_mq(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!q->debugfs_dir)
                return -ENOENT;

        q->mq_debugfs_dir = debugfs_create_dir("mq", q->debugfs_dir);
        if (!q->mq_debugfs_dir)
                goto err;

        if (!debugfs_create_files(q->mq_debugfs_dir, q, blk_mq_debugfs_queue_attrs))
                goto err;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (blk_mq_debugfs_register_hctx(q, hctx))
                        goto err;
        }

        return 0;

err:
        blk_mq_debugfs_unregister_mq(q);
        return -ENOMEM;
}
981
/* Remove only the "mq" subtree, leaving the queue's top directory intact. */
void blk_mq_debugfs_unregister_mq(struct request_queue *q)
{
        debugfs_remove_recursive(q->mq_debugfs_dir);
        q->mq_debugfs_dir = NULL;
}