// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

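/* Format one blk_rq_stat bucket as "samples=..., mean=..., min=..., max=...". */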
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

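/*
 * "poll_stat": poll latency statistics, one read and one write line per I/O
 * size bucket (1 << (9 + bucket) bytes); read and write stats are interleaved
 * in q->poll_stat[].
 */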
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

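/* "requeue_list": walk the requests waiting to be requeued, under q->requeue_lock. */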
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

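/*
 * Print the names of the flag bits set in 'flags', '|'-separated, falling
 * back to the raw bit number for bits that have no name.
 */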
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

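/*
 * "state" is also writable: "run", "start" and "kick" let the user poke a
 * wedged queue from userspace for debugging.
 */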
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has
	 * called blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set
	 * to avoid triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

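/* "write_hints": dump q->write_hints[]; any write to the file clears the array. */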
static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

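/* Queue-level attributes, created directly in the per-queue debugfs directory. */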
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

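/*
 * Dump one request as "<addr> {.op=..., .cmd_flags=..., .rq_flags=...,
 * .state=..., .tag=..., .internal_tag=...}", followed by any driver-specific
 * detail from the driver's ->show_rq() hook.
 */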
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

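/* "dispatch": requests sitting on this hctx's dispatch list, under hctx->lock. */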
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

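/* "busy": dump every in-flight request in the tag set that belongs to this hctx. */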
static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(tags->breserved_tags, m);
	}
}

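/*
 * The tags files below take q->sysfs_lock (interruptibly) so that hctx->tags
 * and hctx->sched_tags stay stable while they are being dumped, e.g. across
 * a concurrent update of the number of hardware queues.
 */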
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags->sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags->sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

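/* The hctx counters below (io_poll, dispatched, queued, run) reset on any write. */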
static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

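/*
 * Generate the seq_file operations for walking one of the per-CPU software
 * queue request lists (default/read/poll) under ctx->lock.
 */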
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}

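/*
 * Generic debugfs plumbing: the parent directory's inode carries the object
 * being inspected (queue, hctx or ctx) in i_private, and each file's inode
 * carries its blk_mq_debugfs_attr.
 */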
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

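/*
 * q->debugfs_dir itself is removed recursively elsewhere when the queue goes
 * away; here we only need to forget the scheduler directory inside it.
 */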
void blk_mq_debugfs_unregister(struct request_queue *q)
{
	q->sched_debugfs_dir = NULL;
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent directory has not been created yet, return; we will be
	 * called again later on and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
	debugfs_remove_recursive(q->rqos_debugfs_dir);
	q->rqos_debugfs_dir = NULL;
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}