]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - block/blk-mq-sysfs.c
blk-mq: export software queue pending map to debugfs
[mirror_ubuntu-bionic-kernel.git] / block / blk-mq-sysfs.c
CommitLineData
320ae51f
JA
1#include <linux/kernel.h>
2#include <linux/module.h>
3#include <linux/backing-dev.h>
4#include <linux/bio.h>
5#include <linux/blkdev.h>
6#include <linux/mm.h>
7#include <linux/init.h>
8#include <linux/slab.h>
9#include <linux/workqueue.h>
10#include <linux/smp.h>
11
12#include <linux/blk-mq.h>
13#include "blk-mq.h"
14#include "blk-mq-tag.h"
15
/*
 * No-op kobject release: the embedded ctx/hctx kobjects are freed with
 * their containing structures, not via the kobject layer.
 */
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}
19
20struct blk_mq_ctx_sysfs_entry {
21 struct attribute attr;
22 ssize_t (*show)(struct blk_mq_ctx *, char *);
23 ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
24};
25
26struct blk_mq_hw_ctx_sysfs_entry {
27 struct attribute attr;
28 ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
29 ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
30};
31
32static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
33 char *page)
34{
35 struct blk_mq_ctx_sysfs_entry *entry;
36 struct blk_mq_ctx *ctx;
37 struct request_queue *q;
38 ssize_t res;
39
40 entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
41 ctx = container_of(kobj, struct blk_mq_ctx, kobj);
42 q = ctx->queue;
43
44 if (!entry->show)
45 return -EIO;
46
47 res = -ENOENT;
48 mutex_lock(&q->sysfs_lock);
49 if (!blk_queue_dying(q))
50 res = entry->show(ctx, page);
51 mutex_unlock(&q->sysfs_lock);
52 return res;
53}
54
55static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
56 const char *page, size_t length)
57{
58 struct blk_mq_ctx_sysfs_entry *entry;
59 struct blk_mq_ctx *ctx;
60 struct request_queue *q;
61 ssize_t res;
62
63 entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
64 ctx = container_of(kobj, struct blk_mq_ctx, kobj);
65 q = ctx->queue;
66
67 if (!entry->store)
68 return -EIO;
69
70 res = -ENOENT;
71 mutex_lock(&q->sysfs_lock);
72 if (!blk_queue_dying(q))
73 res = entry->store(ctx, page, length);
74 mutex_unlock(&q->sysfs_lock);
75 return res;
76}
77
78static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
79 struct attribute *attr, char *page)
80{
81 struct blk_mq_hw_ctx_sysfs_entry *entry;
82 struct blk_mq_hw_ctx *hctx;
83 struct request_queue *q;
84 ssize_t res;
85
86 entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
87 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
88 q = hctx->queue;
89
90 if (!entry->show)
91 return -EIO;
92
93 res = -ENOENT;
94 mutex_lock(&q->sysfs_lock);
95 if (!blk_queue_dying(q))
96 res = entry->show(hctx, page);
97 mutex_unlock(&q->sysfs_lock);
98 return res;
99}
100
101static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
102 struct attribute *attr, const char *page,
103 size_t length)
104{
105 struct blk_mq_hw_ctx_sysfs_entry *entry;
106 struct blk_mq_hw_ctx *hctx;
107 struct request_queue *q;
108 ssize_t res;
109
110 entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
111 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
112 q = hctx->queue;
113
114 if (!entry->store)
115 return -EIO;
116
117 res = -ENOENT;
118 mutex_lock(&q->sysfs_lock);
119 if (!blk_queue_dying(q))
120 res = entry->store(hctx, page, length);
121 mutex_unlock(&q->sysfs_lock);
122 return res;
123}
124
125static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
126{
127 return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
128 ctx->rq_dispatched[0]);
129}
130
131static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
132{
133 return sprintf(page, "%lu\n", ctx->rq_merged);
134}
135
136static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
137{
138 return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
139 ctx->rq_completed[0]);
140}
141
05229bee
JA
142static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
143{
6e219353
SB
144 return sprintf(page, "considered=%lu, invoked=%lu, success=%lu\n",
145 hctx->poll_considered, hctx->poll_invoked,
146 hctx->poll_success);
05229bee
JA
147}
148
d21ea4bc
SB
149static ssize_t blk_mq_hw_sysfs_poll_store(struct blk_mq_hw_ctx *hctx,
150 const char *page, size_t size)
151{
152 hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
153
154 return size;
155}
156
320ae51f
JA
157static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
158 char *page)
159{
160 return sprintf(page, "%lu\n", hctx->queued);
161}
162
163static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
164{
165 return sprintf(page, "%lu\n", hctx->run);
166}
167
168static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
169 char *page)
170{
171 char *start_page = page;
172 int i;
173
174 page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);
175
703fd1c0
JA
176 for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
177 unsigned int d = 1U << (i - 1);
320ae51f 178
703fd1c0 179 page += sprintf(page, "%8u\t%lu\n", d, hctx->dispatched[i]);
320ae51f
JA
180 }
181
703fd1c0
JA
182 page += sprintf(page, "%8u+\t%lu\n", 1U << (i - 1),
183 hctx->dispatched[i]);
320ae51f
JA
184 return page - start_page;
185}
186
bd166ef1
JA
187static ssize_t blk_mq_hw_sysfs_sched_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
188{
189 if (hctx->sched_tags)
190 return blk_mq_tag_sysfs_show(hctx->sched_tags, page);
191
192 return 0;
193}
194
320ae51f
JA
195static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
196{
197 return blk_mq_tag_sysfs_show(hctx->tags, page);
198}
199
0d2602ca
JA
200static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
201{
202 return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
203}
204
676141e4
JA
205static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
206{
cb2da43e 207 unsigned int i, first = 1;
676141e4
JA
208 ssize_t ret = 0;
209
cb2da43e 210 for_each_cpu(i, hctx->cpumask) {
676141e4
JA
211 if (first)
212 ret += sprintf(ret + page, "%u", i);
213 else
214 ret += sprintf(ret + page, ", %u", i);
215
216 first = 0;
217 }
218
676141e4
JA
219 ret += sprintf(ret + page, "\n");
220 return ret;
221}
222
cf43e6be
JA
223static void blk_mq_stat_clear(struct blk_mq_hw_ctx *hctx)
224{
225 struct blk_mq_ctx *ctx;
226 unsigned int i;
227
228 hctx_for_each_ctx(hctx, ctx, i) {
229 blk_stat_init(&ctx->stat[BLK_STAT_READ]);
230 blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
231 }
232}
233
234static ssize_t blk_mq_hw_sysfs_stat_store(struct blk_mq_hw_ctx *hctx,
235 const char *page, size_t count)
236{
237 blk_mq_stat_clear(hctx);
238 return count;
239}
240
241static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
242{
243 return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
244 pre, (long long) stat->nr_samples,
245 (long long) stat->mean, (long long) stat->min,
246 (long long) stat->max);
247}
248
249static ssize_t blk_mq_hw_sysfs_stat_show(struct blk_mq_hw_ctx *hctx, char *page)
250{
251 struct blk_rq_stat stat[2];
252 ssize_t ret;
253
254 blk_stat_init(&stat[BLK_STAT_READ]);
255 blk_stat_init(&stat[BLK_STAT_WRITE]);
256
257 blk_hctx_stat_get(hctx, stat);
258
259 ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
260 ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
261 return ret;
262}
263
320ae51f
JA
264static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
265 .attr = {.name = "dispatched", .mode = S_IRUGO },
266 .show = blk_mq_sysfs_dispatched_show,
267};
268static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
269 .attr = {.name = "merged", .mode = S_IRUGO },
270 .show = blk_mq_sysfs_merged_show,
271};
272static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
273 .attr = {.name = "completed", .mode = S_IRUGO },
274 .show = blk_mq_sysfs_completed_show,
275};
320ae51f
JA
276
277static struct attribute *default_ctx_attrs[] = {
278 &blk_mq_sysfs_dispatched.attr,
279 &blk_mq_sysfs_merged.attr,
280 &blk_mq_sysfs_completed.attr,
320ae51f
JA
281 NULL,
282};
283
284static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
285 .attr = {.name = "queued", .mode = S_IRUGO },
286 .show = blk_mq_hw_sysfs_queued_show,
287};
288static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
289 .attr = {.name = "run", .mode = S_IRUGO },
290 .show = blk_mq_hw_sysfs_run_show,
291};
292static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
293 .attr = {.name = "dispatched", .mode = S_IRUGO },
294 .show = blk_mq_hw_sysfs_dispatched_show,
295};
0d2602ca
JA
296static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
297 .attr = {.name = "active", .mode = S_IRUGO },
298 .show = blk_mq_hw_sysfs_active_show,
299};
bd166ef1
JA
300static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_sched_tags = {
301 .attr = {.name = "sched_tags", .mode = S_IRUGO },
302 .show = blk_mq_hw_sysfs_sched_tags_show,
303};
320ae51f
JA
304static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
305 .attr = {.name = "tags", .mode = S_IRUGO },
306 .show = blk_mq_hw_sysfs_tags_show,
307};
676141e4
JA
308static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
309 .attr = {.name = "cpu_list", .mode = S_IRUGO },
310 .show = blk_mq_hw_sysfs_cpus_show,
311};
05229bee 312static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
d21ea4bc 313 .attr = {.name = "io_poll", .mode = S_IWUSR | S_IRUGO },
05229bee 314 .show = blk_mq_hw_sysfs_poll_show,
d21ea4bc 315 .store = blk_mq_hw_sysfs_poll_store,
05229bee 316};
cf43e6be
JA
317static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_stat = {
318 .attr = {.name = "stats", .mode = S_IRUGO | S_IWUSR },
319 .show = blk_mq_hw_sysfs_stat_show,
320 .store = blk_mq_hw_sysfs_stat_store,
321};
320ae51f
JA
322
323static struct attribute *default_hw_ctx_attrs[] = {
324 &blk_mq_hw_sysfs_queued.attr,
325 &blk_mq_hw_sysfs_run.attr,
326 &blk_mq_hw_sysfs_dispatched.attr,
320ae51f 327 &blk_mq_hw_sysfs_tags.attr,
bd166ef1 328 &blk_mq_hw_sysfs_sched_tags.attr,
676141e4 329 &blk_mq_hw_sysfs_cpus.attr,
0d2602ca 330 &blk_mq_hw_sysfs_active.attr,
05229bee 331 &blk_mq_hw_sysfs_poll.attr,
cf43e6be 332 &blk_mq_hw_sysfs_stat.attr,
320ae51f
JA
333 NULL,
334};
335
336static const struct sysfs_ops blk_mq_sysfs_ops = {
337 .show = blk_mq_sysfs_show,
338 .store = blk_mq_sysfs_store,
339};
340
341static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
342 .show = blk_mq_hw_sysfs_show,
343 .store = blk_mq_hw_sysfs_store,
344};
345
346static struct kobj_type blk_mq_ktype = {
347 .sysfs_ops = &blk_mq_sysfs_ops,
348 .release = blk_mq_sysfs_release,
349};
350
351static struct kobj_type blk_mq_ctx_ktype = {
352 .sysfs_ops = &blk_mq_sysfs_ops,
353 .default_attrs = default_ctx_attrs,
74170118 354 .release = blk_mq_sysfs_release,
320ae51f
JA
355};
356
357static struct kobj_type blk_mq_hw_ktype = {
358 .sysfs_ops = &blk_mq_hw_sysfs_ops,
359 .default_attrs = default_hw_ctx_attrs,
74170118 360 .release = blk_mq_sysfs_release,
320ae51f
JA
361};
362
ee3c5db0 363static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
67aec14c
JA
364{
365 struct blk_mq_ctx *ctx;
366 int i;
367
4593fdbe 368 if (!hctx->nr_ctx)
67aec14c
JA
369 return;
370
371 hctx_for_each_ctx(hctx, ctx, i)
372 kobject_del(&ctx->kobj);
373
374 kobject_del(&hctx->kobj);
375}
376
ee3c5db0 377static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
67aec14c
JA
378{
379 struct request_queue *q = hctx->queue;
380 struct blk_mq_ctx *ctx;
381 int i, ret;
382
4593fdbe 383 if (!hctx->nr_ctx)
67aec14c
JA
384 return 0;
385
386 ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
387 if (ret)
388 return ret;
389
390 hctx_for_each_ctx(hctx, ctx, i) {
391 ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
392 if (ret)
393 break;
394 }
395
396 return ret;
397}
398
b21d5b30 399static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
320ae51f 400{
85157366
AV
401 struct blk_mq_hw_ctx *hctx;
402 struct blk_mq_ctx *ctx;
403 int i, j;
404
405 queue_for_each_hw_ctx(q, hctx, i) {
67aec14c
JA
406 blk_mq_unregister_hctx(hctx);
407
408 hctx_for_each_ctx(hctx, ctx, j)
85157366 409 kobject_put(&ctx->kobj);
67aec14c 410
85157366
AV
411 kobject_put(&hctx->kobj);
412 }
320ae51f 413
07e4fead
OS
414 blk_mq_debugfs_unregister(q);
415
320ae51f
JA
416 kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
417 kobject_del(&q->mq_kobj);
85157366 418 kobject_put(&q->mq_kobj);
320ae51f 419
b21d5b30 420 kobject_put(&dev->kobj);
4593fdbe
AM
421
422 q->mq_sysfs_init_done = false;
c0f3fd2b
JA
423}
424
/*
 * Public unregister entry point: wraps __blk_mq_unregister_dev() with CPU
 * hotplug disabled so the ctx/hctx mappings cannot change underneath it.
 */
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	blk_mq_disable_hotplug();
	__blk_mq_unregister_dev(dev, q);
	blk_mq_enable_hotplug();
}
431
868f2f0b
KB
432void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
433{
434 kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
435}
436
67aec14c
JA
437static void blk_mq_sysfs_init(struct request_queue *q)
438{
67aec14c 439 struct blk_mq_ctx *ctx;
897bb0c7 440 int cpu;
67aec14c
JA
441
442 kobject_init(&q->mq_kobj, &blk_mq_ktype);
443
897bb0c7
TG
444 for_each_possible_cpu(cpu) {
445 ctx = per_cpu_ptr(q->queue_ctx, cpu);
06a41a99 446 kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
897bb0c7 447 }
67aec14c
JA
448}
449
b21d5b30 450int blk_mq_register_dev(struct device *dev, struct request_queue *q)
320ae51f 451{
320ae51f 452 struct blk_mq_hw_ctx *hctx;
67aec14c 453 int ret, i;
320ae51f 454
4593fdbe
AM
455 blk_mq_disable_hotplug();
456
67aec14c 457 blk_mq_sysfs_init(q);
320ae51f
JA
458
459 ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
460 if (ret < 0)
4593fdbe 461 goto out;
320ae51f
JA
462
463 kobject_uevent(&q->mq_kobj, KOBJ_ADD);
464
07e4fead
OS
465 blk_mq_debugfs_register(q, kobject_name(&dev->kobj));
466
320ae51f 467 queue_for_each_hw_ctx(q, hctx, i) {
67aec14c 468 ret = blk_mq_register_hctx(hctx);
320ae51f
JA
469 if (ret)
470 break;
320ae51f
JA
471 }
472
4593fdbe 473 if (ret)
b21d5b30 474 __blk_mq_unregister_dev(dev, q);
4593fdbe
AM
475 else
476 q->mq_sysfs_init_done = true;
477out:
478 blk_mq_enable_hotplug();
320ae51f 479
4593fdbe 480 return ret;
320ae51f 481}
b21d5b30 482EXPORT_SYMBOL_GPL(blk_mq_register_dev);
67aec14c
JA
483
484void blk_mq_sysfs_unregister(struct request_queue *q)
485{
486 struct blk_mq_hw_ctx *hctx;
487 int i;
488
4593fdbe
AM
489 if (!q->mq_sysfs_init_done)
490 return;
491
07e4fead
OS
492 blk_mq_debugfs_unregister_hctxs(q);
493
67aec14c
JA
494 queue_for_each_hw_ctx(q, hctx, i)
495 blk_mq_unregister_hctx(hctx);
496}
497
498int blk_mq_sysfs_register(struct request_queue *q)
499{
500 struct blk_mq_hw_ctx *hctx;
501 int i, ret = 0;
502
4593fdbe
AM
503 if (!q->mq_sysfs_init_done)
504 return ret;
505
07e4fead
OS
506 blk_mq_debugfs_register_hctxs(q);
507
67aec14c
JA
508 queue_for_each_hw_ctx(q, hctx, i) {
509 ret = blk_mq_register_hctx(hctx);
510 if (ret)
511 break;
512 }
513
514 return ret;
515}