]>
Commit | Line | Data |
---|---|---|
320ae51f JA |
1 | #include <linux/kernel.h> |
2 | #include <linux/module.h> | |
3 | #include <linux/backing-dev.h> | |
4 | #include <linux/bio.h> | |
5 | #include <linux/blkdev.h> | |
6 | #include <linux/mm.h> | |
7 | #include <linux/init.h> | |
8 | #include <linux/slab.h> | |
9 | #include <linux/workqueue.h> | |
10 | #include <linux/smp.h> | |
11 | ||
12 | #include <linux/blk-mq.h> | |
13 | #include "blk-mq.h" | |
14 | #include "blk-mq-tag.h" | |
15 | ||
/*
 * Release handler shared by the queue-level "mq" kobject and the per-cpu
 * software-context kobjects (see blk_mq_ktype and blk_mq_ctx_ktype below).
 * Intentionally empty: the memory embedding these kobjects is owned and
 * freed elsewhere (the ctx structures live in per-cpu queue data), so
 * there is nothing to free here.
 */
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}
19 | ||
/*
 * Release handler for a hardware-context kobject.  Runs when the last
 * reference to hctx->kobj is dropped and frees the hardware context
 * together with its software-context map.
 */
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	/* Free the ctx pointer array before the hctx that owns it. */
	kfree(hctx->ctxs);
	kfree(hctx);
}
27 | ||
/*
 * A sysfs attribute for a software (per-cpu) context: pairs the generic
 * struct attribute with typed show/store callbacks taking a blk_mq_ctx.
 * Either callback may be NULL, which the dispatchers report as -EIO.
 */
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};
33 | ||
/*
 * A sysfs attribute for a hardware context: same shape as
 * blk_mq_ctx_sysfs_entry but with callbacks typed on blk_mq_hw_ctx.
 */
struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
39 | ||
40 | static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr, | |
41 | char *page) | |
42 | { | |
43 | struct blk_mq_ctx_sysfs_entry *entry; | |
44 | struct blk_mq_ctx *ctx; | |
45 | struct request_queue *q; | |
46 | ssize_t res; | |
47 | ||
48 | entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr); | |
49 | ctx = container_of(kobj, struct blk_mq_ctx, kobj); | |
50 | q = ctx->queue; | |
51 | ||
52 | if (!entry->show) | |
53 | return -EIO; | |
54 | ||
55 | res = -ENOENT; | |
56 | mutex_lock(&q->sysfs_lock); | |
57 | if (!blk_queue_dying(q)) | |
58 | res = entry->show(ctx, page); | |
59 | mutex_unlock(&q->sysfs_lock); | |
60 | return res; | |
61 | } | |
62 | ||
63 | static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr, | |
64 | const char *page, size_t length) | |
65 | { | |
66 | struct blk_mq_ctx_sysfs_entry *entry; | |
67 | struct blk_mq_ctx *ctx; | |
68 | struct request_queue *q; | |
69 | ssize_t res; | |
70 | ||
71 | entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr); | |
72 | ctx = container_of(kobj, struct blk_mq_ctx, kobj); | |
73 | q = ctx->queue; | |
74 | ||
75 | if (!entry->store) | |
76 | return -EIO; | |
77 | ||
78 | res = -ENOENT; | |
79 | mutex_lock(&q->sysfs_lock); | |
80 | if (!blk_queue_dying(q)) | |
81 | res = entry->store(ctx, page, length); | |
82 | mutex_unlock(&q->sysfs_lock); | |
83 | return res; | |
84 | } | |
85 | ||
86 | static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj, | |
87 | struct attribute *attr, char *page) | |
88 | { | |
89 | struct blk_mq_hw_ctx_sysfs_entry *entry; | |
90 | struct blk_mq_hw_ctx *hctx; | |
91 | struct request_queue *q; | |
92 | ssize_t res; | |
93 | ||
94 | entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr); | |
95 | hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); | |
96 | q = hctx->queue; | |
97 | ||
98 | if (!entry->show) | |
99 | return -EIO; | |
100 | ||
101 | res = -ENOENT; | |
102 | mutex_lock(&q->sysfs_lock); | |
103 | if (!blk_queue_dying(q)) | |
104 | res = entry->show(hctx, page); | |
105 | mutex_unlock(&q->sysfs_lock); | |
106 | return res; | |
107 | } | |
108 | ||
109 | static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj, | |
110 | struct attribute *attr, const char *page, | |
111 | size_t length) | |
112 | { | |
113 | struct blk_mq_hw_ctx_sysfs_entry *entry; | |
114 | struct blk_mq_hw_ctx *hctx; | |
115 | struct request_queue *q; | |
116 | ssize_t res; | |
117 | ||
118 | entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr); | |
119 | hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); | |
120 | q = hctx->queue; | |
121 | ||
122 | if (!entry->store) | |
123 | return -EIO; | |
124 | ||
125 | res = -ENOENT; | |
126 | mutex_lock(&q->sysfs_lock); | |
127 | if (!blk_queue_dying(q)) | |
128 | res = entry->store(hctx, page, length); | |
129 | mutex_unlock(&q->sysfs_lock); | |
130 | return res; | |
131 | } | |
132 | ||
d96b37c0 OS |
133 | static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx, |
134 | char *page) | |
bd166ef1 | 135 | { |
d96b37c0 | 136 | return sprintf(page, "%u\n", hctx->tags->nr_tags); |
bd166ef1 JA |
137 | } |
138 | ||
d96b37c0 OS |
139 | static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx, |
140 | char *page) | |
320ae51f | 141 | { |
d96b37c0 | 142 | return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags); |
320ae51f JA |
143 | } |
144 | ||
676141e4 JA |
145 | static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) |
146 | { | |
cb2da43e | 147 | unsigned int i, first = 1; |
676141e4 JA |
148 | ssize_t ret = 0; |
149 | ||
cb2da43e | 150 | for_each_cpu(i, hctx->cpumask) { |
676141e4 JA |
151 | if (first) |
152 | ret += sprintf(ret + page, "%u", i); | |
153 | else | |
154 | ret += sprintf(ret + page, ", %u", i); | |
155 | ||
156 | first = 0; | |
157 | } | |
158 | ||
676141e4 JA |
159 | ret += sprintf(ret + page, "\n"); |
160 | return ret; | |
161 | } | |
162 | ||
/* No per-cpu ctx attributes are exported; each cpuN directory is empty. */
static struct attribute *default_ctx_attrs[] = {
	NULL,
};
166 | ||
/* "nr_tags": total tag depth of the hw queue, read-only. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
/* "nr_reserved_tags": tags set aside for reserved requests, read-only. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
/* "cpu_list": CPUs mapped to this hw queue, read-only. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};
320ae51f JA |
179 | |
/* Attributes created in every hardware-context sysfs directory. */
static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
186 | ||
/* Dispatch table for software-context (per-cpu ctx) attributes. */
static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

/* Dispatch table for hardware-context attributes. */
static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};
196 | ||
/* The queue's top-level "mq" kobject: no attributes, empty release. */
static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

/* Per-cpu software-context kobjects ("cpuN" directories). */
static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

/*
 * Hardware-context kobjects; the release handler frees the hctx itself,
 * so the final kobject_put() owns the hctx's lifetime.
 */
static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_hw_sysfs_release,
};
213 | ||
/*
 * Remove a hardware context and its software contexts from sysfs.
 * A hctx with no mapped ctxs was never added (see blk_mq_register_hctx()),
 * so skip it.
 */
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	/* Remove the cpuN children before their parent directory. */
	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}
227 | ||
/*
 * Add a hardware-context directory under the queue's "mq" kobject, plus a
 * "cpuN" child for every software context mapped to it.  A hctx with no
 * mapped ctxs is silently skipped (and likewise skipped on unregister).
 *
 * Returns 0 on success or the first kobject_add() error.  On failure
 * partway through the ctx loop, the kobjects added so far are left in
 * place; the caller unwinds via blk_mq_unregister_hctx() (see the error
 * path in blk_mq_register_dev()).
 */
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}
249 | ||
/*
 * Tear down the blk-mq sysfs and debugfs hierarchy for @q and drop the
 * device reference taken by kobject_get() in blk_mq_register_dev().
 * Both callers bracket this with blk_mq_disable/enable_hotplug().
 */
static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	blk_mq_debugfs_unregister_hctxs(q);

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&dev->kobj);

	/* Allow a later blk_mq_register_dev() to start from scratch. */
	q->mq_sysfs_init_done = false;
}
266 | ||
/*
 * Public entry point for tearing down the queue's "mq" sysfs hierarchy;
 * wraps __blk_mq_unregister_dev() with CPU-hotplug exclusion.
 */
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	blk_mq_disable_hotplug();
	__blk_mq_unregister_dev(dev, q);
	blk_mq_enable_hotplug();
}
273 | ||
/* Initialize a hardware context's kobject (refcounted, freed by its release). */
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}
278 | ||
/*
 * Drop the references taken in blk_mq_sysfs_init() on every per-cpu ctx
 * kobject and on the queue's "mq" kobject, letting their release
 * handlers run.
 */
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(&q->mq_kobj);
}
290 | ||
/*
 * Initialize the queue's "mq" kobject and every per-cpu software-context
 * kobject.  Only kobject_init() here — nothing is added to sysfs until
 * blk_mq_register_dev().  Balanced by blk_mq_sysfs_deinit().
 */
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}
303 | ||
/*
 * Register the queue's blk-mq sysfs hierarchy under @dev: the "mq"
 * directory, then one directory per hardware context with its cpuN
 * children, plus the debugfs entries.
 *
 * Holds off CPU hotplug for the duration.  kobject_get(&dev->kobj) pins
 * the device; the matching kobject_put() is in __blk_mq_unregister_dev().
 *
 * Returns 0 on success.  If any hctx registration fails, everything added
 * so far is torn down via __blk_mq_unregister_dev() and the error is
 * returned.  mq_sysfs_init_done gates the later blk_mq_sysfs_register/
 * unregister helpers.
 */
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	blk_mq_disable_hotplug();

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	blk_mq_debugfs_register(q, kobject_name(&dev->kobj));

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	if (ret)
		__blk_mq_unregister_dev(dev, q);
	else
		q->mq_sysfs_init_done = true;
out:
	blk_mq_enable_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);
67aec14c JA |
335 | |
/*
 * Remove all hardware-context (and their ctx) sysfs entries, e.g. around
 * a queue remap, leaving the top-level "mq" kobject in place.  A no-op
 * unless blk_mq_register_dev() completed (mq_sysfs_init_done).
 */
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!q->mq_sysfs_init_done)
		return;

	blk_mq_debugfs_unregister_hctxs(q);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
}
349 | ||
/*
 * Re-add all hardware-context sysfs and debugfs entries, the counterpart
 * to blk_mq_sysfs_unregister().  A no-op (returning 0) unless
 * blk_mq_register_dev() completed.
 *
 * Returns 0 on success or the first blk_mq_register_hctx() error; on
 * failure, hctxs registered before the failing one are left registered.
 */
int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	if (!q->mq_sysfs_init_done)
		return ret;

	blk_mq_debugfs_register_hctxs(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	return ret;
}