/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}

/*
 * Wake up all waiters that are potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(&tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return atomic_read(&hctx->nr_active) < depth;
}

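/*
 * Worked example (added for illustration, not in the original source): with
 * a shared bitmap of depth 128 and 3 active queues, each hctx may hold up to
 * max((128 + 3 - 1) / 3, 4U) == 43 tags before hctx_may_queue() refuses new
 * allocations. With 64 active queues the per-queue share would round down to
 * 2, so the 4U floor keeps every active queue able to make some progress.
 */
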
#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)

static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
                    unsigned int *tag_cache, struct blk_mq_tags *tags)
{
        unsigned int last_tag;
        int tag;

        if (!hctx_may_queue(hctx, bt))
                return -1;

        last_tag = *tag_cache;
        tag = sbitmap_get(&bt->sb, last_tag, BT_ALLOC_RR(tags));

        if (tag == -1) {
                *tag_cache = 0;
        } else if (tag == last_tag || unlikely(BT_ALLOC_RR(tags))) {
                last_tag = tag + 1;
                if (last_tag >= bt->sb.depth - 1)
                        last_tag = 0;
                *tag_cache = last_tag;
        }

        return tag;
}

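/*
 * Note (added for clarity): *tag_cache remembers roughly where the previous
 * allocation succeeded so the next sbitmap_get() starts its search nearby.
 * With BLK_TAG_ALLOC_RR the cache is advanced after every successful
 * allocation, handing tags out round-robin instead of reusing the lowest
 * free bit.
 */
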
static int bt_get(struct blk_mq_alloc_data *data,
                  struct sbitmap_queue *bt,
                  struct blk_mq_hw_ctx *hctx,
                  unsigned int *last_tag, struct blk_mq_tags *tags)
{
        struct sbq_wait_state *ws;
        DEFINE_WAIT(wait);
        int tag;

        tag = __bt_get(hctx, bt, last_tag, tags);
        if (tag != -1)
                return tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return -1;

        ws = bt_wait_ptr(bt, hctx);
        do {
                prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);

                tag = __bt_get(hctx, bt, last_tag, tags);
                if (tag != -1)
                        break;

                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete. Note that hctx can be NULL here for
                 * reserved tag allocation.
                 */
                if (hctx)
                        blk_mq_run_hw_queue(hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __bt_get(hctx, bt, last_tag, tags);
                if (tag != -1)
                        break;

                blk_mq_put_ctx(data->ctx);

                io_schedule();

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = data->q->mq_ops->map_queue(data->q,
                                data->ctx->cpu);
                if (data->flags & BLK_MQ_REQ_RESERVED) {
                        bt = &data->hctx->tags->breserved_tags;
                } else {
                        last_tag = &data->ctx->last_tag;
                        hctx = data->hctx;
                        bt = &hctx->tags->bitmap_tags;
                }
                finish_wait(&ws->wait, &wait);
                ws = bt_wait_ptr(bt, hctx);
        } while (1);

        finish_wait(&ws->wait, &wait);
        return tag;
}

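/*
 * Note (added for clarity): after io_schedule() the task may wake up on a
 * different CPU, so bt_get() re-fetches the software queue and re-maps the
 * hardware queue before retrying; bt, hctx and last_tag are refreshed to
 * match the new mapping before the wait is re-armed.
 */
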
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        int tag;

        tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
                     &data->ctx->last_tag, data->hctx->tags);
        if (tag >= 0)
                return tag + data->hctx->tags->nr_reserved_tags;

        return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
        int tag, zero = 0;

        if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
                WARN_ON_ONCE(1);
                return BLK_MQ_TAG_FAIL;
        }

        tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
                     data->hctx->tags);
        if (tag < 0)
                return BLK_MQ_TAG_FAIL;

        return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        if (data->flags & BLK_MQ_REQ_RESERVED)
                return __blk_mq_get_reserved_tag(data);
        return __blk_mq_get_tag(data);
}

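/*
 * Note (added for clarity): normal tags are returned offset by
 * nr_reserved_tags, so values 0..nr_reserved_tags-1 always denote reserved
 * tags. blk_mq_put_tag() below subtracts the offset again before clearing
 * the bit in the corresponding bitmap.
 */
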
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
                    unsigned int *last_tag)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (tag >= tags->nr_reserved_tags) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(&tags->bitmap_tags, real_tag);
                if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
                        *last_tag = real_tag;
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
                sbitmap_queue_clear(&tags->breserved_tags, tag);
        }
}

struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        busy_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct blk_mq_tags *tags = hctx->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;
        rq = tags->rqs[bitnr];

        if (rq->q == hctx->queue)
                iter_data->fn(hctx, rq, iter_data->data, reserved);
        return true;
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
                        busy_iter_fn *fn, void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;
        rq = tags->rqs[bitnr];

        iter_data->fn(rq, iter_data->data, reserved);
        return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data, bool reserved)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
                busy_tag_iter_fn *fn, void *priv)
{
        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
        bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv)
{
        int i;

        for (i = 0; i < tagset->nr_hw_queues; i++) {
                if (tagset->tags && tagset->tags[i])
                        blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

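/*
 * Usage sketch (illustrative only; the callback and variable names below are
 * made up): a driver can walk every allocated request in its tag set, for
 * example to count commands currently in flight:
 *
 *	static void example_count_busy(struct request *rq, void *data,
 *				       bool reserved)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int count = 0;
 *	blk_mq_tagset_busy_iter(set, example_count_busy, &count);
 */
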
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
{
        int i, j, ret = 0;

        if (!set->ops->reinit_request)
                goto out;

        for (i = 0; i < set->nr_hw_queues; i++) {
                struct blk_mq_tags *tags = set->tags[i];

                for (j = 0; j < tags->nr_tags; j++) {
                        if (!tags->rqs[j])
                                continue;

                        ret = set->ops->reinit_request(set->driver_data,
                                                tags->rqs[j]);
                        if (ret)
                                goto out;
                }
        }

out:
        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_reinit_tagset);

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;

                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check.
                 */
                if (!blk_mq_hw_queue_mapped(hctx))
                        continue;

                if (tags->nr_reserved_tags)
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
}

static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
{
        return bt->sb.depth - sbitmap_weight(&bt->sb);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, int node)
{
        return sbitmap_queue_init_node(bt, depth, -1, GFP_KERNEL, node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node, int alloc_policy)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

        tags->alloc_policy = alloc_policy;

        if (bt_alloc(&tags->bitmap_tags, depth, node))
                goto free_tags;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node))
                goto free_bitmap_tags;

        return tags;
free_bitmap_tags:
        sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
        kfree(tags);
        return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, int alloc_policy)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        if (!zalloc_cpumask_var(&tags->cpumask, GFP_KERNEL)) {
                kfree(tags);
                return NULL;
        }

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        sbitmap_queue_free(&tags->bitmap_tags);
        sbitmap_queue_free(&tags->breserved_tags);
        free_cpumask_var(tags->cpumask);
        kfree(tags);
}

void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

        *tag = prandom_u32() % depth;
}

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
        tdepth -= tags->nr_reserved_tags;
        if (tdepth > tags->nr_tags)
                return -EINVAL;

        /*
         * Don't need (or can't) update reserved tags here, they remain
         * static and should never need resizing.
         */
        sbitmap_queue_resize(&tags->bitmap_tags, tdepth);

        blk_mq_tag_wakeup_all(tags, false);
        return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        int hwq = 0;

        if (q->mq_ops) {
                hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
                hwq = hctx->queue_num;
        }

        return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);

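/*
 * Worked example (added for illustration): with BLK_MQ_UNIQUE_TAG_BITS == 16,
 * a request on hardware queue 2 holding per-queue tag 0x001a yields
 * (2 << 16) | 0x001a == 0x0002001a; blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag() split the value back apart.
 */
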
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
        char *orig_page = page;
        unsigned int free, res;

        if (!tags)
                return 0;

        page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
                        "bits_per_word=%u\n",
                        tags->nr_tags, tags->nr_reserved_tags,
                        1U << tags->bitmap_tags.sb.shift);

        free = bt_unused_tags(&tags->bitmap_tags);
        res = bt_unused_tags(&tags->breserved_tags);

        page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
        page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

        return page - orig_page;
}