/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}
/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}
/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}
/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
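/*
 * Worked example of the fair-share bound above (illustrative numbers, not
 * part of the original file): with bt->sb.depth == 128 and users == 3,
 * depth = max((128 + 3 - 1) / 3, 4U) = max(43, 4) = 43, so allocation is
 * allowed while a shared user holds fewer than 43 tags. The 4U floor keeps
 * every user making forward progress even when the active user count is
 * very high relative to the tag depth.
 */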
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
{
	if (!hctx_may_queue(hctx, bt))
		return -1;

	return __sbitmap_queue_get(bt);
}
static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
		  struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags)
{
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt);
	if (tag != -1)
		return tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return -1;

	ws = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete. Note that hctx can be NULL here for
		 * reserved tag allocation.
		 */
		if (hctx)
			blk_mq_run_hw_queue(hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __bt_get(hctx, bt);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->flags & BLK_MQ_REQ_RESERVED) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&ws->wait, &wait);
		ws = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&ws->wait, &wait);
	return tag;
}
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
		     data->hctx->tags);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL,
		     data->hctx->tags);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_RESERVED)
		return __blk_mq_get_reserved_tag(data);
	return __blk_mq_get_tag(data);
}
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}
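/*
 * Numbering note with a worked example (illustrative, not part of the
 * original file): reserved tags occupy the low range of the tag space.
 * With nr_reserved_tags == 1, the reserved bitmap covers tag 0 and the
 * normal bitmap covers tags 1..nr_tags-1; __blk_mq_get_tag() adds
 * nr_reserved_tags to the bit it allocates, and blk_mq_put_tag() above
 * subtracts it again before clearing the bit.
 */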
struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	if (rq->q == hctx->queue)
		iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}
struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	iter_data->fn(rq, iter_data->data, reserved);
	return true;
}
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
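/*
 * Sketch of a caller (hypothetical, not from the original file): a driver
 * can count its in-flight requests by passing a busy_tag_iter_fn like the
 * one below to blk_mq_tagset_busy_iter(). The function name is made up for
 * illustration.
 */
static void __maybe_unused example_count_inflight(struct request *rq,
						  void *data, bool reserved)
{
	unsigned int *inflight = data;

	/* Called once for each tag-holding request in the tag set. */
	(*inflight)++;
}

/*
 * Hypothetical usage:
 *
 *	unsigned int inflight = 0;
 *
 *	blk_mq_tagset_busy_iter(set, example_count_inflight, &inflight);
 */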
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
{
	int i, j, ret = 0;

	if (!set->ops->reinit_request)
		goto out;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		for (j = 0; j < tags->nr_tags; j++) {
			if (!tags->rqs[j])
				continue;

			ret = set->ops->reinit_request(set->driver_data,
						tags->rqs[j]);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_reinit_tagset);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check.
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
}
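/*
 * Hypothetical per-queue counterpart of the counting sketch above, matching
 * the busy_iter_fn signature that blk_mq_queue_tag_busy_iter() expects
 * (again not part of the original file):
 */
static void __maybe_unused example_queue_inflight(struct blk_mq_hw_ctx *hctx,
						  struct request *rq,
						  void *data, bool reserved)
{
	unsigned int *inflight = data;

	(*inflight)++;
}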
static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
{
	return bt->sb.depth - sbitmap_weight(&bt->sb);
}
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;

free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	if (!zalloc_cpumask_var(&tags->cpumask, GFP_KERNEL)) {
		kfree(tags);
		return NULL;
	}

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}
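/*
 * Illustrative call (hypothetical values, not part of the original file):
 * allocate a map of 128 tags, one of them reserved, with no NUMA preference
 * and FIFO allocation order:
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_init_tags(128, 1, NUMA_NO_NODE, BLK_TAG_ALLOC_FIFO);
 *	if (!tags)
 *		return -ENOMEM;
 */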
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	free_cpumask_var(tags->cpumask);
	kfree(tags);
}
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * Don't need (or can't) update reserved tags here, they remain
	 * static and should never need resizing.
	 */
	sbitmap_queue_resize(&tags->bitmap_tags, tdepth);

	blk_mq_tag_wakeup_all(tags, false);
	return 0;
}
/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
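/*
 * Sketch (hypothetical, not part of the original file): a driver completing
 * by queue-wide tag can split the value back out with the helpers declared
 * in <linux/blk-mq.h>:
 */
static inline void example_decode_unique_tag(u32 unique_tag, u16 *hwq,
					     u32 *tag)
{
	/* Upper bits hold the hardware queue index. */
	*hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	/* Low BLK_MQ_UNIQUE_TAG_BITS bits hold the per-hwq tag. */
	*tag = blk_mq_unique_tag_to_tag(unique_tag);
}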
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			1U << tags->bitmap_tags.sb.shift);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n",
			atomic_read(&tags->active_queues));

	return page - orig_page;
}
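/*
 * The buffer written above has the following shape (values illustrative):
 *
 *	nr_tags=<n>, reserved_tags=<r>, bits_per_word=<w>
 *	nr_free=<f>, nr_reserved=<rf>
 *	active_queues=<a>
 */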