/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>

#include "blk-mq.h"
#include "blk-mq-tag.h"
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}
/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if we fail to get a tag the first time, the other shared-tag users can
 * still reserve budget for this queue.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}
/*
 * Wake up all waiters potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}
/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}
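
/*
 * Taken together, the busy/idle pair above keeps tags->active_queues equal
 * to the number of hardware queues that currently have BLK_MQ_S_TAG_ACTIVE
 * set; hctx_may_queue() below divides the tag depth by this count to arrive
 * at each queue's fair share.
 */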
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
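
/*
 * Worked example of the fair-share formula above: with bt->sb.depth == 128
 * and users == 3 active shared-tag queues, each hctx may have up to
 * max((128 + 3 - 1) / 3, 4U) == 43 tags in flight. The 4U floor guarantees
 * a minimum share when many queues are active: 64 users on a depth-128 map
 * still get 4 tags each rather than 2.
 */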
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
	    !hctx_may_queue(data->hctx, bt))
		return -1;
	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	unsigned int tag_offset;
	bool drop_ctx;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_TAG_FAIL;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != -1)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_TAG_FAIL;

	ws = bt_wait_ptr(bt, data->hctx);
	drop_ctx = data->ctx == NULL;
	do {
		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		if (data->ctx)
			blk_mq_put_ctx(data->ctx);

		io_schedule();

		/*
		 * We may have been woken on a different CPU; rebind the
		 * software and hardware context before retrying.
		 */
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		finish_wait(&ws->wait, &wait);
		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	if (drop_ctx && data->ctx)
		blk_mq_put_ctx(data->ctx);

	finish_wait(&ws->wait, &wait);

found_tag:
	return tag + tag_offset;
}
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
		    struct blk_mq_ctx *ctx, unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}
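
/*
 * Minimal usage sketch (illustrative, not part of this file): a caller with
 * a populated struct blk_mq_alloc_data named "data" allocates a tag and
 * later releases it. blk_mq_get_tag() already folds tag_offset into the
 * returned value, and blk_mq_put_tag() undoes that via
 * blk_mq_tag_is_reserved().
 *
 *	unsigned int tag = blk_mq_get_tag(&data);
 *
 *	if (tag != BLK_MQ_TAG_FAIL)
 *		blk_mq_put_tag(data.hctx, data.hctx->tags, data.ctx, tag);
 */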
struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (rq && rq->q == hctx->queue)
		iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}
struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = tags->rqs[bitnr];
	if (rq)
		iter_data->fn(rq, iter_data->data, reserved);

	return true;
}
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
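
/*
 * Usage sketch (hypothetical callback, for illustration): a driver can
 * count the in-flight requests of a tag set by passing a busy_tag_iter_fn:
 *
 *	static void count_inflight(struct request *rq, void *data,
 *				   bool reserved)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int inflight = 0;
 *	blk_mq_tagset_busy_iter(tagset, count_inflight, &inflight);
 */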
int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
		       int (fn)(void *, struct request *))
{
	int i, j, ret = 0;

	if (WARN_ON_ONCE(!fn))
		goto out;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		if (!tags)
			continue;

		for (j = 0; j < tags->nr_tags; j++) {
			if (!tags->static_rqs[j])
				continue;

			ret = fn(data, tags->static_rqs[j]);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);
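
/*
 * Usage sketch (hypothetical callback, for illustration): unlike the busy
 * iterators, this walks every statically allocated request, and a non-zero
 * return from the callback aborts the walk and is propagated to the caller.
 *
 *	static int my_visit_rq(void *data, struct request *rq)
 *	{
 *		return 0;	// return non-zero to stop the iteration
 *	}
 *
 *	int ret = blk_mq_tagset_iter(set, NULL, my_visit_rq);
 */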
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
}
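
/*
 * Usage sketch (hypothetical callback, for illustration): the per-queue
 * variant hands the callback the owning hardware context as well:
 *
 *	static void my_show_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
 *			       void *priv, bool reserved)
 *	{
 *		pr_info("hwq %u: tag %d\n", hctx->queue_num, rq->tag);
 *	}
 *
 *	blk_mq_queue_tag_busy_iter(q, my_show_rq, NULL);
 */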
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		int ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
				tags->nr_reserved_tags);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}
/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
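
/*
 * Usage sketch (illustrative): callers such as SCSI LLDs can split a unique
 * tag back into its halves with the helpers declared in <linux/blk-mq.h>:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */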