/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

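/*
 * Return true if any regular (unreserved) tag is free. A NULL tag map is
 * treated as having free tags.
 */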
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * reserve budget for this queue.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all tasks potentially sleeping while waiting for tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
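	/*
	 * Worked example (illustrative): with bt->sb.depth == 128 and
	 * three active shared-tag queues, each queue may have at most
	 * max(ceil(128 / 3), 4) == 43 tags in flight before this check
	 * starts failing.
	 */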
	return atomic_read(&hctx->nr_active) < depth;
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
	    !hctx_may_queue(data->hctx, bt))
		return -1;
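	/*
	 * A non-zero shallow_depth (set e.g. by I/O schedulers to throttle
	 * certain request classes) caps how many bits may be used from any
	 * single sbitmap word, reducing the effective depth for this
	 * allocation.
	 */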
	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	unsigned int tag_offset;
	bool drop_ctx;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_TAG_FAIL;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != -1)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_TAG_FAIL;

	ws = bt_wait_ptr(bt, data->hctx);
	drop_ctx = data->ctx == NULL;
	do {
		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		if (data->ctx)
			blk_mq_put_ctx(data->ctx);

		io_schedule();

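		/*
		 * The task may have been migrated to another CPU while
		 * sleeping, so re-map the software and hardware context
		 * (and hence the tag map) before retrying.
		 */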
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		finish_wait(&ws->wait, &wait);
		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	if (drop_ctx && data->ctx)
		blk_mq_put_ctx(data->ctx);

	finish_wait(&ws->wait, &wait);

found_tag:
	return tag + tag_offset;
}

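/*
 * Release a tag back to its tag map. Reserved tags occupy the low tag
 * space [0, nr_reserved_tags); regular tags are offset by
 * nr_reserved_tags before being cleared in the regular bitmap.
 */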
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
		    struct blk_mq_ctx *ctx, unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

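/*
 * Context passed to the per-bit callback while walking a hardware
 * queue's tag bitmap; see bt_for_each() below.
 */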
struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (rq && rq->q == hctx->queue)
		iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = tags->rqs[bitnr];
	if (rq)
		iter_data->fn(rq, iter_data->data, reserved);

	return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

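/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): complete every in-flight request of a tag set, e.g. after a
 * fatal controller failure:
 *
 *	static void my_abort_rq(struct request *rq, void *data, bool reserved)
 *	{
 *		blk_mq_complete_request(rq);
 *	}
 *
 *	blk_mq_tagset_busy_iter(&dev->tag_set, my_abort_rq, NULL);
 */
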
int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
		       int (fn)(void *, struct request *))
{
	int i, j, ret = 0;

	if (WARN_ON_ONCE(!fn))
		goto out;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		if (!tags)
			continue;

		for (j = 0; j < tags->nr_tags; j++) {
			if (!tags->static_rqs[j])
				continue;

			ret = fn(data, tags->static_rqs[j]);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);
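
/*
 * Illustrative usage sketch (hypothetical): walk every statically
 * allocated request of a tag set, e.g. to re-initialize per-request
 * driver data after a controller reset (my_init_cmd() is a made-up
 * helper):
 *
 *	static int my_reinit_rq(void *data, struct request *rq)
 *	{
 *		my_init_cmd(blk_mq_rq_to_pdu(rq));
 *		return 0;
 *	}
 *
 *	blk_mq_tagset_iter(&dev->tag_set, dev, my_reinit_rq);
 */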

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

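/*
 * Resize a tag map to a new depth. Shrinking (or growing within the
 * original nr_tags) only resizes the regular bitmap; growing past
 * nr_tags requires can_grow and swaps in a freshly allocated rq map.
 */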
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit; set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
				tags->nr_reserved_tags);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		*tagsptr = new;
	} else {
		/*
		 * We don't need to (and can't) update reserved tags here;
		 * they remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
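
/*
 * Decoding example: the matching helpers blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag() (declared in include/linux/blk-mq.h) split
 * the value back apart:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */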