/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}

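/*
 * Round-robin advance of a wait-queue index. BT_WAIT_QUEUES must be a
 * power of two for this mask-based wraparound to work.
 */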
static inline void bt_index_inc(unsigned int *index)
{
	*index = (*index + 1) & (BT_WAIT_QUEUES - 1);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all tasks potentially sleeping on normal (non-reserved) tags.
 */
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = bt->wake_index;
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		bt_index_inc(&wake_index);
	}
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags: give each user a ceil(depth / users)
	 * share, but never fewer than 4 tags. E.g. a depth of 128 shared
	 * by 6 active queues caps each queue at 22 in-flight tags.
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

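/*
 * Find and atomically claim a free bit in a single word, starting the
 * search at last_tag and wrapping back to the start of the word once.
 */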
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
	int tag, org_last_tag, end;

	org_last_tag = last_tag;
	end = bm->depth;
	do {
restart:
		tag = find_next_zero_bit(&bm->word, end, last_tag);
		if (unlikely(tag >= end)) {
			/*
			 * We started with an offset, start from 0 to
			 * exhaust the map.
			 */
			if (org_last_tag && last_tag) {
				end = last_tag;
				last_tag = 0;
				goto restart;
			}
			return -1;
		}
		last_tag = tag + 1;
	} while (test_and_set_bit_lock(tag, &bm->word));

	return tag;
}

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		last_tag = 0;
		if (++index >= bt->map_nr)
			index = 0;
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path, if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}

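/*
 * Pick a wait queue round-robin per hardware queue, so that sleepers
 * spread over BT_WAIT_QUEUES separate cachelines instead of piling onto
 * a single waitqueue.
 */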
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;

	if (!hctx)
		return &bt->bs[0];

	bs = &bt->bs[hctx->wait_index];
	bt_index_inc(&hctx->wait_index);
	return bs;
}

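/*
 * Grab a tag, sleeping on a wait queue if none are free and the caller
 * may block. Note that the ctx and hctx can change across io_schedule(),
 * since the task may wake up on another CPU, so both are re-mapped after
 * every wakeup before retrying.
 */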
static int bt_get(struct blk_mq_alloc_data *data,
		  struct blk_mq_bitmap_tags *bt,
		  struct blk_mq_hw_ctx *hctx,
		  unsigned int *last_tag)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag);
	if (tag != -1)
		return tag;

	if (!(data->gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		bool was_empty;

		was_empty = list_empty(&wait.task_list);
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		if (was_empty)
			atomic_set(&bs->wait_cnt, bt->wake_cnt);

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}

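/*
 * In the visible tag space, reserved tags occupy [0, nr_reserved_tags)
 * and normal tags follow, so a normal bitmap index is offset by
 * nr_reserved_tags on the way out (and mapped back on free).
 */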
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
			&data->ctx->last_tag);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	/* reserved tags don't use the per-ctx cache, pass a dummy of 0 */
	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	return __blk_mq_get_reserved_tag(data);
}

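/*
 * Return the first wait queue with active sleepers, scanning round-robin
 * from the last wake index, or NULL if nobody is currently waiting.
 */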
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = bt->wake_index;
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			if (wake_index != bt->wake_index)
				bt->wake_index = wake_index;

			return bs;
		}

		bt_index_inc(&wake_index);
	}

	return NULL;
}

static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;

	/*
	 * The unlock memory barrier needs to order access to the request
	 * in the free path against clearing the tag bit.
	 */
	clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

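	/*
	 * Rolling wakeups: only wake a batch of sleepers every wake_cnt
	 * frees, instead of on every free, to avoid a thundering herd
	 * when tags are scarce.
	 */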
	bs = bt_wake_ptr(bt);
	if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
		atomic_set(&bs->wait_cnt, bt->wake_cnt);
		bt_index_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}

static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	BUG_ON(tag >= tags->nr_tags);

	bt_clear_tag(&tags->bitmap_tags, tag);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
				      unsigned int tag)
{
	BUG_ON(tag >= tags->nr_reserved_tags);

	bt_clear_tag(&tags->breserved_tags, tag);
}

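/*
 * Callers pass the visible tag number; map it back into the proper
 * bitmap (normal or reserved) before clearing the bit.
 */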
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		__blk_mq_put_tag(tags, real_tag);
		*last_tag = real_tag;
	} else
		__blk_mq_put_reserved_tag(tags, tag);
}

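/*
 * Set, in free_map, a bit for every currently free tag in this bitmap,
 * with 'off' biasing the indices to account for reserved tags occupying
 * the low part of the combined map.
 */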
static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
			     unsigned long *free_map, unsigned int off)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int bit = 0;

		do {
			bit = find_next_zero_bit(&bm->word, bm->depth, bit);
			if (bit >= bm->depth)
				break;

			__set_bit(bit + off, free_map);
			bit++;
		} while (1);

		off += (1 << bt->bits_per_word);
	}
}

void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
			  void (*fn)(void *, unsigned long *), void *data)
{
	unsigned long *tag_map;
	size_t map_size;

	map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		return;

	bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
	if (tags->nr_reserved_tags)
		bt_for_each_free(&tags->breserved_tags, tag_map, 0);

	fn(data, tag_map);
	kfree(tag_map);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);

static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}

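/*
 * Distribute a (possibly new) depth across the per-word maps, and scale
 * the wake batch so that no more sleepers are woken per batch than a
 * quarter of the depth can satisfy.
 */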
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / 4)
		bt->wake_cnt = max(1U, depth / 4);

	bt->depth = depth;
}

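/*
 * Allocate the per-word maps and wait queues for one bitmap.
 * bits_per_word starts at ilog2(BITS_PER_LONG) and is shrunk for small
 * depths, so the tags still spread over at least four words (and thus
 * four cachelines).
 */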
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
		    int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If less than 4 tags, just forget about it, it's not
		 * going to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
						GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		return -ENOMEM;
	}

	for (i = 0; i < BT_WAIT_QUEUES; i++)
		init_waitqueue_head(&bt->bs[i].wait);

	bt_update_count(bt, depth);
	return 0;
}

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}

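/*
 * Seed a software queue's last_tag cache at a random offset, so that
 * different contexts tend to start their searches in different words
 * (and hence cachelines) of the map.
 */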
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * We don't need to (and can't) update the reserved tags here;
	 * they remain static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags);
	return 0;
}

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}