#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved)
{
        int tag, zero = 0;

        tag = blk_mq_get_tag(hctx, &zero, __GFP_WAIT, reserved);
        blk_mq_put_tag(hctx, tag, &zero);
}
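
/*
 * Note on the helper above: since __GFP_WAIT is passed, blk_mq_get_tag()
 * sleeps until a tag becomes available, and the tag is then dropped again
 * right away. The helper thus owns nothing on return; it only blocks the
 * caller until at least one tag (normal or reserved) could be obtained.
 */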

static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
        int i;

        for (i = 0; i < bt->map_nr; i++) {
                struct blk_mq_bitmap *bm = &bt->map[i];
                int ret;

                ret = find_first_zero_bit(&bm->word, bm->depth);
                if (ret < bm->depth)
                        return true;
        }

        return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return bt_has_free_tags(&tags->bitmap_tags);
}

static inline void bt_index_inc(unsigned int *index)
{
        *index = (*index + 1) & (BT_WAIT_QUEUES - 1);
}
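
/*
 * The mask-based wrap in bt_index_inc() assumes BT_WAIT_QUEUES is a power
 * of two (it is 8 in blk-mq-tag.h at this point). With BT_WAIT_QUEUES == 8
 * the index cycles 0, 1, ..., 7, 0 without a branch, e.g.:
 *
 *        (7 + 1) & (8 - 1) == 8 & 7 == 0
 */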

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;
        struct blk_mq_bitmap_tags *bt;
        int i, wake_index;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        /*
         * Will only throttle depth on non-reserved tags
         */
        bt = &tags->bitmap_tags;
        wake_index = bt->wake_index;
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];

                if (waitqueue_active(&bs->wait))
                        wake_up(&bs->wait);

                bt_index_inc(&wake_index);
        }
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_bitmap_tags *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->depth + users - 1) / users, 4U);
        return atomic_read(&hctx->nr_active) < depth;
}
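
/*
 * Worked example for the fair-share limit above: with bt->depth == 128 and
 * three active queues, each queue gets
 *
 *        depth = max((128 + 3 - 1) / 3, 4U) = max(43, 4) = 43
 *
 * tags, so a hardware queue may allocate only while hctx->nr_active is
 * below 43. The max(..., 4U) floor guarantees a queue is never starved
 * down to fewer than four tags, no matter how many users are active.
 */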

static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag)
{
        int tag, org_last_tag, end;

        org_last_tag = last_tag;
        end = bm->depth;
        do {
restart:
                tag = find_next_zero_bit(&bm->word, end, last_tag);
                if (unlikely(tag >= end)) {
                        /*
                         * We started with an offset, start from 0 to
                         * exhaust the map.
                         */
                        if (org_last_tag && last_tag) {
                                end = last_tag;
                                last_tag = 0;
                                goto restart;
                        }
                        return -1;
                }
                last_tag = tag + 1;
        } while (test_and_set_bit_lock(tag, &bm->word));

        return tag;
}
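
/*
 * Example of the two-pass search in __bt_get_word(): with bm->depth == 32
 * and last_tag == 20, the first find_next_zero_bit() scans bits [20, 32).
 * If those are all set, the restart path rescans bits [0, 20) with end
 * pulled back to the original offset, so every bit is visited exactly
 * once before -1 is returned.
 */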

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
                    unsigned int *tag_cache)
{
        unsigned int last_tag, org_last_tag;
        int index, i, tag;

        if (!hctx_may_queue(hctx, bt))
                return -1;

        last_tag = org_last_tag = *tag_cache;
        index = TAG_TO_INDEX(bt, last_tag);

        for (i = 0; i < bt->map_nr; i++) {
                tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
                if (tag != -1) {
                        tag += (index << bt->bits_per_word);
                        goto done;
                }

                last_tag = 0;
                if (++index >= bt->map_nr)
                        index = 0;
        }

        *tag_cache = 0;
        return -1;

        /*
         * Only update the cache from the allocation path, if we ended
         * up using the specific cached tag.
         */
done:
        if (tag == org_last_tag) {
                last_tag = tag + 1;
                if (last_tag >= bt->depth - 1)
                        last_tag = 0;

                *tag_cache = last_tag;
        }

        return tag;
}
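
/*
 * The index/bit split above uses the TAG_TO_INDEX() and TAG_TO_BIT()
 * helpers from blk-mq-tag.h, which shift and mask by bt->bits_per_word.
 * For example, with bits_per_word == 5 (32 tags per word), tag 37 lives
 * in map[37 >> 5] == map[1] at bit 37 & 31 == 5, and is rebuilt as
 * (1 << 5) + 5 == 37 when the word-local bit is handed back out.
 */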

static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
                                         struct blk_mq_hw_ctx *hctx)
{
        struct bt_wait_state *bs;

        if (!hctx)
                return &bt->bs[0];

        bs = &bt->bs[hctx->wait_index];
        bt_index_inc(&hctx->wait_index);
        return bs;
}

static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
                  unsigned int *last_tag, gfp_t gfp)
{
        struct bt_wait_state *bs;
        DEFINE_WAIT(wait);
        int tag;

        tag = __bt_get(hctx, bt, last_tag);
        if (tag != -1)
                return tag;

        if (!(gfp & __GFP_WAIT))
                return -1;

        bs = bt_wait_ptr(bt, hctx);
        do {
                bool was_empty;

                was_empty = list_empty(&wait.task_list);
                prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

                tag = __bt_get(hctx, bt, last_tag);
                if (tag != -1)
                        break;

                if (was_empty)
                        atomic_set(&bs->wait_cnt, bt->wake_cnt);

                io_schedule();
        } while (1);

        finish_wait(&bs->wait, &wait);
        return tag;
}
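
/*
 * bt_get() follows the classic prepare_to_wait()/io_schedule()/
 * finish_wait() pattern: retrying __bt_get() after prepare_to_wait() has
 * put the task on the waitqueue closes the race with a bt_clear_tag()
 * that freed a tag and issued its wake_up() just before we were queued.
 */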

static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags,
                                     struct blk_mq_hw_ctx *hctx,
                                     unsigned int *last_tag, gfp_t gfp)
{
        int tag;

        tag = bt_get(&tags->bitmap_tags, hctx, last_tag, gfp);
        if (tag >= 0)
                return tag + tags->nr_reserved_tags;

        return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
                                              gfp_t gfp)
{
        int tag, zero = 0;

        if (unlikely(!tags->nr_reserved_tags)) {
                WARN_ON_ONCE(1);
                return BLK_MQ_TAG_FAIL;
        }

        tag = bt_get(&tags->breserved_tags, NULL, &zero, gfp);
        if (tag < 0)
                return BLK_MQ_TAG_FAIL;

        return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
                            gfp_t gfp, bool reserved)
{
        if (!reserved)
                return __blk_mq_get_tag(hctx->tags, hctx, last_tag, gfp);

        return __blk_mq_get_reserved_tag(hctx->tags, gfp);
}
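
/*
 * Tag space layout implied by the two allocation paths above: reserved
 * tags occupy values [0, nr_reserved_tags), and normal tags are returned
 * as bitmap tag + nr_reserved_tags. blk_mq_put_tag() below relies on
 * this to route values below nr_reserved_tags back to the reserved map.
 */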

static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
        int i, wake_index;

        wake_index = bt->wake_index;
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];

                if (waitqueue_active(&bs->wait)) {
                        if (wake_index != bt->wake_index)
                                bt->wake_index = wake_index;

                        return bs;
                }

                bt_index_inc(&wake_index);
        }

        return NULL;
}

static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
        const int index = TAG_TO_INDEX(bt, tag);
        struct bt_wait_state *bs;

        /*
         * The unlock memory barrier needs to order access to the request
         * in the free path against clearing the tag bit.
         */
        clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

        bs = bt_wake_ptr(bt);
        if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
                atomic_set(&bs->wait_cnt, bt->wake_cnt);
                bt_index_inc(&bt->wake_index);
                wake_up(&bs->wait);
        }
}
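
/*
 * Wake-ups in bt_clear_tag() are batched: a waitqueue is woken only once
 * wait_cnt frees have been counted against it. With bt->wake_cnt == 8
 * (BT_WAIT_BATCH, which the depth / 4 clamp in bt_alloc() leaves intact
 * for any depth of 32 or more), one waiter is woken per eight freed tags
 * rather than one per tag.
 */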

static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
        BUG_ON(tag >= tags->nr_tags);

        bt_clear_tag(&tags->bitmap_tags, tag);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
                                      unsigned int tag)
{
        BUG_ON(tag >= tags->nr_reserved_tags);

        bt_clear_tag(&tags->breserved_tags, tag);
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
                    unsigned int *last_tag)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (tag >= tags->nr_reserved_tags) {
                const int real_tag = tag - tags->nr_reserved_tags;

                __blk_mq_put_tag(tags, real_tag);
                *last_tag = real_tag;
        } else
                __blk_mq_put_reserved_tag(tags, tag);
}

static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
                             unsigned long *free_map, unsigned int off)
{
        int i;

        for (i = 0; i < bt->map_nr; i++) {
                struct blk_mq_bitmap *bm = &bt->map[i];
                int bit = 0;

                do {
                        bit = find_next_zero_bit(&bm->word, bm->depth, bit);
                        if (bit >= bm->depth)
                                break;

                        __set_bit(bit + off, free_map);
                        bit++;
                } while (1);

                off += (1 << bt->bits_per_word);
        }
}

void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
                          void (*fn)(void *, unsigned long *), void *data)
{
        unsigned long *tag_map;
        size_t map_size;

        map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
        tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                return;

        bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
        if (tags->nr_reserved_tags)
                bt_for_each_free(&tags->breserved_tags, tag_map, 0);

        fn(data, tag_map);
        kfree(tag_map);
}
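
/*
 * Sizing example for the bitmap allocated above: with nr_tags == 100 and
 * BITS_PER_LONG == 64, ALIGN(100, 64) / 64 == 2, so tag_map is two
 * unsigned longs (16 bytes), enough for one bit per possible tag.
 */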

static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
        unsigned int i, used;

        for (i = 0, used = 0; i < bt->map_nr; i++) {
                struct blk_mq_bitmap *bm = &bt->map[i];

                used += bitmap_weight(&bm->word, bm->depth);
        }

        return bt->depth - used;
}

static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
                    int node, bool reserved)
{
        int i;

        bt->bits_per_word = ilog2(BITS_PER_LONG);

        /*
         * Depth can be zero for reserved tags, that's not a failure
         * condition.
         */
        if (depth) {
                unsigned int nr, i, map_depth, tags_per_word;

                tags_per_word = (1 << bt->bits_per_word);

                /*
                 * If the tag space is small, shrink the number of tags
                 * per word so we spread over a few cachelines, at least.
                 * With fewer than 4 tags, just forget about it, it's not
                 * going to work optimally anyway.
                 */
                if (depth >= 4) {
                        while (tags_per_word * 4 > depth) {
                                bt->bits_per_word--;
                                tags_per_word = (1 << bt->bits_per_word);
                        }
                }

                nr = ALIGN(depth, tags_per_word) / tags_per_word;
                bt->map = kzalloc_node(nr * sizeof(struct blk_mq_bitmap),
                                       GFP_KERNEL, node);
                if (!bt->map)
                        return -ENOMEM;

                bt->map_nr = nr;
                map_depth = depth;
                for (i = 0; i < nr; i++) {
                        bt->map[i].depth = min(map_depth, tags_per_word);
                        map_depth -= tags_per_word;
                }
        }

        bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
        if (!bt->bs) {
                kfree(bt->map);
                return -ENOMEM;
        }

        for (i = 0; i < BT_WAIT_QUEUES; i++)
                init_waitqueue_head(&bt->bs[i].wait);

        bt->wake_cnt = BT_WAIT_BATCH;
        if (bt->wake_cnt > depth / 4)
                bt->wake_cnt = max(1U, depth / 4);

        bt->depth = depth;
        return 0;
}
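
/*
 * Worked example for the sizing logic in bt_alloc(), on a 64-bit kernel
 * with depth == 32: bits_per_word starts at ilog2(64) == 6 and shrinks
 * while tags_per_word * 4 > depth, settling at 3 (8 tags per word). That
 * gives ALIGN(32, 8) / 8 == 4 maps of depth 8 each, spreading the 32
 * tags over four words/cachelines instead of packing them into one.
 */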

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
        kfree(bt->map);
        kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

        if (bt_alloc(&tags->bitmap_tags, depth, node, false))
                goto enomem;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
                goto enomem;

        return tags;
enomem:
        bt_free(&tags->bitmap_tags);
        kfree(tags);
        return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags, int node)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        bt_free(&tags->bitmap_tags);
        bt_free(&tags->breserved_tags);
        kfree(tags);
}

void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

        *tag = prandom_u32() % depth;
}

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
        char *orig_page = page;
        unsigned int free, res;

        if (!tags)
                return 0;

        page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
                        "bits_per_word=%u\n",
                        tags->nr_tags, tags->nr_reserved_tags,
                        tags->bitmap_tags.bits_per_word);

        free = bt_unused_tags(&tags->bitmap_tags);
        res = bt_unused_tags(&tags->breserved_tags);

        page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
        page += sprintf(page, "active_queues=%u\n",
                        atomic_read(&tags->active_queues));

        return page - orig_page;
}