// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
        unsigned long mask;

        if (!READ_ONCE(map->cleared))
                return false;

        /*
         * First get a stable cleared mask, setting the old mask to 0.
         */
        mask = xchg(&map->cleared, 0);

        /*
         * Now clear the masked bits in our free word
         */
        atomic_long_andnot(mask, (atomic_long_t *)&map->word);
        BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
        return true;
}
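
/*
 * Worked example for sbitmap_deferred_clear() above (editorial, not part of
 * the original source): if map->word is 0b1111 (all four bits allocated) and
 * map->cleared is 0b0101, xchg() atomically takes ownership of the cleared
 * mask, and atomic_long_andnot() leaves map->word == 0b1010, so bits 0 and 2
 * become allocatable again.
 */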

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
                      gfp_t flags, int node)
{
        unsigned int bits_per_word;
        unsigned int i;

        if (shift < 0) {
                shift = ilog2(BITS_PER_LONG);
                /*
                 * If the bitmap is small, shrink the number of bits per word
                 * so we spread over a few cachelines, at least. If less than
                 * 4 bits, just forget about it; it's not going to work
                 * optimally anyway.
                 */
                if (depth >= 4) {
                        while ((4U << shift) > depth)
                                shift--;
                }
        }
        bits_per_word = 1U << shift;
        if (bits_per_word > BITS_PER_LONG)
                return -EINVAL;

        sb->shift = shift;
        sb->depth = depth;
        sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

        if (depth == 0) {
                sb->map = NULL;
                return 0;
        }

        sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
        if (!sb->map)
                return -ENOMEM;

        for (i = 0; i < sb->map_nr; i++) {
                sb->map[i].depth = min(depth, bits_per_word);
                depth -= sb->map[i].depth;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
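
/*
 * Worked example for the shift heuristic above (editorial): with
 * BITS_PER_LONG == 64 and depth == 16, shift starts at ilog2(64) == 6 and is
 * walked down while (4U << shift) > depth: 256, 128, 64 and 32 are all
 * greater than 16, so shift ends up as 2. That gives bits_per_word == 4 and
 * map_nr == DIV_ROUND_UP(16, 4) == 4, spreading the bitmap over four words
 * (and hence potentially four cachelines) instead of packing all 16 bits
 * into one word.
 */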

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
        unsigned int bits_per_word = 1U << sb->shift;
        unsigned int i;

        for (i = 0; i < sb->map_nr; i++)
                sbitmap_deferred_clear(&sb->map[i]);

        sb->depth = depth;
        sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

        for (i = 0; i < sb->map_nr; i++) {
                sb->map[i].depth = min(depth, bits_per_word);
                depth -= sb->map[i].depth;
        }
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
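
/*
 * Note (editorial): sbitmap_resize() does not reallocate sb->map, so the new
 * depth must not exceed the depth the sbitmap was initialized with; growing
 * map_nr past the original allocation would index past the end of sb->map.
 */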

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
                              unsigned int hint, bool wrap)
{
        int nr;

        /* don't wrap if starting from 0 */
        wrap = wrap && hint;

        while (1) {
                nr = find_next_zero_bit(word, depth, hint);
                if (unlikely(nr >= depth)) {
                        /*
                         * We started with an offset, and we didn't reset the
                         * offset to 0 in a failure case, so start from 0 to
                         * exhaust the map.
                         */
                        if (hint && wrap) {
                                hint = 0;
                                continue;
                        }
                        return -1;
                }

                if (!test_and_set_bit_lock(nr, word))
                        break;

                hint = nr + 1;
                if (hint >= depth - 1)
                        hint = 0;
        }

        return nr;
}
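
/*
 * Illustrative example for __sbitmap_get_word() (editorial): with depth == 8,
 * hint == 5, and wrap == true, the search scans bits 5..7 first; if they are
 * all set, it restarts once from bit 0 before giving up. With wrap == false,
 * a failed scan above the hint abandons the word immediately rather than
 * rescanning from bit 0, which is what the strict round-robin callers below
 * rely on.
 */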

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
                                     unsigned int alloc_hint, bool round_robin)
{
        struct sbitmap_word *map = &sb->map[index];
        int nr;

        do {
                nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
                                        !round_robin);
                if (nr != -1)
                        break;
                if (!sbitmap_deferred_clear(map))
                        break;
        } while (1);

        return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
        unsigned int i, index;
        int nr = -1;

        index = SB_NR_TO_INDEX(sb, alloc_hint);

        /*
         * Unless we're doing round robin tag allocation, just use the
         * alloc_hint to find the right word index. No point in looping
         * twice in find_next_zero_bit() for that case.
         */
        if (round_robin)
                alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
        else
                alloc_hint = 0;

        for (i = 0; i < sb->map_nr; i++) {
                nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
                                               round_robin);
                if (nr != -1) {
                        nr += index << sb->shift;
                        break;
                }

                /* Jump to next index. */
                alloc_hint = 0;
                if (++index >= sb->map_nr)
                        index = 0;
        }

        return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
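
/*
 * Minimal usage sketch (editorial; sbitmap_clear_bit() is the corresponding
 * free helper from linux/sbitmap.h, and use_resource() is a hypothetical
 * consumer):
 *
 *	int nr = sbitmap_get(&sb, hint, false);
 *
 *	if (nr >= 0) {
 *		use_resource(nr);
 *		sbitmap_clear_bit(&sb, nr);
 *	}
 */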

int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
                        unsigned long shallow_depth)
{
        unsigned int i, index;
        int nr = -1;

        index = SB_NR_TO_INDEX(sb, alloc_hint);

        for (i = 0; i < sb->map_nr; i++) {
again:
                nr = __sbitmap_get_word(&sb->map[index].word,
                                        min(sb->map[index].depth, shallow_depth),
                                        SB_NR_TO_BIT(sb, alloc_hint), true);
                if (nr != -1) {
                        nr += index << sb->shift;
                        break;
                }

                if (sbitmap_deferred_clear(&sb->map[index]))
                        goto again;

                /* Jump to next index. */
                index++;
                alloc_hint = index << sb->shift;

                if (index >= sb->map_nr) {
                        index = 0;
                        alloc_hint = 0;
                }
        }

        return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
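
/*
 * Illustrative example for sbitmap_get_shallow() (editorial): with
 * bits_per_word == 64 and shallow_depth == 4, the min() above caps the
 * per-word search at the first 4 bits of each word, so at most
 * 4 * sb->map_nr bits of the whole bitmap are ever handed out through this
 * path.
 */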

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
        unsigned int i;

        for (i = 0; i < sb->map_nr; i++) {
                if (sb->map[i].word & ~sb->map[i].cleared)
                        return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
        unsigned int i, weight = 0;

        for (i = 0; i < sb->map_nr; i++) {
                const struct sbitmap_word *word = &sb->map[i];

                if (set)
                        weight += bitmap_weight(&word->word, word->depth);
                else
                        weight += bitmap_weight(&word->cleared, word->depth);
        }
        return weight;
}

static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
        return __sbitmap_weight(sb, true);
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
        return __sbitmap_weight(sb, false);
}

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
        seq_printf(m, "depth=%u\n", sb->depth);
        seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
        seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
        seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
        seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
        if ((offset & 0xf) == 0) {
                if (offset != 0)
                        seq_putc(m, '\n');
                seq_printf(m, "%08x:", offset);
        }
        if ((offset & 0x1) == 0)
                seq_putc(m, ' ');
        seq_printf(m, "%02x", byte);
}
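
/*
 * Output format produced by emit_byte() (editorial): hexdump-style lines
 * with an 8-digit hex offset every 16 bytes and a space before every
 * even-offset byte, e.g. for four bytes 0xff, 0x00, 0x0f, 0x30:
 *
 *	00000000: ff00 0f30
 */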

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
        u8 byte = 0;
        unsigned int byte_bits = 0;
        unsigned int offset = 0;
        int i;

        for (i = 0; i < sb->map_nr; i++) {
                unsigned long word = READ_ONCE(sb->map[i].word);
                unsigned long cleared = READ_ONCE(sb->map[i].cleared);
                unsigned int word_bits = READ_ONCE(sb->map[i].depth);

                word &= ~cleared;

                while (word_bits > 0) {
                        unsigned int bits = min(8 - byte_bits, word_bits);

                        byte |= (word & (BIT(bits) - 1)) << byte_bits;
                        byte_bits += bits;
                        if (byte_bits == 8) {
                                emit_byte(m, offset, byte);
                                byte = 0;
                                byte_bits = 0;
                                offset++;
                        }
                        word >>= bits;
                        word_bits -= bits;
                }
        }
        if (byte_bits) {
                emit_byte(m, offset, byte);
                offset++;
        }
        if (offset)
                seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
                                        unsigned int depth)
{
        unsigned int wake_batch;
        unsigned int shallow_depth;

        /*
         * For each batch, we wake up one queue. We need to make sure that our
         * batch size is small enough that the full depth of the bitmap,
         * potentially limited by a shallow depth, is enough to wake up all of
         * the queues.
         *
         * Each full word of the bitmap has bits_per_word bits, and there might
         * be a partial word. There are depth / bits_per_word full words and
         * depth % bits_per_word bits left over. In bitwise arithmetic:
         *
         * bits_per_word = 1 << shift
         * depth / bits_per_word = depth >> shift
         * depth % bits_per_word = depth & ((1 << shift) - 1)
         *
         * Each word can be limited to sbq->min_shallow_depth bits.
         */
        shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
        depth = ((depth >> sbq->sb.shift) * shallow_depth +
                 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
        wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
                             SBQ_WAKE_BATCH);

        return wake_batch;
}
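
/*
 * Worked example (editorial): with depth == 128, shift == 6 (64 bits per
 * word) and min_shallow_depth left at UINT_MAX, shallow_depth == 64 and the
 * adjusted depth stays 128. With SBQ_WAIT_QUEUES == 8 that gives
 * 128 / 8 == 16, which clamp_t() caps at SBQ_WAKE_BATCH == 8, so each batch
 * of 8 freed bits wakes one wait queue.
 */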

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
                            int shift, bool round_robin, gfp_t flags, int node)
{
        int ret;
        int i;

        ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
        if (ret)
                return ret;

        sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
        if (!sbq->alloc_hint) {
                sbitmap_free(&sbq->sb);
                return -ENOMEM;
        }

        if (depth && !round_robin) {
                for_each_possible_cpu(i)
                        *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
        }

        sbq->min_shallow_depth = UINT_MAX;
        sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
        atomic_set(&sbq->wake_index, 0);
        atomic_set(&sbq->ws_active, 0);

        sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
        if (!sbq->ws) {
                free_percpu(sbq->alloc_hint);
                sbitmap_free(&sbq->sb);
                return -ENOMEM;
        }

        for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
                init_waitqueue_head(&sbq->ws[i].wait);
                atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
        }

        sbq->round_robin = round_robin;
        return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
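
/*
 * Minimal init sketch (editorial; error handling beyond -ENOMEM elided):
 *
 *	struct sbitmap_queue sbq;
 *
 *	if (sbitmap_queue_init_node(&sbq, 128, -1, false, GFP_KERNEL,
 *				    NUMA_NO_NODE))
 *		return -ENOMEM;
 *
 * Passing shift == -1 lets sbitmap_init_node() pick bits_per_word itself,
 * as in the heuristic near the top of this file.
 */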

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
                                            unsigned int depth)
{
        unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
        int i;

        if (sbq->wake_batch != wake_batch) {
                WRITE_ONCE(sbq->wake_batch, wake_batch);
                /*
                 * Pairs with the memory barrier in sbitmap_queue_wake_up()
                 * to ensure that the batch size is updated before the wait
                 * counts.
                 */
                smp_mb();
                for (i = 0; i < SBQ_WAIT_QUEUES; i++)
                        atomic_set(&sbq->ws[i].wait_cnt, 1);
        }
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
        sbitmap_queue_update_wake_batch(sbq, depth);
        sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
        unsigned int hint, depth;
        int nr;

        hint = this_cpu_read(*sbq->alloc_hint);
        depth = READ_ONCE(sbq->sb.depth);
        if (unlikely(hint >= depth)) {
                hint = depth ? prandom_u32() % depth : 0;
                this_cpu_write(*sbq->alloc_hint, hint);
        }
        nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

        if (nr == -1) {
                /* If the map is full, a hint won't do us much good. */
                this_cpu_write(*sbq->alloc_hint, 0);
        } else if (nr == hint || unlikely(sbq->round_robin)) {
                /* Only update the hint if we used it. */
                hint = nr + 1;
                if (hint >= depth - 1)
                        hint = 0;
                this_cpu_write(*sbq->alloc_hint, hint);
        }

        return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
                                unsigned int shallow_depth)
{
        unsigned int hint, depth;
        int nr;

        WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

        hint = this_cpu_read(*sbq->alloc_hint);
        depth = READ_ONCE(sbq->sb.depth);
        if (unlikely(hint >= depth)) {
                hint = depth ? prandom_u32() % depth : 0;
                this_cpu_write(*sbq->alloc_hint, hint);
        }
        nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

        if (nr == -1) {
                /* If the map is full, a hint won't do us much good. */
                this_cpu_write(*sbq->alloc_hint, 0);
        } else if (nr == hint || unlikely(sbq->round_robin)) {
                /* Only update the hint if we used it. */
                hint = nr + 1;
                if (hint >= depth - 1)
                        hint = 0;
                this_cpu_write(*sbq->alloc_hint, hint);
        }

        return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
                                     unsigned int min_shallow_depth)
{
        sbq->min_shallow_depth = min_shallow_depth;
        sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
        int i, wake_index;

        if (!atomic_read(&sbq->ws_active))
                return NULL;

        wake_index = atomic_read(&sbq->wake_index);
        for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
                struct sbq_wait_state *ws = &sbq->ws[wake_index];

                if (waitqueue_active(&ws->wait)) {
                        if (wake_index != atomic_read(&sbq->wake_index))
                                atomic_set(&sbq->wake_index, wake_index);
                        return ws;
                }

                wake_index = sbq_index_inc(wake_index);
        }

        return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
        struct sbq_wait_state *ws;
        unsigned int wake_batch;
        int wait_cnt;

        ws = sbq_wake_ptr(sbq);
        if (!ws)
                return false;

        wait_cnt = atomic_dec_return(&ws->wait_cnt);
        if (wait_cnt <= 0) {
                int ret;

                wake_batch = READ_ONCE(sbq->wake_batch);

                /*
                 * Pairs with the memory barrier in sbitmap_queue_resize() to
                 * ensure that we see the batch size update before the wait
                 * count is reset.
                 */
                smp_mb__before_atomic();

                /*
                 * For concurrent callers of this, the one that failed the
                 * atomic_cmpxchg() race should call this function again
                 * to wake up a new batch on a different 'ws'.
                 */
                ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
                if (ret == wait_cnt) {
                        sbq_index_atomic_inc(&sbq->wake_index);
                        wake_up_nr(&ws->wait, wake_batch);
                        return false;
                }

                return true;
        }

        return false;
}
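
/*
 * Concrete interleaving for the cmpxchg in __sbq_wake_up() above
 * (editorial): two concurrent frees can drive ws->wait_cnt from 1 to 0 and
 * then to -1, and both see wait_cnt <= 0. Only the caller whose
 * atomic_cmpxchg() still observes its own wait_cnt value resets the counter
 * to wake_batch and issues the wakeup; the loser returns true so that
 * sbitmap_queue_wake_up() retries, typically against the next 'ws'.
 */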

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
        while (__sbq_wake_up(sbq))
                ;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
                         unsigned int cpu)
{
        /*
         * Once the clear bit is set, the bit may be allocated again.
         *
         * Orders reads and writes on the associated instance (such as a
         * blk-mq request) against this bit, to avoid racing with
         * re-allocation. The pairing barrier is the one implied in
         * __sbitmap_get_word().
         *
         * One invariant is that the clear bit has to be zero when the bit
         * is in use.
         */
        smp_mb__before_atomic();
        sbitmap_deferred_clear_bit(&sbq->sb, nr);

        /*
         * Pairs with the memory barrier in set_current_state() to ensure the
         * proper ordering of clear_bit_unlock()/waitqueue_active() in the
         * waker and test_and_set_bit_lock()/prepare_to_wait()/finish_wait()
         * in the waiter. See the comment on waitqueue_active().
         */
        smp_mb__after_atomic();
        sbitmap_queue_wake_up(sbq);

        if (likely(!sbq->round_robin && nr < sbq->sb.depth))
                *per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
        int i, wake_index;

        /*
         * Pairs with the memory barrier in set_current_state() like in
         * sbitmap_queue_wake_up().
         */
        smp_mb();
        wake_index = atomic_read(&sbq->wake_index);
        for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
                struct sbq_wait_state *ws = &sbq->ws[wake_index];

                if (waitqueue_active(&ws->wait))
                        wake_up(&ws->wait);

                wake_index = sbq_index_inc(wake_index);
        }
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
        bool first;
        int i;

        sbitmap_show(&sbq->sb, m);

        seq_puts(m, "alloc_hint={");
        first = true;
        for_each_possible_cpu(i) {
                if (!first)
                        seq_puts(m, ", ");
                first = false;
                seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
        }
        seq_puts(m, "}\n");

        seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
        seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
        seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

        seq_puts(m, "ws={\n");
        for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
                struct sbq_wait_state *ws = &sbq->ws[i];

                seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
                           atomic_read(&ws->wait_cnt),
                           waitqueue_active(&ws->wait) ? "active" : "inactive");
        }
        seq_puts(m, "}\n");

        seq_printf(m, "round_robin=%d\n", sbq->round_robin);
        seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
                            struct sbq_wait_state *ws,
                            struct sbq_wait *sbq_wait)
{
        if (!sbq_wait->sbq) {
                sbq_wait->sbq = sbq;
                atomic_inc(&sbq->ws_active);
                add_wait_queue(&ws->wait, &sbq_wait->wait);
        }
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
        list_del_init(&sbq_wait->wait.entry);
        if (sbq_wait->sbq) {
                atomic_dec(&sbq_wait->sbq->ws_active);
                sbq_wait->sbq = NULL;
        }
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
                             struct sbq_wait_state *ws,
                             struct sbq_wait *sbq_wait, int state)
{
        if (!sbq_wait->sbq) {
                atomic_inc(&sbq->ws_active);
                sbq_wait->sbq = sbq;
        }
        prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
                         struct sbq_wait *sbq_wait)
{
        finish_wait(&ws->wait, &sbq_wait->wait);
        if (sbq_wait->sbq) {
                atomic_dec(&sbq->ws_active);
                sbq_wait->sbq = NULL;
        }
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
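
/*
 * Illustrative wait-loop pattern tying the helpers above together (editorial
 * sketch of how a consumer such as blk-mq might drive them; the choice of
 * wait queue is simplified here):
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws = &sbq->ws[0];
 *	int nr;
 *
 *	do {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait,
 *					TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(sbq);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */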