// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>
static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
	}
	return 0;
}
static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}
static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}
/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}
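
/*
 * Illustrative note (not from the original file): a bit is set in ->word
 * on allocation, flagged in ->cleared by sbitmap_deferred_clear_bit() on
 * a lazy free, and folded back into ->word by the helper above. A minimal
 * sketch of the lazy-free side:
 *
 *	// flag bit nr as freed without touching ->word; a later
 *	// allocator that finds ->word full will call
 *	// sbitmap_deferred_clear() and retry
 *	sbitmap_deferred_clear_bit(sb, nr);
 */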
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
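
/*
 * Usage sketch (illustrative, not part of the original file), assuming a
 * caller-owned struct sbitmap named "sb":
 *
 *	struct sbitmap sb;
 *
 *	// 128 bits, shift chosen automatically (-1), per-cpu hints on
 *	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
 *			      false, true))
 *		return -ENOMEM;
 *	...
 *	sbitmap_free(&sb);
 */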
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
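
/*
 * Illustrative note (not from the original file): sbitmap_resize() never
 * reallocates sb->map, so callers must keep the new depth within the
 * depth the sbitmap was initialized with, e.g.:
 *
 *	sbitmap_init_node(&sb, 256, -1, GFP_KERNEL, NUMA_NO_NODE,
 *			  false, true);
 *	sbitmap_resize(&sb, 128);	// shrink below the initial depth
 *	sbitmap_resize(&sb, 256);	// grow back up to the initial depth
 */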
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint)
{
	struct sbitmap_word *map = &sb->map[index];
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
					!sb->round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}
static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
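
/*
 * Usage sketch (illustrative, not part of the original file); -1 means
 * the map was full, and use_resource() stands in for caller-side work:
 *
 *	int nr = sbitmap_get(&sb);
 *
 *	if (nr < 0)
 *		return -EBUSY;
 *	use_resource(nr);
 *	sbitmap_clear_bit(&sb, nr);
 */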
static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(&sb->map[index]))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
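
/*
 * Illustrative note (not from the original file): a shallow get caps how
 * far into each word the search may go. With 64 bits per word and
 * shallow_depth == 8, only bits 0..7 of every word are candidates:
 *
 *	// leave the upper bits of each word for unrestricted getters
 *	int nr = sbitmap_get_shallow(&sb, 8);
 */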
bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);
void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);
static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}

	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}
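
/*
 * Worked example (illustrative, not from the original file): with
 * depth = 256, shift = 6, min_shallow_depth = 4, SBQ_WAIT_QUEUES = 8
 * and SBQ_WAKE_BATCH = 8:
 *
 *	shallow_depth = min(1 << 6, 4) = 4
 *	depth = (256 >> 6) * 4 + min(256 & 63, 4) = 16 + 0 = 16
 *	wake_batch = clamp(16 / 8, 1, 8) = 2
 */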
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
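
/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *	struct sbitmap_queue sbq;
 *
 *	if (sbitmap_queue_init_node(&sbq, 64, -1, false, GFP_KERNEL,
 *				    NUMA_NO_NODE))
 *		return -ENOMEM;
 *	...
 *	sbitmap_queue_free(&sbq);
 */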
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned int hint, depth;
	unsigned long index, nr;
	int i;

	if (unlikely(sb->round_robin))
		return 0;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);

	index = SB_NR_TO_INDEX(sb, hint);

	for (i = 0; i < sb->map_nr; i++) {
		struct sbitmap_word *map = &sb->map[index];
		unsigned long get_mask;

		sbitmap_deferred_clear(map);
		if (map->word == (1UL << (map->depth - 1)) - 1)
			continue;

		nr = find_first_zero_bit(&map->word, map->depth);
		if (nr + nr_tags <= map->depth) {
			atomic_long_t *ptr = (atomic_long_t *) &map->word;
			int map_tags = min_t(int, nr_tags, map->depth);
			unsigned long val, ret;

			get_mask = ((1UL << map_tags) - 1) << nr;
			do {
				val = READ_ONCE(map->word);
				ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
			} while (ret != val);
			get_mask = (get_mask & ~ret) >> nr;
			if (get_mask) {
				*offset = nr + (index << sb->shift);
				update_alloc_hint_after_get(sb, depth, hint,
							*offset + map_tags - 1);
				return get_mask;
			}
		}
		/* Jump to next index. */
		if (++index >= sb->map_nr)
			index = 0;
	}

	return 0;
}
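
/*
 * Illustrative pairing (not from the original file): grab up to four
 * tags at once, decode the returned mask into absolute tag numbers, and
 * later free them as a batch:
 *
 *	unsigned int offset;
 *	unsigned long mask = __sbitmap_queue_get_batch(&sbq, 4, &offset);
 *	int tags[4], n = 0;
 *
 *	while (mask) {
 *		tags[n++] = offset + __ffs(mask);
 *		mask &= mask - 1;	// clear lowest set bit
 *	}
 *	...
 *	sbitmap_queue_clear_batch(&sbq, 0, tags, n);
 */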
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}
static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wakeup a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
	if (likely(!sb->round_robin && tag < sb->depth))
		data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
			       int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;
	unsigned long mask = 0;
	int i;

	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
					tags[nr_tags - 1] - offset);
}
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated out.
	 *
	 * Orders READ/WRITE on the associated instance (such as request
	 * of blk_mq) by this bit for avoiding race with re-allocation,
	 * and its pair is the memory barrier implied in __sbitmap_get_word.
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);
	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
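
/*
 * Illustrative sketch (not from the original file): the completion path
 * of a tag-based driver typically frees the tag and wakes any waiter in
 * one call:
 *
 *	// "tag" was returned earlier by __sbitmap_queue_get()
 *	sbitmap_queue_clear(&sbq, tag, raw_smp_processor_id());
 */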
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
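
/*
 * Illustrative wait loop (not part of the original file), modeled on the
 * way blk-mq waits for a free tag; DEFINE_SBQ_WAIT() and sbq_wait_ptr()
 * are real helpers from <linux/sbitmap.h>, and "wait_index" is assumed
 * to be a caller-owned atomic_t:
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws = sbq_wait_ptr(&sbq, &wait_index);
 *	int nr;
 *
 *	do {
 *		sbitmap_prepare_to_wait(&sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(&sbq);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	} while (1);
 *	sbitmap_finish_wait(&sbq, ws, &wait);
 */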