// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
        unsigned depth = sb->depth;

        sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
        if (!sb->alloc_hint)
                return -ENOMEM;

        if (depth && !sb->round_robin) {
                int i;

                for_each_possible_cpu(i)
                        *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
        }
        return 0;
}

static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
                                                    unsigned int depth)
{
        unsigned hint;

        hint = this_cpu_read(*sb->alloc_hint);
        if (unlikely(hint >= depth)) {
                hint = depth ? prandom_u32() % depth : 0;
                this_cpu_write(*sb->alloc_hint, hint);
        }

        return hint;
}

static inline void update_alloc_hint_after_get(struct sbitmap *sb,
                                               unsigned int depth,
                                               unsigned int hint,
                                               unsigned int nr)
{
        if (nr == -1) {
                /* If the map is full, a hint won't do us much good. */
                this_cpu_write(*sb->alloc_hint, 0);
        } else if (nr == hint || unlikely(sb->round_robin)) {
                /* Only update the hint if we used it. */
                hint = nr + 1;
                if (hint >= depth - 1)
                        hint = 0;
                this_cpu_write(*sb->alloc_hint, hint);
        }
}

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
        unsigned long mask;

        if (!READ_ONCE(map->cleared))
                return false;

        /*
         * First get a stable cleared mask, setting the old mask to 0.
         */
        mask = xchg(&map->cleared, 0);

        /*
         * Now clear the masked bits in our free word
         */
        atomic_long_andnot(mask, (atomic_long_t *)&map->word);
        BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
        return true;
}

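/*
 * Illustrative example (not part of the original implementation): if
 * ->word is 0b1111 and bits 1 and 3 were freed via the deferred path,
 * ->cleared is 0b1010.  The xchg() above grabs that mask while resetting
 * ->cleared to 0, and the andnot leaves ->word at 0b0101, so both bits
 * become allocatable again with a single atomic op instead of one atomic
 * clear per freed bit.
 */
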
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
                      gfp_t flags, int node, bool round_robin,
                      bool alloc_hint)
{
        unsigned int bits_per_word;

        if (shift < 0)
                shift = sbitmap_calculate_shift(depth);

        bits_per_word = 1U << shift;
        if (bits_per_word > BITS_PER_LONG)
                return -EINVAL;

        sb->shift = shift;
        sb->depth = depth;
        sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
        sb->round_robin = round_robin;

        if (depth == 0) {
                sb->map = NULL;
                return 0;
        }

        if (alloc_hint) {
                if (init_alloc_hint(sb, flags))
                        return -ENOMEM;
        } else {
                sb->alloc_hint = NULL;
        }

        sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
        if (!sb->map) {
                free_percpu(sb->alloc_hint);
                return -ENOMEM;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
        unsigned int bits_per_word = 1U << sb->shift;
        unsigned int i;

        for (i = 0; i < sb->map_nr; i++)
                sbitmap_deferred_clear(&sb->map[i]);

        sb->depth = depth;
        sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
                              unsigned int hint, bool wrap)
{
        int nr;

        /* don't wrap if starting from 0 */
        wrap = wrap && hint;

        while (1) {
                nr = find_next_zero_bit(word, depth, hint);
                if (unlikely(nr >= depth)) {
                        /*
                         * We started with an offset, and we didn't reset the
                         * offset to 0 in a failure case, so start from 0 to
                         * exhaust the map.
                         */
                        if (hint && wrap) {
                                hint = 0;
                                continue;
                        }
                        return -1;
                }

                if (!test_and_set_bit_lock(nr, word))
                        break;

                hint = nr + 1;
                if (hint >= depth - 1)
                        hint = 0;
        }

        return nr;
}

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
                                     unsigned int alloc_hint)
{
        struct sbitmap_word *map = &sb->map[index];
        int nr;

        do {
                nr = __sbitmap_get_word(&map->word, __map_depth(sb, index),
                                        alloc_hint, !sb->round_robin);
                if (nr != -1)
                        break;
                if (!sbitmap_deferred_clear(map))
                        break;
        } while (1);

        return nr;
}

static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
        unsigned int i, index;
        int nr = -1;

        index = SB_NR_TO_INDEX(sb, alloc_hint);

        /*
         * Unless we're doing round robin tag allocation, just use the
         * alloc_hint to find the right word index. No point in looping
         * twice in find_next_zero_bit() for that case.
         */
        if (sb->round_robin)
                alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
        else
                alloc_hint = 0;

        for (i = 0; i < sb->map_nr; i++) {
                nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
                if (nr != -1) {
                        nr += index << sb->shift;
                        break;
                }

                /* Jump to next index. */
                alloc_hint = 0;
                if (++index >= sb->map_nr)
                        index = 0;
        }

        return nr;
}

int sbitmap_get(struct sbitmap *sb)
{
        int nr;
        unsigned int hint, depth;

        if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
                return -1;

        depth = READ_ONCE(sb->depth);
        hint = update_alloc_hint_before_get(sb, depth);
        nr = __sbitmap_get(sb, hint);
        update_alloc_hint_after_get(sb, depth, hint, nr);

        return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

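/*
 * Minimal usage sketch (illustrative only, not taken from an in-tree
 * caller): a user wanting a simple 128-entry tag allocator could do
 * roughly the following, letting shift = -1 pick bits_per_word via
 * sbitmap_calculate_shift() and alloc_hint = true enable the per-CPU
 * hints used above:
 *
 *	struct sbitmap sb;
 *	int nr;
 *
 *	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
 *			      false, true))
 *		return -ENOMEM;
 *	nr = sbitmap_get(&sb);
 *	if (nr >= 0)
 *		sbitmap_clear_bit(&sb, nr);
 *	sbitmap_free(&sb);
 *
 * sbitmap_get() returns -1 when every word is exhausted;
 * sbitmap_clear_bit() and sbitmap_free() are the inline helpers from
 * <linux/sbitmap.h>.
 */
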
static int __sbitmap_get_shallow(struct sbitmap *sb,
                                 unsigned int alloc_hint,
                                 unsigned long shallow_depth)
{
        unsigned int i, index;
        int nr = -1;

        index = SB_NR_TO_INDEX(sb, alloc_hint);

        for (i = 0; i < sb->map_nr; i++) {
again:
                nr = __sbitmap_get_word(&sb->map[index].word,
                                        min_t(unsigned int,
                                              __map_depth(sb, index),
                                              shallow_depth),
                                        SB_NR_TO_BIT(sb, alloc_hint), true);
                if (nr != -1) {
                        nr += index << sb->shift;
                        break;
                }

                if (sbitmap_deferred_clear(&sb->map[index]))
                        goto again;

                /* Jump to next index. */
                index++;
                alloc_hint = index << sb->shift;

                if (index >= sb->map_nr) {
                        index = 0;
                        alloc_hint = 0;
                }
        }

        return nr;
}

int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
        int nr;
        unsigned int hint, depth;

        if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
                return -1;

        depth = READ_ONCE(sb->depth);
        hint = update_alloc_hint_before_get(sb, depth);
        nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
        update_alloc_hint_after_get(sb, depth, hint, nr);

        return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
        unsigned int i;

        for (i = 0; i < sb->map_nr; i++) {
                if (sb->map[i].word & ~sb->map[i].cleared)
                        return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
        unsigned int i, weight = 0;

        for (i = 0; i < sb->map_nr; i++) {
                const struct sbitmap_word *word = &sb->map[i];
                unsigned int word_depth = __map_depth(sb, i);

                if (set)
                        weight += bitmap_weight(&word->word, word_depth);
                else
                        weight += bitmap_weight(&word->cleared, word_depth);
        }
        return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
        return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
        return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
        seq_printf(m, "depth=%u\n", sb->depth);
        seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
        seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
        seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
        seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
        if ((offset & 0xf) == 0) {
                if (offset != 0)
                        seq_putc(m, '\n');
                seq_printf(m, "%08x:", offset);
        }
        if ((offset & 0x1) == 0)
                seq_putc(m, ' ');
        seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
        u8 byte = 0;
        unsigned int byte_bits = 0;
        unsigned int offset = 0;
        int i;

        for (i = 0; i < sb->map_nr; i++) {
                unsigned long word = READ_ONCE(sb->map[i].word);
                unsigned long cleared = READ_ONCE(sb->map[i].cleared);
                unsigned int word_bits = __map_depth(sb, i);

                word &= ~cleared;

                while (word_bits > 0) {
                        unsigned int bits = min(8 - byte_bits, word_bits);

                        byte |= (word & (BIT(bits) - 1)) << byte_bits;
                        byte_bits += bits;
                        if (byte_bits == 8) {
                                emit_byte(m, offset, byte);
                                byte = 0;
                                byte_bits = 0;
                                offset++;
                        }
                        word >>= bits;
                        word_bits -= bits;
                }
        }
        if (byte_bits) {
                emit_byte(m, offset, byte);
                offset++;
        }
        if (offset)
                seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
                                        unsigned int depth)
{
        unsigned int wake_batch;
        unsigned int shallow_depth;

        /*
         * For each batch, we wake up one queue. We need to make sure that our
         * batch size is small enough that the full depth of the bitmap,
         * potentially limited by a shallow depth, is enough to wake up all of
         * the queues.
         *
         * Each full word of the bitmap has bits_per_word bits, and there might
         * be a partial word. There are depth / bits_per_word full words and
         * depth % bits_per_word bits left over. In bitwise arithmetic:
         *
         * bits_per_word = 1 << shift
         * depth / bits_per_word = depth >> shift
         * depth % bits_per_word = depth & ((1 << shift) - 1)
         *
         * Each word can be limited to sbq->min_shallow_depth bits.
         */
        shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
        depth = ((depth >> sbq->sb.shift) * shallow_depth +
                 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
        wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
                             SBQ_WAKE_BATCH);

        return wake_batch;
}

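/*
 * Worked example (illustrative only): with depth = 256, shift = 6
 * (bits_per_word = 64) and the default min_shallow_depth = UINT_MAX,
 * shallow_depth is 64, so the usable depth stays (256 >> 6) * 64 + 0 = 256
 * and wake_batch = clamp(256 / SBQ_WAIT_QUEUES, 1, SBQ_WAKE_BATCH)
 * = clamp(32, 1, 8) = 8.  If a user later sets min_shallow_depth = 4,
 * the usable depth drops to 4 * 4 + 0 = 16 and the batch shrinks to
 * clamp(2, 1, 8) = 2, so shallow allocators still free enough bits to
 * wake every wait queue.
 */
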
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
                            int shift, bool round_robin, gfp_t flags, int node)
{
        int ret;
        int i;

        ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
                                round_robin, true);
        if (ret)
                return ret;

        sbq->min_shallow_depth = UINT_MAX;
        sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
        atomic_set(&sbq->wake_index, 0);
        atomic_set(&sbq->ws_active, 0);

        sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
        if (!sbq->ws) {
                sbitmap_free(&sbq->sb);
                return -ENOMEM;
        }

        for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
                init_waitqueue_head(&sbq->ws[i].wait);
                atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

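/*
 * Minimal usage sketch (illustrative only, not quoting an in-tree user):
 * a blk-mq style tag map of 64 tags would be set up and torn down as
 *
 *	struct sbitmap_queue sbq;
 *	int tag;
 *
 *	if (sbitmap_queue_init_node(&sbq, 64, -1, false, GFP_KERNEL,
 *				    NUMA_NO_NODE))
 *		return -ENOMEM;
 *	tag = __sbitmap_queue_get(&sbq);
 *	if (tag >= 0)
 *		sbitmap_queue_clear(&sbq, tag, raw_smp_processor_id());
 *	sbitmap_queue_free(&sbq);
 *
 * __sbitmap_queue_get() returns -1 when the map is exhausted, and
 * sbitmap_queue_clear() wakes a batch of waiters, which is why blocking
 * callers pair it with the sbq_wait helpers at the end of this file.
 * sbitmap_queue_free() is the inline helper from <linux/sbitmap.h>.
 */
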
static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
                                                     unsigned int wake_batch)
{
        int i;

        if (sbq->wake_batch != wake_batch) {
                WRITE_ONCE(sbq->wake_batch, wake_batch);
                /*
                 * Pairs with the memory barrier in sbitmap_queue_wake_up()
                 * to ensure that the batch size is updated before the wait
                 * counts.
                 */
                smp_mb();
                for (i = 0; i < SBQ_WAIT_QUEUES; i++)
                        atomic_set(&sbq->ws[i].wait_cnt, 1);
        }
}

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
                                            unsigned int depth)
{
        unsigned int wake_batch;

        wake_batch = sbq_calc_wake_batch(sbq, depth);
        __sbitmap_queue_update_wake_batch(sbq, wake_batch);
}

void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
                                          unsigned int users)
{
        unsigned int wake_batch;
        unsigned int min_batch;
        unsigned int depth = (sbq->sb.depth + users - 1) / users;

        min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;

        wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
                               min_batch, SBQ_WAKE_BATCH);
        __sbitmap_queue_update_wake_batch(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
        sbitmap_queue_update_wake_batch(sbq, depth);
        sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
        return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
                                        unsigned int *offset)
{
        struct sbitmap *sb = &sbq->sb;
        unsigned int hint, depth;
        unsigned long index, nr;
        int i;

        if (unlikely(sb->round_robin))
                return 0;

        depth = READ_ONCE(sb->depth);
        hint = update_alloc_hint_before_get(sb, depth);

        index = SB_NR_TO_INDEX(sb, hint);

        for (i = 0; i < sb->map_nr; i++) {
                struct sbitmap_word *map = &sb->map[index];
                unsigned long get_mask;
                unsigned int map_depth = __map_depth(sb, index);

                sbitmap_deferred_clear(map);
                if (map->word == (1UL << (map_depth - 1)) - 1)
                        goto next;

                nr = find_first_zero_bit(&map->word, map_depth);
                if (nr + nr_tags <= map_depth) {
                        atomic_long_t *ptr = (atomic_long_t *) &map->word;
                        int map_tags = min_t(int, nr_tags, map_depth);
                        unsigned long val, ret;

                        get_mask = ((1UL << map_tags) - 1) << nr;
                        do {
                                val = READ_ONCE(map->word);
                                if ((val & ~get_mask) != val)
                                        goto next;
                                ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
                        } while (ret != val);
                        get_mask = (get_mask & ~ret) >> nr;
                        if (get_mask) {
                                *offset = nr + (index << sb->shift);
                                update_alloc_hint_after_get(sb, depth, hint,
                                                        *offset + map_tags - 1);
                                return get_mask;
                        }
                }
next:
                /* Jump to next index. */
                if (++index >= sb->map_nr)
                        index = 0;
        }

        return 0;
}

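/*
 * Worked example (illustrative only): with nr = 2 and map_tags = 4,
 * get_mask is 0b111100.  Because of the (val & ~get_mask) check above,
 * the cmpxchg() only succeeds against a word with none of those bits
 * set, so (get_mask & ~ret) >> nr yields 0b1111 and the caller receives
 * a mask of four consecutive tags starting at
 * *offset = 2 + (index << sb->shift).  The & ~ret keeps only the bits
 * this call actually flipped.
 */
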
int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
                              unsigned int shallow_depth)
{
        WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

        return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
                                     unsigned int min_shallow_depth)
{
        sbq->min_shallow_depth = min_shallow_depth;
        sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
        int i, wake_index;

        if (!atomic_read(&sbq->ws_active))
                return NULL;

        wake_index = atomic_read(&sbq->wake_index);
        for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
                struct sbq_wait_state *ws = &sbq->ws[wake_index];

                if (waitqueue_active(&ws->wait)) {
                        if (wake_index != atomic_read(&sbq->wake_index))
                                atomic_set(&sbq->wake_index, wake_index);
                        return ws;
                }

                wake_index = sbq_index_inc(wake_index);
        }

        return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
        struct sbq_wait_state *ws;
        unsigned int wake_batch;
        int wait_cnt;

        ws = sbq_wake_ptr(sbq);
        if (!ws)
                return false;

        wait_cnt = atomic_dec_return(&ws->wait_cnt);
        if (wait_cnt <= 0) {
                int ret;

                wake_batch = READ_ONCE(sbq->wake_batch);

                /*
                 * Pairs with the memory barrier in sbitmap_queue_resize() to
                 * ensure that we see the batch size update before the wait
                 * count is reset.
                 */
                smp_mb__before_atomic();

                /*
                 * For concurrent callers of this, the one that failed the
                 * atomic_cmpxchg() race should call this function again
                 * to wakeup a new batch on a different 'ws'.
                 */
                ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
                if (ret == wait_cnt) {
                        sbq_index_atomic_inc(&sbq->wake_index);
                        wake_up_nr(&ws->wait, wake_batch);
                        return false;
                }

                return true;
        }

        return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
        while (__sbq_wake_up(sbq))
                ;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
        if (likely(!sb->round_robin && tag < sb->depth))
                data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}

void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
                                int *tags, int nr_tags)
{
        struct sbitmap *sb = &sbq->sb;
        unsigned long *addr = NULL;
        unsigned long mask = 0;
        int i;

        smp_mb__before_atomic();
        for (i = 0; i < nr_tags; i++) {
                const int tag = tags[i] - offset;
                unsigned long *this_addr;

                /* since we're clearing a batch, skip the deferred map */
                this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
                if (!addr) {
                        addr = this_addr;
                } else if (addr != this_addr) {
                        atomic_long_andnot(mask, (atomic_long_t *) addr);
                        mask = 0;
                        addr = this_addr;
                }
                mask |= (1UL << SB_NR_TO_BIT(sb, tag));
        }

        if (mask)
                atomic_long_andnot(mask, (atomic_long_t *) addr);

        smp_mb__after_atomic();
        sbitmap_queue_wake_up(sbq);
        sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
                                        tags[nr_tags - 1] - offset);
}

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
                         unsigned int cpu)
{
        /*
         * Once the clear bit is set, the bit may be allocated out.
         *
         * Orders READ/WRITE on the associated instance (such as request
         * of blk_mq) by this bit for avoiding race with re-allocation,
         * and its pair is the memory barrier implied in __sbitmap_get_word.
         *
         * One invariant is that the clear bit has to be zero when the bit
         * is in use.
         */
        smp_mb__before_atomic();
        sbitmap_deferred_clear_bit(&sbq->sb, nr);

        /*
         * Pairs with the memory barrier in set_current_state() to ensure the
         * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
         * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
         * waiter. See the comment on waitqueue_active().
         */
        smp_mb__after_atomic();
        sbitmap_queue_wake_up(sbq);
        sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
        int i, wake_index;

        /*
         * Pairs with the memory barrier in set_current_state() like in
         * sbitmap_queue_wake_up().
         */
        smp_mb();
        wake_index = atomic_read(&sbq->wake_index);
        for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
                struct sbq_wait_state *ws = &sbq->ws[wake_index];

                if (waitqueue_active(&ws->wait))
                        wake_up(&ws->wait);

                wake_index = sbq_index_inc(wake_index);
        }
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
        bool first;
        int i;

        sbitmap_show(&sbq->sb, m);

        seq_puts(m, "alloc_hint={");
        first = true;
        for_each_possible_cpu(i) {
                if (!first)
                        seq_puts(m, ", ");
                first = false;
                seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
        }
        seq_puts(m, "}\n");

        seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
        seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
        seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

        seq_puts(m, "ws={\n");
        for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
                struct sbq_wait_state *ws = &sbq->ws[i];

                seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
                           atomic_read(&ws->wait_cnt),
                           waitqueue_active(&ws->wait) ? "active" : "inactive");
        }
        seq_puts(m, "}\n");

        seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
        seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
                            struct sbq_wait_state *ws,
                            struct sbq_wait *sbq_wait)
{
        if (!sbq_wait->sbq) {
                sbq_wait->sbq = sbq;
                atomic_inc(&sbq->ws_active);
                add_wait_queue(&ws->wait, &sbq_wait->wait);
        }
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
        list_del_init(&sbq_wait->wait.entry);
        if (sbq_wait->sbq) {
                atomic_dec(&sbq_wait->sbq->ws_active);
                sbq_wait->sbq = NULL;
        }
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
                             struct sbq_wait_state *ws,
                             struct sbq_wait *sbq_wait, int state)
{
        if (!sbq_wait->sbq) {
                atomic_inc(&sbq->ws_active);
                sbq_wait->sbq = sbq;
        }
        prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
                         struct sbq_wait *sbq_wait)
{
        finish_wait(&ws->wait, &sbq_wait->wait);
        if (sbq_wait->sbq) {
                atomic_dec(&sbq->ws_active);
                sbq_wait->sbq = NULL;
        }
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
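
/*
 * Minimal wait-loop sketch (illustrative only; real users such as blk-mq
 * add wait-queue selection and batching on top of these helpers):
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws = &sbq->ws[0];
 *	int tag;
 *
 *	do {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		tag = __sbitmap_queue_get(sbq);
 *		if (tag >= 0)
 *			break;
 *		io_schedule();
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);
 *
 * Picking ws[0] is a simplification; callers normally spread waiters
 * across the SBQ_WAIT_QUEUES wait queues.
 */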