lib/sbitmap.c
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */
#include <linux/sbitmap.h>
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word
		 * so we spread over a few cachelines, at least. If less than
		 * 4 bits, just forget about it, it's not going to work
		 * optimally anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	/* Split the depth across the words: all but the last word are full. */
	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
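/*
 * Example (not part of the original file): a minimal sketch of how a caller
 * might initialize an sbitmap. The depth of 100 and the NUMA_NO_NODE choice
 * are illustrative assumptions. With BITS_PER_LONG == 64 and shift == -1
 * (auto-sized), bits_per_word is 64, so map_nr = DIV_ROUND_UP(100, 64) = 2
 * and the loop above splits the depth as map[0].depth = 64 and
 * map[1].depth = 36.
 */
static int __maybe_unused sbitmap_init_example(void)
{
	struct sbitmap sb;
	int ret;

	ret = sbitmap_init_node(&sb, 100, -1, GFP_KERNEL, NUMA_NO_NODE);
	if (ret)
		return ret;

	/* ... use the bitmap ... */

	sbitmap_free(&sb);
	return 0;
}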
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
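/*
 * Usage note with a worked example (added for illustration):
 * sbitmap_resize() only recomputes the bookkeeping; sb->map is not
 * reallocated. With shift == 6 (64 bits per word), shrinking a bitmap
 * initialized with depth 128 down to 48 leaves map_nr = 1 and
 * map[0].depth = 48, so only the first word is used. The assumption here is
 * that callers never resize beyond the depth passed at init time, since
 * that would require more words than were allocated.
 */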
static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
			      bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(&word->word, word->depth, hint);
		if (unlikely(nr >= word->depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit(nr, &word->word))
			break;

		hint = nr + 1;
		if (hint >= word->depth - 1)
			hint = 0;
	}

	return nr;
}
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
		nr = __sbitmap_get_word(&sb->map[index],
					SB_NR_TO_BIT(sb, alloc_hint),
					!round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
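/*
 * Example (illustrative, not in the original file): a sketch of the
 * allocate/free cycle for a bare sbitmap. The round_robin = false choice and
 * the hint of 0 are assumptions; a real caller would typically carry a
 * per-CPU alloc_hint between calls.
 */
static void __maybe_unused sbitmap_get_example(struct sbitmap *sb)
{
	int nr;

	nr = sbitmap_get(sb, 0, false);
	if (nr < 0)
		return;	/* All bits set; the map is exhausted. */

	/* nr is exclusively owned until it is cleared. */
	sbitmap_clear_bit(sb, nr);
}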
bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
bool sbitmap_any_bit_clear(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned long ret;

		ret = find_first_zero_bit(&word->word, word->depth);
		if (ret < word->depth)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		weight += bitmap_weight(&word->word, word->depth);
	}
	return weight;
}
EXPORT_SYMBOL_GPL(sbitmap_weight);
static unsigned int sbq_calc_wake_batch(unsigned int depth)
{
	unsigned int wake_batch;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap is
	 * enough to wake up all of the queues.
	 */
	wake_batch = SBQ_WAKE_BATCH;
	if (wake_batch > depth / SBQ_WAIT_QUEUES)
		wake_batch = max(1U, depth / SBQ_WAIT_QUEUES);

	return wake_batch;
}
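/*
 * Worked example (added for illustration): with SBQ_WAKE_BATCH == 8 and
 * SBQ_WAIT_QUEUES == 8, a queue of depth 256 keeps wake_batch at 8, since
 * 8 <= 256 / 8. A shallow queue of depth 16 gets wake_batch =
 * max(1, 16 / 8) = 2, and depth 4 degenerates to max(1, 0) = 1 so that a
 * single freed bit is always enough to trigger a wakeup.
 */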
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->wake_batch = sbq_calc_wake_batch(depth);
	atomic_set(&sbq->wake_index, 0);

	sbq->ws = kzalloc(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
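/*
 * Example (a sketch, not part of the original file): initializing a
 * sbitmap_queue for 128 tags. The depth, flags, and NUMA_NO_NODE are
 * assumptions made for illustration only.
 */
static int __maybe_unused sbitmap_queue_init_example(void)
{
	struct sbitmap_queue sbq;
	int ret;

	ret = sbitmap_queue_init_node(&sbq, 128, -1, GFP_KERNEL,
				      NUMA_NO_NODE);
	if (ret)
		return ret;

	/* ... allocate and clear bits ... */

	sbitmap_queue_free(&sbq);
	return 0;
}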
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbq->wake_batch = sbq_calc_wake_batch(depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			int o = atomic_read(&sbq->wake_index);

			if (wake_index != o)
				atomic_cmpxchg(&sbq->wake_index, o,
					       wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}
static void sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	int wait_cnt;

	/* Ensure that the wait list checks occur after clear_bit(). */
	smp_mb();

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (unlikely(wait_cnt < 0))
		wait_cnt = atomic_inc_return(&ws->wait_cnt);
	if (wait_cnt == 0) {
		atomic_add(sbq->wake_batch, &ws->wait_cnt);
		sbq_index_atomic_inc(&sbq->wake_index);
		wake_up(&ws->wait);
	}
}
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr)
{
	sbitmap_clear_bit(&sbq->sb, nr);
	sbq_wake_up(sbq);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
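/*
 * Example (illustrative sketch): the clear side pairs with an earlier
 * allocation. This assumes allocation straight from sbq->sb via
 * sbitmap_get(), matching the helpers visible in this file; releasing
 * through sbitmap_queue_clear() keeps waiters accounted for and woken in
 * batches.
 */
static void __maybe_unused sbitmap_queue_clear_example(struct sbitmap_queue *sbq)
{
	int nr;

	nr = sbitmap_get(&sbq->sb, 0, false);
	if (nr < 0)
		return;

	/* ... the bit at nr is owned here ... */

	/* Releasing decrements wait_cnt and may wake one wait queue. */
	sbitmap_queue_clear(sbq, nr);
}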
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Make sure all changes prior to this are visible from other CPUs.
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);