// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
	unsigned long mask, val;
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&sb->map[index].swap_lock, flags);

	if (!sb->map[index].cleared)
		goto out_unlock;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&sb->map[index].cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	do {
		val = sb->map[index].word;
	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);

	ret = true;
out_unlock:
	spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
	return ret;
}

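/*
 * Worked example for the deferred clear above, with illustrative values:
 * if ->word == 0b1111 and ->cleared == 0b0101, the xchg() grabs
 * mask = 0b0101 and resets ->cleared to 0, and the cmpxchg() loop then
 * stores word & ~mask == 0b1010, making bits 0 and 2 allocatable again.
 */
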
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
		spin_lock_init(&sb->map[i].swap_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

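/*
 * A minimal usage sketch (illustrative caller-side code, not from this
 * file). With depth = 8 and shift = -1 on a 64-bit machine, shift starts
 * at ilog2(64) = 6 and is walked down until (4U << shift) <= depth,
 * giving shift = 1, so bits_per_word = 2 and map_nr = 4:
 *
 *	struct sbitmap sb;
 *
 *	if (sbitmap_init_node(&sb, 8, -1, GFP_KERNEL, NUMA_NO_NODE))
 *		return -ENOMEM;
 */
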
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(sb, i);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

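/*
 * Note that sbitmap_resize() does not reallocate ->map; the caller must
 * ensure the new depth does not exceed the depth the sbitmap was
 * initialized with.
 */
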
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

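/*
 * Allocation within one word: retry __sbitmap_get_word() as long as
 * flushing the deferred-clear mask frees up bits, so a word whose free
 * bits all sit in ->cleared is still allocatable.
 */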
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&sb->map[index].word,
					sb->map[index].depth, alloc_hint,
					!round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(sb, index))
			break;
	} while (1);

	return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
					       round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(sb, index))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

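/*
 * Note that shallow_depth above caps the search depth of each word, not
 * of the bitmap as a whole: with map_nr words, up to
 * map_nr * shallow_depth bits remain reachable.
 */
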
bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true);
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

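/*
 * The dump produced above is a classic hex dump: sixteen bytes per line,
 * grouped in pairs, prefixed with the byte offset. An illustrative line
 * for a 128-bit map with only the lowest byte fully allocated would read:
 *
 *	00000000: ff00 0000 0000 0000 0000 0000 0000 0000
 */
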
static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

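/*
 * Worked example for the calculation above (illustrative values):
 * depth = 256, shift = 6 and min_shallow_depth = UINT_MAX give
 * shallow_depth = 64, so depth stays (256 >> 6) * 64 + 0 = 256, and
 * wake_batch = clamp(256 / SBQ_WAIT_QUEUES, 1, SBQ_WAKE_BATCH) = 8
 * with the usual SBQ_WAIT_QUEUES = 8 and SBQ_WAKE_BATCH = 8.
 */
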
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

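/*
 * Illustrative caller-side init (assumed, not from this file): a queue
 * sized for 64 tags with per-CPU hint caching (round_robin = false) and
 * the word size chosen automatically:
 *
 *	struct sbitmap_queue sbq;
 *
 *	if (sbitmap_queue_init_node(&sbq, 64, -1, false, GFP_KERNEL,
 *				    NUMA_NO_NODE))
 *		return -ENOMEM;
 */
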
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

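/*
 * Batched wakeup: each freed bit decrements the wait_cnt of the wait
 * queue returned by sbq_wake_ptr(); only when it reaches zero is the
 * counter recharged to wake_batch and wake_batch waiters woken, which
 * amortizes wakeup cost across the SBQ_WAIT_QUEUES queues.
 */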
static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wakeup a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated again.
	 *
	 * Orders READ/WRITE on the associated instance (such as a blk_mq
	 * request) by this bit to avoid racing with re-allocation. Its
	 * pair is the memory barrier implied in __sbitmap_get_word.
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
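
/*
 * A hedged usage sketch of the wait helpers above (illustrative only;
 * real callers such as blk-mq add their own retry and cleanup logic).
 * DEFINE_SBQ_WAIT() and sbq_wait_ptr() come from <linux/sbitmap.h>, and
 * wait_index is the caller's atomic_t cursor:
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws = sbq_wait_ptr(sbq, &wait_index);
 *	int nr;
 *
 *	sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *	nr = __sbitmap_queue_get(sbq);
 *	if (nr < 0)
 *		io_schedule();
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */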