/*
 * Definitions for the 'struct ptr_ring' data structure.
 *
 * Author:
 *	Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (C) 2016 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This is a limited-size FIFO maintaining pointers in FIFO order, with
 * one CPU producing entries and another consuming entries from the FIFO.
 *
 * This implementation tries to minimize cache-contention when there is a
 * single producer and a single consumer CPU.
 */

#ifndef _LINUX_PTR_RING_H
#define _LINUX_PTR_RING_H 1

#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/errno.h>
#endif

struct ptr_ring {
        int producer ____cacheline_aligned_in_smp;
        spinlock_t producer_lock;
        int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */
        int consumer_tail; /* next entry to invalidate */
        spinlock_t consumer_lock;
        /* Shared consumer/producer data */
        /* Read-only by both the producer and the consumer */
        int size ____cacheline_aligned_in_smp; /* max entries in queue */
        int batch; /* number of entries to consume in a batch */
        void **queue;
};

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). If ring is ever resized, callers must hold
 * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold
 * producer_lock, the next call to __ptr_ring_produce may fail.
 */
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
        return r->queue[r->producer];
}
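
/* Illustrative sketch (not part of this header): a producer that polls for
 * free space, following the note above. The object pointer "obj" is
 * hypothetical, and real callers usually prefer to drop or defer work rather
 * than spin. Holding producer_lock across the loop keeps the full/produce
 * pair consistent with a concurrent resize.
 *
 *	spin_lock(&r->producer_lock);
 *	while (__ptr_ring_full(r))
 *		cpu_relax();	// compiler barrier between re-reads
 *	__ptr_ring_produce(r, obj);
 *	spin_unlock(&r->producer_lock);
 */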

static inline bool ptr_ring_full(struct ptr_ring *r)
{
        bool ret;

        spin_lock(&r->producer_lock);
        ret = __ptr_ring_full(r);
        spin_unlock(&r->producer_lock);

        return ret;
}

static inline bool ptr_ring_full_irq(struct ptr_ring *r)
{
        bool ret;

        spin_lock_irq(&r->producer_lock);
        ret = __ptr_ring_full(r);
        spin_unlock_irq(&r->producer_lock);

        return ret;
}

static inline bool ptr_ring_full_any(struct ptr_ring *r)
{
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&r->producer_lock, flags);
        ret = __ptr_ring_full(r);
        spin_unlock_irqrestore(&r->producer_lock, flags);

        return ret;
}

static inline bool ptr_ring_full_bh(struct ptr_ring *r)
{
        bool ret;

        spin_lock_bh(&r->producer_lock);
        ret = __ptr_ring_full(r);
        spin_unlock_bh(&r->producer_lock);

        return ret;
}

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must hold producer_lock.
 * Callers are responsible for making sure the pointer that is being queued
 * points to valid data.
 */
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
        if (unlikely(!r->size) || r->queue[r->producer])
                return -ENOSPC;

        /* Make sure the pointer we are storing points to valid data. */
        /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
        smp_wmb();

        r->queue[r->producer++] = ptr;
        if (unlikely(r->producer >= r->size))
                r->producer = 0;
        return 0;
}

/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * consume in interrupt or BH context, you must disable interrupts/BH when
 * calling this.
 */
static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
        int ret;

        spin_lock(&r->producer_lock);
        ret = __ptr_ring_produce(r, ptr);
        spin_unlock(&r->producer_lock);

        return ret;
}

static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
{
        int ret;

        spin_lock_irq(&r->producer_lock);
        ret = __ptr_ring_produce(r, ptr);
        spin_unlock_irq(&r->producer_lock);

        return ret;
}

static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&r->producer_lock, flags);
        ret = __ptr_ring_produce(r, ptr);
        spin_unlock_irqrestore(&r->producer_lock, flags);

        return ret;
}

static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
{
        int ret;

        spin_lock_bh(&r->producer_lock);
        ret = __ptr_ring_produce(r, ptr);
        spin_unlock_bh(&r->producer_lock);

        return ret;
}
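
/* Illustrative sketch (not part of this header): per the note above, a
 * hypothetical driver whose consumer runs in BH context produces from
 * process context with BH disabled, so that a concurrent resize (which
 * nests producer_lock inside consumer_lock) cannot deadlock against a
 * BH consumer preempting the producer.
 *
 *	int queue_obj(struct ptr_ring *r, void *obj)
 *	{
 *		return ptr_ring_produce_bh(r, obj);	// -ENOSPC if full
 *	}
 *
 * The consumer side would then pair this with ptr_ring_consume_bh().
 */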

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
 * If the ring is never resized, and if the pointer is merely
 * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
 * However, if called outside the lock, and if some other CPU
 * consumes ring entries at the same time, the value returned
 * is not guaranteed to be correct.
 * In this case - to avoid incorrectly detecting the ring
 * as empty - the CPU consuming the ring entries is responsible
 * for either consuming all ring entries until the ring is empty,
 * or synchronizing with some other CPU and causing it to
 * execute __ptr_ring_peek and/or consume the ring entries
 * after the synchronization point.
 */
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
        if (likely(r->size))
                return r->queue[r->consumer_head];
        return NULL;
}

/* See __ptr_ring_peek above for locking rules. */
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
        return !__ptr_ring_peek(r);
}
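
/* Illustrative sketch (not part of this header): per the locking rules
 * above, the lone consumer of a ring that is never resized may test for
 * emptiness without taking consumer_lock, e.g. to decide when to stop
 * polling. The process() handler below is hypothetical.
 *
 *	while (!__ptr_ring_empty(r)) {
 *		void *obj = ptr_ring_consume(r);
 *
 *		process(obj);	// hypothetical per-entry handler
 *	}
 */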

static inline bool ptr_ring_empty(struct ptr_ring *r)
{
        bool ret;

        spin_lock(&r->consumer_lock);
        ret = __ptr_ring_empty(r);
        spin_unlock(&r->consumer_lock);

        return ret;
}

static inline bool ptr_ring_empty_irq(struct ptr_ring *r)
{
        bool ret;

        spin_lock_irq(&r->consumer_lock);
        ret = __ptr_ring_empty(r);
        spin_unlock_irq(&r->consumer_lock);

        return ret;
}

static inline bool ptr_ring_empty_any(struct ptr_ring *r)
{
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&r->consumer_lock, flags);
        ret = __ptr_ring_empty(r);
        spin_unlock_irqrestore(&r->consumer_lock, flags);

        return ret;
}

static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
{
        bool ret;

        spin_lock_bh(&r->consumer_lock);
        ret = __ptr_ring_empty(r);
        spin_unlock_bh(&r->consumer_lock);

        return ret;
}

/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
        /* Fundamentally, what we want to do is update the consumer
         * index and zero out the entry so the producer can reuse it.
         * Doing it naively at each consume would be as simple as:
         *       r->queue[r->consumer++] = NULL;
         *       if (unlikely(r->consumer >= r->size))
         *               r->consumer = 0;
         * but that is suboptimal when the ring is full, as the producer is
         * writing out new entries in the same cache line. Defer these
         * updates until a batch of entries has been consumed.
         */
        int head = r->consumer_head++;

        /* Once we have processed enough entries, invalidate them in
         * the ring all at once so the producer can reuse their space.
         * We also do this when we reach the end of the ring - not mandatory
         * but helps keep the implementation simple.
         */
        if (unlikely(r->consumer_head - r->consumer_tail >= r->batch ||
                     r->consumer_head >= r->size)) {
                /* Zero out entries in reverse order: this way the cache line
                 * that the producer might currently be reading is the last
                 * one we touch; the producer won't make progress and touch
                 * other cache lines besides the first one until we write out
                 * all entries.
                 */
                while (likely(head >= r->consumer_tail))
                        r->queue[head--] = NULL;
                r->consumer_tail = r->consumer_head;
        }
        if (unlikely(r->consumer_head >= r->size)) {
                r->consumer_head = 0;
                r->consumer_tail = 0;
        }
}

static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
        void *ptr;

        ptr = __ptr_ring_peek(r);
        if (ptr)
                __ptr_ring_discard_one(r);

        /* Make sure anyone accessing data through the pointer is up to date. */
        /* Pairs with smp_wmb in __ptr_ring_produce. */
        smp_read_barrier_depends();
        return ptr;
}

static inline int __ptr_ring_consume_batched(struct ptr_ring *r,
                                             void **array, int n)
{
        void *ptr;
        int i;

        for (i = 0; i < n; i++) {
                ptr = __ptr_ring_consume(r);
                if (!ptr)
                        break;
                array[i] = ptr;
        }

        return i;
}

/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * call this in interrupt or BH context, you must disable interrupts/BH when
 * producing.
 */
static inline void *ptr_ring_consume(struct ptr_ring *r)
{
        void *ptr;

        spin_lock(&r->consumer_lock);
        ptr = __ptr_ring_consume(r);
        spin_unlock(&r->consumer_lock);

        return ptr;
}

static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
{
        void *ptr;

        spin_lock_irq(&r->consumer_lock);
        ptr = __ptr_ring_consume(r);
        spin_unlock_irq(&r->consumer_lock);

        return ptr;
}

static inline void *ptr_ring_consume_any(struct ptr_ring *r)
{
        unsigned long flags;
        void *ptr;

        spin_lock_irqsave(&r->consumer_lock, flags);
        ptr = __ptr_ring_consume(r);
        spin_unlock_irqrestore(&r->consumer_lock, flags);

        return ptr;
}

static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
{
        void *ptr;

        spin_lock_bh(&r->consumer_lock);
        ptr = __ptr_ring_consume(r);
        spin_unlock_bh(&r->consumer_lock);

        return ptr;
}

static inline int ptr_ring_consume_batched(struct ptr_ring *r,
                                           void **array, int n)
{
        int ret;

        spin_lock(&r->consumer_lock);
        ret = __ptr_ring_consume_batched(r, array, n);
        spin_unlock(&r->consumer_lock);

        return ret;
}
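
/* Illustrative sketch (not part of this header): draining up to a small
 * batch of entries in one locked section instead of locking per entry.
 * The array size and the free_obj() handler are hypothetical.
 *
 *	void *batch[16];
 *	int i, n;
 *
 *	n = ptr_ring_consume_batched(r, batch, ARRAY_SIZE(batch));
 *	for (i = 0; i < n; i++)
 *		free_obj(batch[i]);	// hypothetical per-entry handler
 */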

static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r,
                                               void **array, int n)
{
        int ret;

        spin_lock_irq(&r->consumer_lock);
        ret = __ptr_ring_consume_batched(r, array, n);
        spin_unlock_irq(&r->consumer_lock);

        return ret;
}

static inline int ptr_ring_consume_batched_any(struct ptr_ring *r,
                                               void **array, int n)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&r->consumer_lock, flags);
        ret = __ptr_ring_consume_batched(r, array, n);
        spin_unlock_irqrestore(&r->consumer_lock, flags);

        return ret;
}

static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
                                              void **array, int n)
{
        int ret;

        spin_lock_bh(&r->consumer_lock);
        ret = __ptr_ring_consume_batched(r, array, n);
        spin_unlock_bh(&r->consumer_lock);

        return ret;
}

/* Cast to structure type and call a function without discarding from FIFO.
 * Function must return a value.
 * Callers must take consumer_lock.
 */
#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))

#define PTR_RING_PEEK_CALL(r, f) ({ \
        typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
        \
        spin_lock(&(r)->consumer_lock); \
        __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
        spin_unlock(&(r)->consumer_lock); \
        __PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
        typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
        \
        spin_lock_irq(&(r)->consumer_lock); \
        __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
        spin_unlock_irq(&(r)->consumer_lock); \
        __PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_BH(r, f) ({ \
        typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
        \
        spin_lock_bh(&(r)->consumer_lock); \
        __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
        spin_unlock_bh(&(r)->consumer_lock); \
        __PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
        typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
        unsigned long __PTR_RING_PEEK_CALL_f; \
        \
        spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
        __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
        spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
        __PTR_RING_PEEK_CALL_v; \
})
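
/* Illustrative sketch (not part of this header): peeking at the head entry
 * without consuming it, e.g. to learn its size before deciding to dequeue.
 * obj_peek_len() and struct obj are hypothetical; the callback must tolerate
 * a NULL argument since the ring may be empty.
 *
 *	static int obj_peek_len(void *ptr)
 *	{
 *		return ptr ? ((struct obj *)ptr)->len : 0;
 *	}
 *
 *	int len = PTR_RING_PEEK_CALL(r, obj_peek_len);
 */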

/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See
 * documentation for vmalloc for which of them are legal.
 */
static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
        if (size > KMALLOC_MAX_SIZE / sizeof(void *))
                return NULL;
        return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
}

static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
{
        r->size = size;
        r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue));
        /* We need to set batch at least to 1 to make logic
         * in __ptr_ring_discard_one work correctly.
         * Batching too much (because ring is small) would cause a lot of
         * burstiness. Needs tuning, for now disable batching.
         */
        if (r->batch > r->size / 2 || !r->batch)
                r->batch = 1;
}
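
/* Worked example (assuming typical values, not normative): with 64-byte
 * cache lines and 8-byte pointers, batch = 64 * 2 / 8 = 16, so the consumer
 * invalidates entries in groups of 16; a ring smaller than 32 entries falls
 * back to batch = 1.
 */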

static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
{
        r->queue = __ptr_ring_init_queue_alloc(size, gfp);
        if (!r->queue)
                return -ENOMEM;

        __ptr_ring_set_size(r, size);
        r->producer = r->consumer_head = r->consumer_tail = 0;
        spin_lock_init(&r->producer_lock);
        spin_lock_init(&r->consumer_lock);

        return 0;
}
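
/* Illustrative sketch (not part of this header): typical lifecycle of a
 * ring owned by a hypothetical driver - allocate, pass pointers from the
 * producer to the consumer, then free. Error handling is elided and
 * free_obj() is a hypothetical destructor.
 *
 *	struct ptr_ring ring;
 *	void *obj;
 *
 *	if (ptr_ring_init(&ring, 256, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	if (ptr_ring_produce(&ring, obj))	// ring full: -ENOSPC
 *		free_obj(obj);
 *
 *	obj = ptr_ring_consume(&ring);		// NULL if the ring is empty
 *
 *	ptr_ring_cleanup(&ring, free_obj);	// frees anything still queued
 */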

/*
 * Return entries into ring. Destroy entries that don't fit.
 *
 * Note: this is expected to be a rare slow path operation.
 *
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
                                      void (*destroy)(void *))
{
        unsigned long flags;
        int head;

        spin_lock_irqsave(&r->consumer_lock, flags);
        spin_lock(&r->producer_lock);

        if (!r->size)
                goto done;

        /*
         * Clean out buffered entries (for simplicity). This way following code
         * can test entries for NULL and if not assume they are valid.
         */
        head = r->consumer_head - 1;
        while (likely(head >= r->consumer_tail))
                r->queue[head--] = NULL;
        r->consumer_tail = r->consumer_head;

        /*
         * Go over entries in batch, start moving head back and copy entries.
         * Stop when we run into previously unconsumed entries.
         */
        while (n) {
                head = r->consumer_head - 1;
                if (head < 0)
                        head = r->size - 1;
                if (r->queue[head]) {
                        /* This batch entry will have to be destroyed. */
                        goto done;
                }
                r->queue[head] = batch[--n];
                r->consumer_tail = r->consumer_head = head;
        }

done:
        /* Destroy all entries left in the batch. */
        while (n)
                destroy(batch[--n]);
        spin_unlock(&r->producer_lock);
        spin_unlock_irqrestore(&r->consumer_lock, flags);
}
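
/* Illustrative sketch (not part of this header): a consumer that pulled a
 * batch but could not process all of it returns the leftovers to the ring,
 * preserving FIFO order; whatever no longer fits is destroyed. The names
 * below (process_some, free_obj) are hypothetical.
 *
 *	n = ptr_ring_consume_batched(r, batch, ARRAY_SIZE(batch));
 *	processed = process_some(batch, n);	// may stop before n
 *	ptr_ring_unconsume(r, batch + processed, n - processed, free_obj);
 */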

static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
                                           int size, gfp_t gfp,
                                           void (*destroy)(void *))
{
        int producer = 0;
        void **old;
        void *ptr;

        while ((ptr = __ptr_ring_consume(r)))
                if (producer < size)
                        queue[producer++] = ptr;
                else if (destroy)
                        destroy(ptr);

        if (producer >= size)
                producer = 0;
        __ptr_ring_set_size(r, size);
        r->producer = producer;
        r->consumer_head = 0;
        r->consumer_tail = 0;
        old = r->queue;
        r->queue = queue;

        return old;
}

/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
                                  void (*destroy)(void *))
{
        unsigned long flags;
        void **queue = __ptr_ring_init_queue_alloc(size, gfp);
        void **old;

        if (!queue)
                return -ENOMEM;

        spin_lock_irqsave(&(r)->consumer_lock, flags);
        spin_lock(&(r)->producer_lock);

        old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);

        spin_unlock(&(r)->producer_lock);
        spin_unlock_irqrestore(&(r)->consumer_lock, flags);

        kvfree(old);

        return 0;
}
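
/* Illustrative sketch (not part of this header): doubling a ring that is
 * found to be persistently full. Entries that would not fit in the new ring
 * are passed to the destroy callback; old_size and free_obj() are
 * hypothetical.
 *
 *	if (ptr_ring_full(r))
 *		err = ptr_ring_resize(r, 2 * old_size, GFP_KERNEL, free_obj);
 */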

/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
                                           unsigned int nrings,
                                           int size,
                                           gfp_t gfp, void (*destroy)(void *))
{
        unsigned long flags;
        void ***queues;
        int i;

        queues = kmalloc_array(nrings, sizeof(*queues), gfp);
        if (!queues)
                goto noqueues;

        for (i = 0; i < nrings; ++i) {
                queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
                if (!queues[i])
                        goto nomem;
        }

        for (i = 0; i < nrings; ++i) {
                spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
                spin_lock(&(rings[i])->producer_lock);
                queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
                                                  size, gfp, destroy);
                spin_unlock(&(rings[i])->producer_lock);
                spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
        }

        for (i = 0; i < nrings; ++i)
                kvfree(queues[i]);

        kfree(queues);

        return 0;

nomem:
        while (--i >= 0)
                kvfree(queues[i]);

        kfree(queues);

noqueues:
        return -ENOMEM;
}

static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
{
        void *ptr;

        if (destroy)
                while ((ptr = ptr_ring_consume(r)))
                        destroy(ptr);
        kvfree(r->queue);
}

#endif /* _LINUX_PTR_RING_H */