/*
 * Definitions for the 'struct ptr_ring' data structure.
 *
 * Author:
 *	Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (C) 2016 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This is a limited-size FIFO maintaining pointers in FIFO order, with
 * one CPU producing entries and another consuming them.
 *
 * This implementation tries to minimize cache contention when there is a
 * single producer and a single consumer CPU.
 */
20 | ||
21 | #ifndef _LINUX_PTR_RING_H | |
22 | #define _LINUX_PTR_RING_H 1 | |
23 | ||
24 | #ifdef __KERNEL__ | |
25 | #include <linux/spinlock.h> | |
26 | #include <linux/cache.h> | |
27 | #include <linux/types.h> | |
28 | #include <linux/compiler.h> | |
29 | #include <linux/cache.h> | |
30 | #include <linux/slab.h> | |
31 | #include <asm/errno.h> | |
32 | #endif | |
33 | ||
34 | struct ptr_ring { | |
35 | int producer ____cacheline_aligned_in_smp; | |
36 | spinlock_t producer_lock; | |
37 | int consumer ____cacheline_aligned_in_smp; | |
38 | spinlock_t consumer_lock; | |
39 | /* Shared consumer/producer data */ | |
40 | /* Read-only by both the producer and the consumer */ | |
41 | int size ____cacheline_aligned_in_smp; /* max entries in queue */ | |
42 | void **queue; | |
43 | }; | |
44 | ||
45 | /* Note: callers invoking this in a loop must use a compiler barrier, | |
5d49de53 MT |
46 | * for example cpu_relax(). If ring is ever resized, callers must hold |
47 | * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold | |
48 | * producer_lock, the next call to __ptr_ring_produce may fail. | |
2e0ab8ca MT |
49 | */ |
50 | static inline bool __ptr_ring_full(struct ptr_ring *r) | |
51 | { | |
52 | return r->queue[r->producer]; | |
53 | } | |
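
/* Illustrative sketch (not part of the original header): the barrier rule
 * above in practice. A single producer can hold producer_lock and spin on
 * __ptr_ring_full() until the consumer (who uses consumer_lock) frees a
 * slot; cpu_relax() supplies the required compiler barrier so queue[] is
 * re-read on each iteration:
 *
 *	spin_lock(&r->producer_lock);
 *	while (__ptr_ring_full(r))
 *		cpu_relax();		// barrier: forces queue[] re-read
 *	__ptr_ring_produce(r, ptr);	// cannot fail: slot observed free
 *	spin_unlock(&r->producer_lock);
 */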
54 | ||
55 | static inline bool ptr_ring_full(struct ptr_ring *r) | |
56 | { | |
5d49de53 MT |
57 | bool ret; |
58 | ||
59 | spin_lock(&r->producer_lock); | |
60 | ret = __ptr_ring_full(r); | |
61 | spin_unlock(&r->producer_lock); | |
62 | ||
63 | return ret; | |
64 | } | |
65 | ||
66 | static inline bool ptr_ring_full_irq(struct ptr_ring *r) | |
67 | { | |
68 | bool ret; | |
69 | ||
70 | spin_lock_irq(&r->producer_lock); | |
71 | ret = __ptr_ring_full(r); | |
72 | spin_unlock_irq(&r->producer_lock); | |
73 | ||
74 | return ret; | |
75 | } | |
76 | ||
77 | static inline bool ptr_ring_full_any(struct ptr_ring *r) | |
78 | { | |
79 | unsigned long flags; | |
80 | bool ret; | |
81 | ||
82 | spin_lock_irqsave(&r->producer_lock, flags); | |
83 | ret = __ptr_ring_full(r); | |
84 | spin_unlock_irqrestore(&r->producer_lock, flags); | |
85 | ||
86 | return ret; | |
87 | } | |
88 | ||
89 | static inline bool ptr_ring_full_bh(struct ptr_ring *r) | |
90 | { | |
91 | bool ret; | |
92 | ||
93 | spin_lock_bh(&r->producer_lock); | |
94 | ret = __ptr_ring_full(r); | |
95 | spin_unlock_bh(&r->producer_lock); | |
96 | ||
97 | return ret; | |
2e0ab8ca MT |
98 | } |
99 | ||
100 | /* Note: callers invoking this in a loop must use a compiler barrier, | |
5d49de53 | 101 | * for example cpu_relax(). Callers must hold producer_lock. |
2e0ab8ca MT |
102 | */ |
103 | static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) | |
104 | { | |
982fb490 | 105 | if (unlikely(!r->size) || r->queue[r->producer]) |
2e0ab8ca MT |
106 | return -ENOSPC; |
107 | ||
108 | r->queue[r->producer++] = ptr; | |
109 | if (unlikely(r->producer >= r->size)) | |
110 | r->producer = 0; | |
111 | return 0; | |
112 | } | |
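
/* Illustrative sketch (not part of the original header): __ptr_ring_produce()
 * lets a caller amortize one lock acquisition over a batch of entries.
 * my_produce_batch() is a hypothetical helper, not a kernel API; it stops
 * at the first full slot and reports how many entries were queued:
 *
 *	static int my_produce_batch(struct ptr_ring *r, void **ptrs, int n)
 *	{
 *		int i;
 *
 *		spin_lock(&r->producer_lock);
 *		for (i = 0; i < n; i++)
 *			if (__ptr_ring_produce(r, ptrs[i]))
 *				break;	// -ENOSPC: ring full, stop here
 *		spin_unlock(&r->producer_lock);
 *
 *		return i;	// number of entries actually queued
 *	}
 */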
113 | ||
114 | static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr) | |
115 | { | |
116 | int ret; | |
117 | ||
118 | spin_lock(&r->producer_lock); | |
119 | ret = __ptr_ring_produce(r, ptr); | |
120 | spin_unlock(&r->producer_lock); | |
121 | ||
122 | return ret; | |
123 | } | |
124 | ||
125 | static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr) | |
126 | { | |
127 | int ret; | |
128 | ||
129 | spin_lock_irq(&r->producer_lock); | |
130 | ret = __ptr_ring_produce(r, ptr); | |
131 | spin_unlock_irq(&r->producer_lock); | |
132 | ||
133 | return ret; | |
134 | } | |
135 | ||
136 | static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr) | |
137 | { | |
138 | unsigned long flags; | |
139 | int ret; | |
140 | ||
141 | spin_lock_irqsave(&r->producer_lock, flags); | |
142 | ret = __ptr_ring_produce(r, ptr); | |
143 | spin_unlock_irqrestore(&r->producer_lock, flags); | |
144 | ||
145 | return ret; | |
146 | } | |
147 | ||
148 | static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) | |
149 | { | |
150 | int ret; | |
151 | ||
152 | spin_lock_bh(&r->producer_lock); | |
153 | ret = __ptr_ring_produce(r, ptr); | |
154 | spin_unlock_bh(&r->producer_lock); | |
155 | ||
156 | return ret; | |
157 | } | |
158 | ||
159 | /* Note: callers invoking this in a loop must use a compiler barrier, | |
160 | * for example cpu_relax(). Callers must take consumer_lock | |
161 | * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL. | |
5d49de53 MT |
162 | * If ring is never resized, and if the pointer is merely |
163 | * tested, there's no need to take the lock - see e.g. __ptr_ring_empty. | |
2e0ab8ca MT |
164 | */ |
165 | static inline void *__ptr_ring_peek(struct ptr_ring *r) | |
166 | { | |
982fb490 JW |
167 | if (likely(r->size)) |
168 | return r->queue[r->consumer]; | |
169 | return NULL; | |
2e0ab8ca MT |
170 | } |
171 | ||
5d49de53 MT |
172 | /* Note: callers invoking this in a loop must use a compiler barrier, |
173 | * for example cpu_relax(). Callers must take consumer_lock | |
174 | * if the ring is ever resized - see e.g. ptr_ring_empty. | |
175 | */ | |
176 | static inline bool __ptr_ring_empty(struct ptr_ring *r) | |
2e0ab8ca | 177 | { |
2e0ab8ca MT |
178 | return !__ptr_ring_peek(r); |
179 | } | |
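
/* Illustrative sketch (not part of the original header): the locking rule
 * above in practice. Merely testing the pointer needs no lock (assuming the
 * ring is never resized); dereferencing it does, so the consumer can't
 * discard the slot underneath us. process() is a hypothetical stand-in for
 * consumer-side work:
 *
 *	// Cheap lock-free check - the pointer is only tested:
 *	if (__ptr_ring_empty(r))
 *		return;
 *
 *	// Dereference requires consumer_lock:
 *	spin_lock(&r->consumer_lock);
 *	ptr = __ptr_ring_peek(r);
 *	if (ptr)
 *		process(ptr);
 *	spin_unlock(&r->consumer_lock);
 */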
180 | ||
5d49de53 MT |
181 | static inline bool ptr_ring_empty(struct ptr_ring *r) |
182 | { | |
183 | bool ret; | |
184 | ||
185 | spin_lock(&r->consumer_lock); | |
186 | ret = __ptr_ring_empty(r); | |
187 | spin_unlock(&r->consumer_lock); | |
188 | ||
189 | return ret; | |
190 | } | |
191 | ||
192 | static inline bool ptr_ring_empty_irq(struct ptr_ring *r) | |
193 | { | |
194 | bool ret; | |
195 | ||
196 | spin_lock_irq(&r->consumer_lock); | |
197 | ret = __ptr_ring_empty(r); | |
198 | spin_unlock_irq(&r->consumer_lock); | |
199 | ||
200 | return ret; | |
201 | } | |
202 | ||
203 | static inline bool ptr_ring_empty_any(struct ptr_ring *r) | |
204 | { | |
205 | unsigned long flags; | |
206 | bool ret; | |
207 | ||
208 | spin_lock_irqsave(&r->consumer_lock, flags); | |
209 | ret = __ptr_ring_empty(r); | |
210 | spin_unlock_irqrestore(&r->consumer_lock, flags); | |
211 | ||
212 | return ret; | |
213 | } | |
214 | ||
215 | static inline bool ptr_ring_empty_bh(struct ptr_ring *r) | |
216 | { | |
217 | bool ret; | |
218 | ||
219 | spin_lock_bh(&r->consumer_lock); | |
220 | ret = __ptr_ring_empty(r); | |
221 | spin_unlock_bh(&r->consumer_lock); | |
222 | ||
223 | return ret; | |
224 | } | |
225 | ||
2e0ab8ca MT |
226 | /* Must only be called after __ptr_ring_peek returned !NULL */ |
227 | static inline void __ptr_ring_discard_one(struct ptr_ring *r) | |
228 | { | |
229 | r->queue[r->consumer++] = NULL; | |
230 | if (unlikely(r->consumer >= r->size)) | |
231 | r->consumer = 0; | |
232 | } | |
233 | ||
234 | static inline void *__ptr_ring_consume(struct ptr_ring *r) | |
235 | { | |
236 | void *ptr; | |
237 | ||
238 | ptr = __ptr_ring_peek(r); | |
239 | if (ptr) | |
240 | __ptr_ring_discard_one(r); | |
241 | ||
242 | return ptr; | |
243 | } | |
244 | ||
245 | static inline void *ptr_ring_consume(struct ptr_ring *r) | |
246 | { | |
247 | void *ptr; | |
248 | ||
249 | spin_lock(&r->consumer_lock); | |
250 | ptr = __ptr_ring_consume(r); | |
251 | spin_unlock(&r->consumer_lock); | |
252 | ||
253 | return ptr; | |
254 | } | |
255 | ||
256 | static inline void *ptr_ring_consume_irq(struct ptr_ring *r) | |
257 | { | |
258 | void *ptr; | |
259 | ||
260 | spin_lock_irq(&r->consumer_lock); | |
261 | ptr = __ptr_ring_consume(r); | |
262 | spin_unlock_irq(&r->consumer_lock); | |
263 | ||
264 | return ptr; | |
265 | } | |
266 | ||
267 | static inline void *ptr_ring_consume_any(struct ptr_ring *r) | |
268 | { | |
269 | unsigned long flags; | |
270 | void *ptr; | |
271 | ||
272 | spin_lock_irqsave(&r->consumer_lock, flags); | |
273 | ptr = __ptr_ring_consume(r); | |
274 | spin_unlock_irqrestore(&r->consumer_lock, flags); | |
275 | ||
276 | return ptr; | |
277 | } | |
278 | ||
279 | static inline void *ptr_ring_consume_bh(struct ptr_ring *r) | |
280 | { | |
281 | void *ptr; | |
282 | ||
283 | spin_lock_bh(&r->consumer_lock); | |
284 | ptr = __ptr_ring_consume(r); | |
285 | spin_unlock_bh(&r->consumer_lock); | |
286 | ||
287 | return ptr; | |
288 | } | |
289 | ||
290 | /* Cast to structure type and call a function without discarding from FIFO. | |
291 | * Function must return a value. | |
292 | * Callers must take consumer_lock. | |
293 | */ | |
294 | #define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r))) | |
295 | ||
296 | #define PTR_RING_PEEK_CALL(r, f) ({ \ | |
297 | typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ | |
298 | \ | |
299 | spin_lock(&(r)->consumer_lock); \ | |
300 | __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ | |
301 | spin_unlock(&(r)->consumer_lock); \ | |
302 | __PTR_RING_PEEK_CALL_v; \ | |
303 | }) | |
304 | ||
305 | #define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \ | |
306 | typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ | |
307 | \ | |
308 | spin_lock_irq(&(r)->consumer_lock); \ | |
309 | __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ | |
310 | spin_unlock_irq(&(r)->consumer_lock); \ | |
311 | __PTR_RING_PEEK_CALL_v; \ | |
312 | }) | |
313 | ||
314 | #define PTR_RING_PEEK_CALL_BH(r, f) ({ \ | |
315 | typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ | |
316 | \ | |
317 | spin_lock_bh(&(r)->consumer_lock); \ | |
318 | __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ | |
319 | spin_unlock_bh(&(r)->consumer_lock); \ | |
320 | __PTR_RING_PEEK_CALL_v; \ | |
321 | }) | |
322 | ||
323 | #define PTR_RING_PEEK_CALL_ANY(r, f) ({ \ | |
324 | typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ | |
325 | unsigned long __PTR_RING_PEEK_CALL_f;\ | |
326 | \ | |
327 | spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ | |
328 | __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ | |
329 | spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ | |
330 | __PTR_RING_PEEK_CALL_v; \ | |
331 | }) | |
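
/* Illustrative sketch (not part of the original header): peeking at a typed
 * entry without consuming it. The function passed in must tolerate a NULL
 * argument, since __ptr_ring_peek() returns NULL on an empty ring.
 * struct my_entry and my_entry_len() are hypothetical, not kernel APIs:
 *
 *	static int my_entry_len(void *ptr)
 *	{
 *		struct my_entry *e = ptr;
 *
 *		return e ? e->len : 0;	// 0 when the ring is empty
 *	}
 *
 *	int len = PTR_RING_PEEK_CALL(r, my_entry_len);
 */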
332 | ||
5d49de53 MT |
333 | static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp) |
334 | { | |
335 | return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp); | |
336 | } | |
337 | ||
2e0ab8ca MT |
338 | static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) |
339 | { | |
5d49de53 | 340 | r->queue = __ptr_ring_init_queue_alloc(size, gfp); |
2e0ab8ca MT |
341 | if (!r->queue) |
342 | return -ENOMEM; | |
343 | ||
344 | r->size = size; | |
345 | r->producer = r->consumer = 0; | |
346 | spin_lock_init(&r->producer_lock); | |
347 | spin_lock_init(&r->consumer_lock); | |
348 | ||
349 | return 0; | |
350 | } | |
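
/* Illustrative sketch (not part of the original header): a full ring
 * lifecycle. The size of 16 is arbitrary; my_destroy() is a hypothetical
 * wrapper giving kfree() the void (*)(void *) signature the cleanup and
 * resize callbacks expect:
 *
 *	static void my_destroy(void *p)
 *	{
 *		kfree(p);
 *	}
 *
 *	struct ptr_ring ring;
 *	void *obj, *item;
 *
 *	if (ptr_ring_init(&ring, 16, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	obj = kmalloc(64, GFP_KERNEL);
 *	if (obj && ptr_ring_produce(&ring, obj))
 *		kfree(obj);			// -ENOSPC: ring was full
 *
 *	while ((item = ptr_ring_consume(&ring)))
 *		my_destroy(item);		// drained in FIFO order
 *
 *	ptr_ring_cleanup(&ring, my_destroy);	// frees leftovers + queue
 */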
351 | ||
5d49de53 MT |
352 | static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, |
353 | void (*destroy)(void *)) | |
354 | { | |
355 | unsigned long flags; | |
356 | int producer = 0; | |
357 | void **queue = __ptr_ring_init_queue_alloc(size, gfp); | |
358 | void **old; | |
359 | void *ptr; | |
360 | ||
361 | if (!queue) | |
362 | return -ENOMEM; | |
363 | ||
364 | spin_lock_irqsave(&(r)->producer_lock, flags); | |
365 | ||
366 | while ((ptr = ptr_ring_consume(r))) | |
367 | if (producer < size) | |
368 | queue[producer++] = ptr; | |
369 | else if (destroy) | |
370 | destroy(ptr); | |
371 | ||
372 | r->size = size; | |
373 | r->producer = producer; | |
374 | r->consumer = 0; | |
375 | old = r->queue; | |
376 | r->queue = queue; | |
377 | ||
378 | spin_unlock_irqrestore(&(r)->producer_lock, flags); | |
379 | ||
380 | kfree(old); | |
381 | ||
382 | return 0; | |
383 | } | |
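
/* Illustrative sketch (not part of the original header): shrinking a ring
 * at runtime. Entries beyond the new capacity are handed to the destroy
 * callback rather than leaked; on -ENOMEM the old ring is left untouched.
 * my_destroy() is the same hypothetical wrapper as above:
 *
 *	err = ptr_ring_resize(&ring, 8, GFP_KERNEL, my_destroy);
 */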
384 | ||
385 | static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) | |
2e0ab8ca | 386 | { |
5d49de53 MT |
387 | void *ptr; |
388 | ||
389 | if (destroy) | |
390 | while ((ptr = ptr_ring_consume(r))) | |
391 | destroy(ptr); | |
2e0ab8ca MT |
392 | kfree(r->queue); |
393 | } | |
394 | ||
395 | #endif /* _LINUX_PTR_RING_H */ |