]> git.proxmox.com Git - mirror_qemu.git/blob - include/hw/xen/io/ring.h
avoid TABs in files that only contain a few
[mirror_qemu.git] / include / hw / xen / io / ring.h
1 /******************************************************************************
2 * ring.h
3 *
4 * Shared producer-consumer ring macros.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Tim Deegan and Andrew Warfield November 2004.
25 */
26
27 #ifndef __XEN_PUBLIC_IO_RING_H__
28 #define __XEN_PUBLIC_IO_RING_H__
29
30 /*
31 * When #include'ing this header, you need to provide the following
32 * declaration upfront:
33 * - standard integers types (uint8_t, uint16_t, etc)
34 * They are provided by stdint.h of the standard headers.
35 *
36 * In addition, if you intend to use the FLEX macros, you also need to
37 * provide the following, before invoking the FLEX macros:
38 * - size_t
39 * - memcpy
40 * - grant_ref_t
41 * These declarations are provided by string.h of the standard headers,
42 * and grant_table.h from the Xen public headers.
43 */
44
45 #if __XEN_INTERFACE_VERSION__ < 0x00030208
46 #define xen_mb() mb()
47 #define xen_rmb() rmb()
48 #define xen_wmb() wmb()
49 #endif
50
51 typedef unsigned int RING_IDX;
52
53 /* Round a 32-bit unsigned constant down to the nearest power of two. */
54 #define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
55 #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
56 #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
57 #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
58 #define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
59
60 /*
61 * Calculate size of a shared ring, given the total available space for the
62 * ring and indexes (_sz), and the name tag of the request/response structure.
63 * A ring contains as many entries as will fit, rounded down to the nearest
64 * power of two (so we can mask with (size-1) to loop around).
65 */
66 #define __CONST_RING_SIZE(_s, _sz) \
67 (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
68 sizeof_field(struct _s##_sring, ring[0])))
69 /*
70 * The same for passing in an actual pointer instead of a name tag.
71 */
72 #define __RING_SIZE(_s, _sz) \
73 (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
74
75 /*
76 * Macros to make the correct C datatypes for a new kind of ring.
77 *
78 * To make a new ring datatype, you need to have two message structures,
79 * let's say request_t, and response_t already defined.
80 *
81 * In a header where you want the ring datatype declared, you then do:
82 *
83 * DEFINE_RING_TYPES(mytag, request_t, response_t);
84 *
85 * These expand out to give you a set of types, as you can see below.
86 * The most important of these are:
87 *
88 * mytag_sring_t - The shared ring.
89 * mytag_front_ring_t - The 'front' half of the ring.
90 * mytag_back_ring_t - The 'back' half of the ring.
91 *
92 * To initialize a ring in your code you need to know the location and size
93 * of the shared memory area (PAGE_SIZE, for instance). To initialise
94 * the front half:
95 *
96 * mytag_front_ring_t front_ring;
97 * SHARED_RING_INIT((mytag_sring_t *)shared_page);
98 * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
99 *
100 * Initializing the back follows similarly (note that only the front
101 * initializes the shared ring):
102 *
103 * mytag_back_ring_t back_ring;
104 * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
105 */
106
107 #define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
108 \
109 /* Shared ring entry */ \
110 union __name##_sring_entry { \
111 __req_t req; \
112 __rsp_t rsp; \
113 }; \
114 \
115 /* Shared ring page */ \
116 struct __name##_sring { \
117 RING_IDX req_prod, req_event; \
118 RING_IDX rsp_prod, rsp_event; \
119 union { \
120 struct { \
121 uint8_t smartpoll_active; \
122 } netif; \
123 struct { \
124 uint8_t msg; \
125 } tapif_user; \
126 uint8_t pvt_pad[4]; \
127 } pvt; \
128 uint8_t __pad[44]; \
129 union __name##_sring_entry ring[1]; /* variable-length */ \
130 }; \
131 \
132 /* "Front" end's private variables */ \
133 struct __name##_front_ring { \
134 RING_IDX req_prod_pvt; \
135 RING_IDX rsp_cons; \
136 unsigned int nr_ents; \
137 struct __name##_sring *sring; \
138 }; \
139 \
140 /* "Back" end's private variables */ \
141 struct __name##_back_ring { \
142 RING_IDX rsp_prod_pvt; \
143 RING_IDX req_cons; \
144 unsigned int nr_ents; \
145 struct __name##_sring *sring; \
146 }; \
147 \
148 /* Syntactic sugar */ \
149 typedef struct __name##_sring __name##_sring_t; \
150 typedef struct __name##_front_ring __name##_front_ring_t; \
151 typedef struct __name##_back_ring __name##_back_ring_t
152
153 /*
154 * Macros for manipulating rings.
155 *
156 * FRONT_RING_whatever works on the "front end" of a ring: here
157 * requests are pushed on to the ring and responses taken off it.
158 *
159 * BACK_RING_whatever works on the "back end" of a ring: here
160 * requests are taken off the ring and responses put on.
161 *
162 * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
163 * This is OK in 1-for-1 request-response situations where the
164 * requestor (front end) never has more than RING_SIZE()-1
165 * outstanding requests.
166 */
167
168 /* Initialising empty rings */
169 #define SHARED_RING_INIT(_s) do { \
170 (_s)->req_prod = (_s)->rsp_prod = 0; \
171 (_s)->req_event = (_s)->rsp_event = 1; \
172 (void)memset((_s)->pvt.pvt_pad, 0, sizeof((_s)->pvt.pvt_pad)); \
173 (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
174 } while(0)
175
176 #define FRONT_RING_INIT(_r, _s, __size) do { \
177 (_r)->req_prod_pvt = 0; \
178 (_r)->rsp_cons = 0; \
179 (_r)->nr_ents = __RING_SIZE(_s, __size); \
180 (_r)->sring = (_s); \
181 } while (0)
182
183 #define BACK_RING_INIT(_r, _s, __size) do { \
184 (_r)->rsp_prod_pvt = 0; \
185 (_r)->req_cons = 0; \
186 (_r)->nr_ents = __RING_SIZE(_s, __size); \
187 (_r)->sring = (_s); \
188 } while (0)
189
190 /* How big is this ring? */
191 #define RING_SIZE(_r) \
192 ((_r)->nr_ents)
193
194 /* Number of free requests (for use on front side only). */
195 #define RING_FREE_REQUESTS(_r) \
196 (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
197
198 /* Test if there is an empty slot available on the front ring.
199 * (This is only meaningful from the front. )
200 */
201 #define RING_FULL(_r) \
202 (RING_FREE_REQUESTS(_r) == 0)
203
204 /* Test if there are outstanding messages to be processed on a ring. */
205 #define RING_HAS_UNCONSUMED_RESPONSES(_r) \
206 ((_r)->sring->rsp_prod - (_r)->rsp_cons)
207
208 #ifdef __GNUC__
209 #define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
210 unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
211 unsigned int rsp = RING_SIZE(_r) - \
212 ((_r)->req_cons - (_r)->rsp_prod_pvt); \
213 req < rsp ? req : rsp; \
214 })
215 #else
216 /* Same as above, but without the nice GCC ({ ... }) syntax. */
217 #define RING_HAS_UNCONSUMED_REQUESTS(_r) \
218 ((((_r)->sring->req_prod - (_r)->req_cons) < \
219 (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
220 ((_r)->sring->req_prod - (_r)->req_cons) : \
221 (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
222 #endif
223
224 /* Direct access to individual ring elements, by index. */
225 #define RING_GET_REQUEST(_r, _idx) \
226 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
227
228 /*
229 * Get a local copy of a request.
230 *
231 * Use this in preference to RING_GET_REQUEST() so all processing is
232 * done on a local copy that cannot be modified by the other end.
233 *
234 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
235 * to be ineffective where _req is a struct which consists of only bitfields.
236 */
237 #define RING_COPY_REQUEST(_r, _idx, _req) do { \
238 /* Use volatile to force the copy into _req. */ \
239 *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
240 } while (0)
241
242 #define RING_GET_RESPONSE(_r, _idx) \
243 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
244
245 /* Loop termination condition: Would the specified index overflow the ring? */
246 #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
247 (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
248
249 /* Ill-behaved frontend determination: Can there be this many requests? */
250 #define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
251 (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
252
253 #define RING_PUSH_REQUESTS(_r) do { \
254 xen_wmb(); /* back sees requests /before/ updated producer index */ \
255 (_r)->sring->req_prod = (_r)->req_prod_pvt; \
256 } while (0)
257
258 #define RING_PUSH_RESPONSES(_r) do { \
259 xen_wmb(); /* front sees resps /before/ updated producer index */ \
260 (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
261 } while (0)
262
263 /*
264 * Notification hold-off (req_event and rsp_event):
265 *
266 * When queueing requests or responses on a shared ring, it may not always be
267 * necessary to notify the remote end. For example, if requests are in flight
268 * in a backend, the front may be able to queue further requests without
269 * notifying the back (if the back checks for new requests when it queues
270 * responses).
271 *
272 * When enqueuing requests or responses:
273 *
274 * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
275 * is a boolean return value. True indicates that the receiver requires an
276 * asynchronous notification.
277 *
278 * After dequeuing requests or responses (before sleeping the connection):
279 *
280 * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
281 * The second argument is a boolean return value. True indicates that there
282 * are pending messages on the ring (i.e., the connection should not be put
283 * to sleep).
284 *
285 * These macros will set the req_event/rsp_event field to trigger a
286 * notification on the very next message that is enqueued. If you want to
287 * create batches of work (i.e., only receive a notification after several
288 * messages have been enqueued) then you will need to create a customised
289 * version of the FINAL_CHECK macro in your own code, which sets the event
290 * field appropriately.
291 */
292
293 #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
294 RING_IDX __old = (_r)->sring->req_prod; \
295 RING_IDX __new = (_r)->req_prod_pvt; \
296 xen_wmb(); /* back sees requests /before/ updated producer index */ \
297 (_r)->sring->req_prod = __new; \
298 xen_mb(); /* back sees new requests /before/ we check req_event */ \
299 (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
300 (RING_IDX)(__new - __old)); \
301 } while (0)
302
303 #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
304 RING_IDX __old = (_r)->sring->rsp_prod; \
305 RING_IDX __new = (_r)->rsp_prod_pvt; \
306 xen_wmb(); /* front sees resps /before/ updated producer index */ \
307 (_r)->sring->rsp_prod = __new; \
308 xen_mb(); /* front sees new resps /before/ we check rsp_event */ \
309 (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
310 (RING_IDX)(__new - __old)); \
311 } while (0)
312
313 #define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
314 (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
315 if (_work_to_do) break; \
316 (_r)->sring->req_event = (_r)->req_cons + 1; \
317 xen_mb(); \
318 (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
319 } while (0)
320
321 #define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
322 (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
323 if (_work_to_do) break; \
324 (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
325 xen_mb(); \
326 (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
327 } while (0)
328
329
330 /*
331 * DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings and
332 * functions to check if there is data on the ring, and to read and
333 * write to them.
334 *
335 * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
336 * does not define the indexes page. As different protocols can have
337 * extensions to the basic format, this macro allow them to define their
338 * own struct.
339 *
340 * XEN_FLEX_RING_SIZE
341 * Convenience macro to calculate the size of one of the two rings
342 * from the overall order.
343 *
344 * $NAME_mask
345 * Function to apply the size mask to an index, to reduce the index
346 * within the range [0-size].
347 *
348 * $NAME_read_packet
349 * Function to read data from the ring. The amount of data to read is
350 * specified by the "size" argument.
351 *
352 * $NAME_write_packet
353 * Function to write data to the ring. The amount of data to write is
354 * specified by the "size" argument.
355 *
356 * $NAME_get_ring_ptr
357 * Convenience function that returns a pointer to read/write to the
358 * ring at the right location.
359 *
360 * $NAME_data_intf
361 * Indexes page, shared between frontend and backend. It also
362 * contains the array of grant refs.
363 *
364 * $NAME_queued
365 * Function to calculate how many bytes are currently on the ring,
366 * ready to be read. It can also be used to calculate how much free
367 * space is currently on the ring (XEN_FLEX_RING_SIZE() -
368 * $NAME_queued()).
369 */
370
371 #ifndef XEN_PAGE_SHIFT
372 /* The PAGE_SIZE for ring protocols and hypercall interfaces is always
373 * 4K, regardless of the architecture, and page granularity chosen by
374 * operating systems.
375 */
376 #define XEN_PAGE_SHIFT 12
377 #endif
378 #define XEN_FLEX_RING_SIZE(order) \
379 (1UL << ((order) + XEN_PAGE_SHIFT - 1))
380
381 #define DEFINE_XEN_FLEX_RING(name) \
382 static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size) \
383 { \
384 return idx & (ring_size - 1); \
385 } \
386 \
387 static inline unsigned char *name##_get_ring_ptr(unsigned char *buf, \
388 RING_IDX idx, \
389 RING_IDX ring_size) \
390 { \
391 return buf + name##_mask(idx, ring_size); \
392 } \
393 \
394 static inline void name##_read_packet(void *opaque, \
395 const unsigned char *buf, \
396 size_t size, \
397 RING_IDX masked_prod, \
398 RING_IDX *masked_cons, \
399 RING_IDX ring_size) \
400 { \
401 if (*masked_cons < masked_prod || \
402 size <= ring_size - *masked_cons) { \
403 memcpy(opaque, buf + *masked_cons, size); \
404 } else { \
405 memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \
406 memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \
407 size - (ring_size - *masked_cons)); \
408 } \
409 *masked_cons = name##_mask(*masked_cons + size, ring_size); \
410 } \
411 \
412 static inline void name##_write_packet(unsigned char *buf, \
413 const void *opaque, \
414 size_t size, \
415 RING_IDX *masked_prod, \
416 RING_IDX masked_cons, \
417 RING_IDX ring_size) \
418 { \
419 if (*masked_prod < masked_cons || \
420 size <= ring_size - *masked_prod) { \
421 memcpy(buf + *masked_prod, opaque, size); \
422 } else { \
423 memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \
424 memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \
425 size - (ring_size - *masked_prod)); \
426 } \
427 *masked_prod = name##_mask(*masked_prod + size, ring_size); \
428 } \
429 \
430 static inline RING_IDX name##_queued(RING_IDX prod, \
431 RING_IDX cons, \
432 RING_IDX ring_size) \
433 { \
434 RING_IDX size; \
435 \
436 if (prod == cons) \
437 return 0; \
438 \
439 prod = name##_mask(prod, ring_size); \
440 cons = name##_mask(cons, ring_size); \
441 \
442 if (prod == cons) \
443 return ring_size; \
444 \
445 if (prod > cons) \
446 size = prod - cons; \
447 else \
448 size = ring_size - (cons - prod); \
449 return size; \
450 } \
451 \
452 struct name##_data { \
453 unsigned char *in; /* half of the allocation */ \
454 unsigned char *out; /* half of the allocation */ \
455 }
456
457 #define DEFINE_XEN_FLEX_RING_AND_INTF(name) \
458 struct name##_data_intf { \
459 RING_IDX in_cons, in_prod; \
460 \
461 uint8_t pad1[56]; \
462 \
463 RING_IDX out_cons, out_prod; \
464 \
465 uint8_t pad2[56]; \
466 \
467 RING_IDX ring_order; \
468 grant_ref_t ref[]; \
469 }; \
470 DEFINE_XEN_FLEX_RING(name)
471
472 #endif /* __XEN_PUBLIC_IO_RING_H__ */
473
474 /*
475 * Local variables:
476 * mode: C
477 * c-file-style: "BSD"
478 * c-basic-offset: 4
479 * tab-width: 4
480 * indent-tabs-mode: nil
481 * End:
482 */