/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

/*
 * Generic ring structure for passing events from one core to another.
 *
 * Used by the software scheduler for the producer and consumer rings for
 * each port, i.e. for passing events from worker cores to the scheduler
 * and vice versa. Designed for single-producer, single-consumer use, with
 * two cores working on each ring.
 */

#ifndef _EVENT_RING_
#define _EVENT_RING_

#include <stdint.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>

#define QE_RING_NAMESIZE 32

struct qe_ring {
	char name[QE_RING_NAMESIZE] __rte_cache_aligned;
	uint32_t ring_size; /* size of memory block allocated to the ring */
	uint32_t mask;      /* mask for read/write values == ring_size - 1 */
	uint32_t size;      /* actual usable space in the ring */
	/* free-running producer index, on its own cache line to avoid
	 * false sharing with the consumer */
	volatile uint32_t write_idx __rte_cache_aligned;
	/* free-running consumer index, likewise cache-line aligned */
	volatile uint32_t read_idx __rte_cache_aligned;

	/* the event slots, allocated contiguously after the header by
	 * qe_ring_create() */
	struct rte_event ring[0] __rte_cache_aligned;
};

/*
 * Allocate and initialise a ring on the given NUMA socket. The backing
 * array holds rte_align32pow2(size + 1) slots so that index masking works;
 * the usable capacity remains "size" entries.
 */
static inline struct qe_ring *
qe_ring_create(const char *name, unsigned int size, unsigned int socket_id)
{
	struct qe_ring *retval;
	const uint32_t ring_size = rte_align32pow2(size + 1);
	size_t memsize = sizeof(*retval) +
			(ring_size * sizeof(retval->ring[0]));

	retval = rte_zmalloc_socket(NULL, memsize, 0, socket_id);
	if (retval == NULL)
		goto end;

	snprintf(retval->name, sizeof(retval->name), "EVDEV_RG_%s", name);
	retval->ring_size = ring_size;
	retval->mask = ring_size - 1;
	retval->size = size;
end:
	return retval;
}

/* Free a ring allocated by qe_ring_create(); rte_free(NULL) is a no-op */
static inline void
qe_ring_destroy(struct qe_ring *r)
{
	rte_free(r);
}
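
/*
 * Usage sketch (illustrative only, not part of the upstream header): a
 * minimal create/destroy round trip. The ring name "worker0_rx", the
 * capacity of 511 usable slots and the error handling are assumptions
 * invented for this example.
 *
 *	struct qe_ring *r = qe_ring_create("worker0_rx", 511,
 *			rte_socket_id());
 *	if (r == NULL)
 *		rte_panic("cannot allocate event ring\n");
 *	// ... enqueue/dequeue from exactly one producer and one consumer ...
 *	qe_ring_destroy(r);
 */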

/*
 * Number of events currently in the ring. The indices are free-running
 * uint32_t counters, so this unsigned subtraction is correct even after
 * write_idx wraps past UINT32_MAX.
 */
static __rte_always_inline unsigned int
qe_ring_count(const struct qe_ring *r)
{
	return r->write_idx - r->read_idx;
}

/* Number of events that can still be enqueued without overwriting data */
static __rte_always_inline unsigned int
qe_ring_free_count(const struct qe_ring *r)
{
	return r->size - qe_ring_count(r);
}
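
/*
 * Worked example of the wraparound arithmetic above (values invented for
 * illustration): with write_idx == 3 after wrapping and
 * read_idx == 0xFFFFFFFE, the ring holds
 *
 *	(uint32_t)(3 - 0xFFFFFFFE) == 5
 *
 * events, because uint32_t subtraction is performed modulo 2^32.
 */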

/*
 * Enqueue up to nb_qes events from qes[]; only the single producer may
 * call this. Returns the number actually enqueued and stores the
 * remaining free space in *free_count.
 */
static __rte_always_inline unsigned int
qe_ring_enqueue_burst(struct qe_ring *r, const struct rte_event *qes,
		unsigned int nb_qes, uint16_t *free_count)
{
	const uint32_t size = r->size;
	const uint32_t mask = r->mask;
	const uint32_t read = r->read_idx;
	uint32_t write = r->write_idx;
	const uint32_t space = read + size - write;
	uint32_t i;

	if (space < nb_qes)
		nb_qes = space;

	for (i = 0; i < nb_qes; i++, write++)
		r->ring[write & mask] = qes[i];

	/* make the copied events visible before publishing the new index */
	rte_smp_wmb();

	if (nb_qes != 0)
		r->write_idx = write;

	*free_count = space - nb_qes;

	return nb_qes;
}
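
/*
 * Producer-side sketch (the get_next_events() source is hypothetical and
 * not part of this file; "r" is a ring created as above):
 *
 *	struct rte_event ev[8];
 *	uint16_t space;
 *	unsigned int n, sent;
 *
 *	n = get_next_events(ev, 8);		// hypothetical event source
 *	sent = qe_ring_enqueue_burst(r, ev, n, &space);
 *	if (sent < n) {
 *		// ring full: back off or retry the remaining n - sent events
 *	}
 */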

/*
 * As qe_ring_enqueue_burst(), but stamp each copied event with the
 * corresponding per-event operation type from ops[].
 */
static __rte_always_inline unsigned int
qe_ring_enqueue_burst_with_ops(struct qe_ring *r, const struct rte_event *qes,
		unsigned int nb_qes, uint8_t *ops)
{
	const uint32_t size = r->size;
	const uint32_t mask = r->mask;
	const uint32_t read = r->read_idx;
	uint32_t write = r->write_idx;
	const uint32_t space = read + size - write;
	uint32_t i;

	if (space < nb_qes)
		nb_qes = space;

	for (i = 0; i < nb_qes; i++, write++) {
		r->ring[write & mask] = qes[i];
		r->ring[write & mask].op = ops[i];
	}

	/* make the copied events visible before publishing the new index */
	rte_smp_wmb();

	if (nb_qes != 0)
		r->write_idx = write;

	return nb_qes;
}
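
/*
 * Sketch of the _with_ops variant (the op values chosen here are only an
 * illustration): overwrite each event's op field while enqueuing, e.g. to
 * mark every event as a forward operation.
 *
 *	uint8_t ops[8] = { [0 ... 7] = RTE_EVENT_OP_FORWARD };
 *
 *	qe_ring_enqueue_burst_with_ops(r, ev, n, ops);
 */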

/*
 * Dequeue up to nb_qes events into qes[]; only the single consumer may
 * call this. Returns the number of events actually dequeued.
 */
static __rte_always_inline unsigned int
qe_ring_dequeue_burst(struct qe_ring *r, struct rte_event *qes,
		unsigned int nb_qes)
{
	const uint32_t mask = r->mask;
	uint32_t read = r->read_idx;
	const uint32_t write = r->write_idx;
	const uint32_t items = write - read;
	uint32_t i;

	if (items < nb_qes)
		nb_qes = items;

	for (i = 0; i < nb_qes; i++, read++)
		qes[i] = r->ring[read & mask];

	/* read barrier: complete the event reads before updating read_idx */
	rte_smp_rmb();

	if (nb_qes != 0)
		r->read_idx += nb_qes;

	return nb_qes;
}
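
/*
 * Consumer-side sketch (the process_event() handler is hypothetical, not
 * part of this file): drain the ring in bursts from the single consumer
 * core.
 *
 *	struct rte_event ev[8];
 *	unsigned int i, n;
 *
 *	n = qe_ring_dequeue_burst(r, ev, 8);
 *	for (i = 0; i < n; i++)
 *		process_event(&ev[i]);		// hypothetical handler
 */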

#endif