/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <sys/queue.h>
#include <string.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_compat.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_pause.h>

#include "rte_distributor_v20.h"
#include "rte_distributor_private.h"

TAILQ_HEAD(rte_distributor_list, rte_distributor_v20);

static struct rte_tailq_elem rte_distributor_tailq = {
	.name = "RTE_DISTRIBUTOR",
};
EAL_REGISTER_TAILQ(rte_distributor_tailq)

/**** APIs called by workers ****/

void
rte_distributor_request_pkt_v20(struct rte_distributor_v20 *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
	int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
			| RTE_DISTRIB_GET_BUF;
	while (unlikely(buf->bufptr64 & RTE_DISTRIB_FLAGS_MASK))
		rte_pause();
	buf->bufptr64 = req;
}
VERSION_SYMBOL(rte_distributor_request_pkt, _v20, 2.0);

struct rte_mbuf *
rte_distributor_poll_pkt_v20(struct rte_distributor_v20 *d,
		unsigned worker_id)
{
	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
	if (buf->bufptr64 & RTE_DISTRIB_GET_BUF)
		return NULL;

	/* since bufptr64 is signed, this should be an arithmetic shift */
	int64_t ret = buf->bufptr64 >> RTE_DISTRIB_FLAG_BITS;
	return (struct rte_mbuf *)((uintptr_t)ret);
}
VERSION_SYMBOL(rte_distributor_poll_pkt, _v20, 2.0);

struct rte_mbuf *
rte_distributor_get_pkt_v20(struct rte_distributor_v20 *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	struct rte_mbuf *ret;
	rte_distributor_request_pkt_v20(d, worker_id, oldpkt);
	while ((ret = rte_distributor_poll_pkt_v20(d, worker_id)) == NULL)
		rte_pause();
	return ret;
}
VERSION_SYMBOL(rte_distributor_get_pkt, _v20, 2.0);

int
rte_distributor_return_pkt_v20(struct rte_distributor_v20 *d,
		unsigned worker_id, struct rte_mbuf *oldpkt)
{
	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
	uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
			| RTE_DISTRIB_RETURN_BUF;
	buf->bufptr64 = req;
	return 0;
}
VERSION_SYMBOL(rte_distributor_return_pkt, _v20, 2.0);
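
/*
 * Usage sketch for the worker-side API above (illustrative only, not part
 * of this file's API surface): a worker lcore typically loops on
 * rte_distributor_get_pkt_v20(), handing back the previously processed
 * mbuf on each iteration. The process_packet() helper here is a
 * hypothetical placeholder for the application's per-packet work.
 *
 *	struct rte_mbuf *pkt = rte_distributor_get_pkt_v20(d, worker_id, NULL);
 *	for (;;) {
 *		process_packet(pkt);	// hypothetical application work
 *		pkt = rte_distributor_get_pkt_v20(d, worker_id, pkt);
 *	}
 *	// on shutdown, hand the last mbuf back without requesting another:
 *	rte_distributor_return_pkt_v20(d, worker_id, pkt);
 */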

/**** APIs called on distributor core ***/

/* as the name suggests, adds a packet to the backlog for a particular worker */
static int
add_to_backlog(struct rte_distributor_backlog *bl, int64_t item)
{
	if (bl->count == RTE_DISTRIB_BACKLOG_SIZE)
		return -1;

	bl->pkts[(bl->start + bl->count++) & (RTE_DISTRIB_BACKLOG_MASK)]
			= item;
	return 0;
}

/* takes the next packet for a worker off the backlog */
static int64_t
backlog_pop(struct rte_distributor_backlog *bl)
{
	bl->count--;
	return bl->pkts[bl->start++ & RTE_DISTRIB_BACKLOG_MASK];
}

/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor_v20 *d,
		unsigned *ret_start, unsigned *ret_count)
{
	/* store returns in a circular buffer - code is branch-free */
	d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
			= (void *)oldbuf;
	*ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
	*ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
}
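
/*
 * Worked example of the branch-free update above (explanatory note; the
 * value RTE_DISTRIB_RETURNS_MASK == 127 is an assumption from the private
 * header): the mbuf pointer is always written into the next slot, but the
 * counters only advance when oldbuf is non-NULL, since !!(oldbuf) is 0 or
 * 1. While the ring is not yet full, only *ret_count grows; once it
 * reaches the mask, *ret_start advances instead, so the ring overwrites
 * its oldest entry rather than overflowing. E.g. with *ret_start == 5,
 * *ret_count == 127 and a non-NULL oldbuf:
 *	*ret_start += (127 == 127) & 1;	// start becomes 6
 *	*ret_count += (127 != 127) & 1;	// count stays 127
 */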

static inline void
handle_worker_shutdown(struct rte_distributor_v20 *d, unsigned int wkr)
{
	d->in_flight_tags[wkr] = 0;
	d->in_flight_bitmask &= ~(1UL << wkr);
	d->bufs[wkr].bufptr64 = 0;
	if (unlikely(d->backlog[wkr].count != 0)) {
		/* On return of a packet, we need to move the
		 * queued packets for this core elsewhere.
		 * Easiest solution is to set things up for
		 * a recursive call. That will cause those
		 * packets to be queued up for the next free
		 * core, i.e. it will return as soon as a
		 * core becomes free to accept the first
		 * packet, as subsequent ones will be added to
		 * the backlog for that core.
		 */
		struct rte_mbuf *pkts[RTE_DISTRIB_BACKLOG_SIZE];
		unsigned i;
		struct rte_distributor_backlog *bl = &d->backlog[wkr];

		for (i = 0; i < bl->count; i++) {
			unsigned idx = (bl->start + i) &
					RTE_DISTRIB_BACKLOG_MASK;
			pkts[i] = (void *)((uintptr_t)(bl->pkts[idx] >>
					RTE_DISTRIB_FLAG_BITS));
		}
		/* recursive call.
		 * Note that the tags were set before first level call
		 * to rte_distributor_process.
		 */
		rte_distributor_process_v20(d, pkts, i);
		bl->count = bl->start = 0;
	}
}

/* This function is called when process() is invoked without any new
 * packets. It goes through all the workers and collects any returned
 * packets, doing a partial flush.
 */
static int
process_returns(struct rte_distributor_v20 *d)
{
	unsigned wkr;
	unsigned flushed = 0;
	unsigned ret_start = d->returns.start,
			ret_count = d->returns.count;

	for (wkr = 0; wkr < d->num_workers; wkr++) {

		const int64_t data = d->bufs[wkr].bufptr64;
		uintptr_t oldbuf = 0;

		if (data & RTE_DISTRIB_GET_BUF) {
			flushed++;
			if (d->backlog[wkr].count)
				d->bufs[wkr].bufptr64 =
						backlog_pop(&d->backlog[wkr]);
			else {
				d->bufs[wkr].bufptr64 = RTE_DISTRIB_GET_BUF;
				d->in_flight_tags[wkr] = 0;
				d->in_flight_bitmask &= ~(1UL << wkr);
			}
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		} else if (data & RTE_DISTRIB_RETURN_BUF) {
			handle_worker_shutdown(d, wkr);
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		}

		store_return(oldbuf, d, &ret_start, &ret_count);
	}

	d->returns.start = ret_start;
	d->returns.count = ret_count;

	return flushed;
}

/* process a set of packets to distribute them to workers */
int
rte_distributor_process_v20(struct rte_distributor_v20 *d,
		struct rte_mbuf **mbufs, unsigned num_mbufs)
{
	unsigned next_idx = 0;
	unsigned wkr = 0;
	struct rte_mbuf *next_mb = NULL;
	int64_t next_value = 0;
	uint32_t new_tag = 0;
	unsigned ret_start = d->returns.start,
			ret_count = d->returns.count;

	if (unlikely(num_mbufs == 0))
		return process_returns(d);

	while (next_idx < num_mbufs || next_mb != NULL) {

		int64_t data = d->bufs[wkr].bufptr64;
		uintptr_t oldbuf = 0;

		if (!next_mb) {
			next_mb = mbufs[next_idx++];
			next_value = (((int64_t)(uintptr_t)next_mb)
					<< RTE_DISTRIB_FLAG_BITS);
			/*
			 * The user is advised to set the tag value for each
			 * mbuf before calling rte_distributor_process.
			 * User-defined tags are used to identify flows,
			 * or sessions.
			 */
			new_tag = next_mb->hash.usr;

			/*
			 * Note that if RTE_DISTRIB_MAX_WORKERS is larger
			 * than 64, then the size of match has to be expanded.
			 */
			uint64_t match = 0;
			unsigned i;
			/*
			 * To scan for a match, use "xor" and "not" to get a
			 * 0/1 value, then use shifting to merge the results
			 * into a single "match" variable, where a set bit
			 * indicates a match for the worker given by the
			 * bit position.
			 */
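			/*
			 * Worked example (explanatory note): with three
			 * workers holding in-flight tags {7, 9, 7} and
			 * new_tag == 7, the xor/not scan yields
			 * match == 0b101; masking with in_flight_bitmask
			 * below then keeps only those workers that actually
			 * hold a packet.
			 */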
			for (i = 0; i < d->num_workers; i++)
				match |= (!(d->in_flight_tags[i] ^ new_tag)
					<< i);

			/* Only bits set in the in-flight bitmask count as
			 * matches. */
			match &= d->in_flight_bitmask;

			if (match) {
				next_mb = NULL;
				unsigned worker = __builtin_ctzl(match);
				if (add_to_backlog(&d->backlog[worker],
						next_value) < 0)
					next_idx--;
			}
		}

		if ((data & RTE_DISTRIB_GET_BUF) &&
				(d->backlog[wkr].count || next_mb)) {

			if (d->backlog[wkr].count)
				d->bufs[wkr].bufptr64 =
						backlog_pop(&d->backlog[wkr]);

			else {
				d->bufs[wkr].bufptr64 = next_value;
				d->in_flight_tags[wkr] = new_tag;
				d->in_flight_bitmask |= (1UL << wkr);
				next_mb = NULL;
			}
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		} else if (data & RTE_DISTRIB_RETURN_BUF) {
			handle_worker_shutdown(d, wkr);
			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
		}

		/* store returns in a circular buffer */
		store_return(oldbuf, d, &ret_start, &ret_count);

		if (++wkr == d->num_workers)
			wkr = 0;
	}
	/* to finish, check all workers for backlog and schedule work for
	 * them if they are ready */
	for (wkr = 0; wkr < d->num_workers; wkr++)
		if (d->backlog[wkr].count &&
				(d->bufs[wkr].bufptr64 & RTE_DISTRIB_GET_BUF)) {

			int64_t oldbuf = d->bufs[wkr].bufptr64 >>
					RTE_DISTRIB_FLAG_BITS;
			store_return(oldbuf, d, &ret_start, &ret_count);

			d->bufs[wkr].bufptr64 = backlog_pop(&d->backlog[wkr]);
		}

	d->returns.start = ret_start;
	d->returns.count = ret_count;
	return num_mbufs;
}
VERSION_SYMBOL(rte_distributor_process, _v20, 2.0);
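
/*
 * Usage sketch (illustrative only): the distributor core typically pairs
 * rte_distributor_process_v20() with rte_distributor_returned_pkts_v20()
 * in its main loop, tagging each mbuf before handing it over. BURST_SIZE
 * and the rx_burst()/flow_tag()/tx_burst() helpers are hypothetical
 * placeholders for the application's I/O path.
 *
 *	struct rte_mbuf *bufs[BURST_SIZE];
 *	unsigned n = rx_burst(bufs, BURST_SIZE);	// hypothetical receive
 *	unsigned i;
 *	for (i = 0; i < n; i++)
 *		bufs[i]->hash.usr = flow_tag(bufs[i]);	// hypothetical tag
 *	rte_distributor_process_v20(d, bufs, n);
 *	n = rte_distributor_returned_pkts_v20(d, bufs, BURST_SIZE);
 *	tx_burst(bufs, n);				// hypothetical transmit
 */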

/* return to the caller, packets returned from workers */
int
rte_distributor_returned_pkts_v20(struct rte_distributor_v20 *d,
		struct rte_mbuf **mbufs, unsigned max_mbufs)
{
	struct rte_distributor_returned_pkts *returns = &d->returns;
	unsigned retval = (max_mbufs < returns->count) ?
			max_mbufs : returns->count;
	unsigned i;

	for (i = 0; i < retval; i++) {
		unsigned idx = (returns->start + i) & RTE_DISTRIB_RETURNS_MASK;
		mbufs[i] = returns->mbufs[idx];
	}
	returns->start += i;
	returns->count -= i;

	return retval;
}
VERSION_SYMBOL(rte_distributor_returned_pkts, _v20, 2.0);

/* return the number of packets in-flight in a distributor, i.e. packets
 * being worked on or queued up in a backlog.
 */
static inline unsigned
total_outstanding(const struct rte_distributor_v20 *d)
{
	unsigned wkr, total_outstanding;

	total_outstanding = __builtin_popcountl(d->in_flight_bitmask);

	for (wkr = 0; wkr < d->num_workers; wkr++)
		total_outstanding += d->backlog[wkr].count;

	return total_outstanding;
}

/* flush the distributor, so that there are no outstanding packets in flight
 * or queued up. */
int
rte_distributor_flush_v20(struct rte_distributor_v20 *d)
{
	const unsigned flushed = total_outstanding(d);

	while (total_outstanding(d) > 0)
		rte_distributor_process_v20(d, NULL, 0);

	return flushed;
}
VERSION_SYMBOL(rte_distributor_flush, _v20, 2.0);

/* clears the internal returns array in the distributor */
void
rte_distributor_clear_returns_v20(struct rte_distributor_v20 *d)
{
	d->returns.start = d->returns.count = 0;
#ifndef __OPTIMIZE__
	memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs));
#endif
}
VERSION_SYMBOL(rte_distributor_clear_returns, _v20, 2.0);

/* creates a distributor instance */
struct rte_distributor_v20 *
rte_distributor_create_v20(const char *name,
		unsigned socket_id,
		unsigned num_workers)
{
	struct rte_distributor_v20 *d;
	struct rte_distributor_list *distributor_list;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	/* compile-time checks */
	RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
	RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
				sizeof(d->in_flight_bitmask) * CHAR_BIT);

	if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
	mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
	if (mz == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	d = mz->addr;
	strlcpy(d->name, name, sizeof(d->name));
	d->num_workers = num_workers;

	distributor_list = RTE_TAILQ_CAST(rte_distributor_tailq.head,
					  rte_distributor_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_INSERT_TAIL(distributor_list, d, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return d;
}
VERSION_SYMBOL(rte_distributor_create, _v20, 2.0);
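
/*
 * Usage sketch (illustrative only): creating a distributor on the current
 * NUMA node before launching the worker lcores. The name "pkt_dist" and
 * the worker count of 4 are arbitrary example values.
 *
 *	struct rte_distributor_v20 *d =
 *		rte_distributor_create_v20("pkt_dist", rte_socket_id(), 4);
 *	if (d == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create distributor: %s\n",
 *				rte_strerror(rte_errno));
 */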