/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "rte_port_ethdev.h"

/*
 * Port ETHDEV Reader
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_ETHDEV_READER_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_ETHDEV_READER_STATS_PKTS_DROP_ADD(port, val)

#endif
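
/*
 * When RTE_PORT_STATS_COLLECT is not defined, the macros above expand to
 * nothing, so statistics collection has no run-time cost.
 */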

struct rte_port_ethdev_reader {
	struct rte_port_in_stats stats;

	uint16_t queue_id;
	uint8_t port_id;
};

static void *
rte_port_ethdev_reader_create(void *params, int socket_id)
{
	struct rte_port_ethdev_reader_params *conf =
			(struct rte_port_ethdev_reader_params *) params;
	struct rte_port_ethdev_reader *port;

	/* Check input parameters */
	if (conf == NULL) {
		RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->port_id = conf->port_id;
	port->queue_id = conf->queue_id;

	return port;
}

static int
rte_port_ethdev_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_port_ethdev_reader *p =
			(struct rte_port_ethdev_reader *) port;
	uint16_t rx_pkt_cnt;

	rx_pkt_cnt = rte_eth_rx_burst(p->port_id, p->queue_id, pkts, n_pkts);
	RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(p, rx_pkt_cnt);
	return rx_pkt_cnt;
}

static int
rte_port_ethdev_reader_free(void *port)
{
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
		return -EINVAL;
	}

	rte_free(port);

	return 0;
}

static int rte_port_ethdev_reader_stats_read(void *port,
	struct rte_port_in_stats *stats, int clear)
{
	struct rte_port_ethdev_reader *p =
			(struct rte_port_ethdev_reader *) port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Port ETHDEV Writer
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_ethdev_writer {
	struct rte_port_out_stats stats;

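	/*
	 * The buffer is sized at twice the maximum burst so that the
	 * buffered (slow) path of tx_bulk can append a full 64-packet mask
	 * on top of up to tx_burst_sz - 1 already buffered packets without
	 * overflowing.
	 */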
	struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
	uint32_t tx_burst_sz;
	uint16_t tx_buf_count;
	uint64_t bsz_mask;
	uint16_t queue_id;
	uint8_t port_id;
};

static void *
rte_port_ethdev_writer_create(void *params, int socket_id)
{
	struct rte_port_ethdev_writer_params *conf =
			(struct rte_port_ethdev_writer_params *) params;
	struct rte_port_ethdev_writer *port;

	/* Check input parameters */
	if ((conf == NULL) ||
		(conf->tx_burst_sz == 0) ||
		(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
		(!rte_is_power_of_2(conf->tx_burst_sz))) {
		RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->port_id = conf->port_id;
	port->queue_id = conf->queue_id;
	port->tx_burst_sz = conf->tx_burst_sz;
	port->tx_buf_count = 0;
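	/*
	 * bsz_mask has a single bit set, at position tx_burst_sz - 1; the
	 * tx_bulk fast-path test uses it to check whether an incoming
	 * pkts_mask carries at least one full burst of packets.
	 */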
	port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);

	return port;
}

static inline void
send_burst(struct rte_port_ethdev_writer *p)
{
	uint32_t nb_tx;

	nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id,
			p->tx_buf, p->tx_buf_count);

	RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
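	/* Drop policy: free any packets that the NIC did not accept */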
	for ( ; nb_tx < p->tx_buf_count; nb_tx++)
		rte_pktmbuf_free(p->tx_buf[nb_tx]);

	p->tx_buf_count = 0;
}

static int
rte_port_ethdev_writer_tx(void *port, struct rte_mbuf *pkt)
{
	struct rte_port_ethdev_writer *p =
			(struct rte_port_ethdev_writer *) port;

	p->tx_buf[p->tx_buf_count++] = pkt;
	RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, 1);
	if (p->tx_buf_count >= p->tx_burst_sz)
		send_burst(p);

	return 0;
}

static int
rte_port_ethdev_writer_tx_bulk(void *port,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask)
{
	struct rte_port_ethdev_writer *p =
			(struct rte_port_ethdev_writer *) port;
	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
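	/*
	 * expr is 0 if and only if pkts_mask is a contiguous run of 1-bits
	 * starting at bit 0 (then pkts_mask & (pkts_mask + 1) is 0) that is
	 * at least tx_burst_sz bits long (then the bsz_mask bit is set).
	 * Such masks take the fast path below and are sent directly.
	 */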
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t n_pkts_ok;

		if (tx_buf_count)
			send_burst(p);

		RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
		n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts,
			n_pkts);

		RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
		for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
			struct rte_mbuf *pkt = pkts[n_pkts_ok];

			rte_pktmbuf_free(pkt);
		}
	} else {
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->tx_buf_count = tx_buf_count;
		if (tx_buf_count >= p->tx_burst_sz)
			send_burst(p);
	}

	return 0;
}

static int
rte_port_ethdev_writer_flush(void *port)
{
	struct rte_port_ethdev_writer *p =
			(struct rte_port_ethdev_writer *) port;

	if (p->tx_buf_count > 0)
		send_burst(p);

	return 0;
}

static int
rte_port_ethdev_writer_free(void *port)
{
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
		return -EINVAL;
	}

	rte_port_ethdev_writer_flush(port);
	rte_free(port);

	return 0;
}

static int rte_port_ethdev_writer_stats_read(void *port,
	struct rte_port_out_stats *stats, int clear)
{
	struct rte_port_ethdev_writer *p =
			(struct rte_port_ethdev_writer *) port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Port ETHDEV Writer Nodrop
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val)

#endif

struct rte_port_ethdev_writer_nodrop {
	struct rte_port_out_stats stats;

	struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
	uint32_t tx_burst_sz;
	uint16_t tx_buf_count;
	uint64_t bsz_mask;
	uint64_t n_retries;
	uint16_t queue_id;
	uint8_t port_id;
};

static void *
rte_port_ethdev_writer_nodrop_create(void *params, int socket_id)
{
	struct rte_port_ethdev_writer_nodrop_params *conf =
			(struct rte_port_ethdev_writer_nodrop_params *) params;
	struct rte_port_ethdev_writer_nodrop *port;

	/* Check input parameters */
	if ((conf == NULL) ||
		(conf->tx_burst_sz == 0) ||
		(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
		(!rte_is_power_of_2(conf->tx_burst_sz))) {
		RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->port_id = conf->port_id;
	port->queue_id = conf->queue_id;
	port->tx_burst_sz = conf->tx_burst_sz;
	port->tx_buf_count = 0;
	port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);

	/*
	 * When n_retries is 0, wait for every packet to be sent, no matter
	 * how many retries it takes. To limit the number of branches in the
	 * fast path, map 0 to UINT64_MAX here instead of testing for it
	 * there.
	 */
	port->n_retries = (conf->n_retries == 0) ? UINT64_MAX : conf->n_retries;

	return port;
}

static inline void
send_burst_nodrop(struct rte_port_ethdev_writer_nodrop *p)
{
	uint32_t nb_tx = 0, i;

	nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id, p->tx_buf,
			p->tx_buf_count);

	/* We sent all the packets on the first try */
	if (nb_tx >= p->tx_buf_count) {
		p->tx_buf_count = 0;
		return;
	}

	for (i = 0; i < p->n_retries; i++) {
		nb_tx += rte_eth_tx_burst(p->port_id, p->queue_id,
			p->tx_buf + nb_tx, p->tx_buf_count - nb_tx);

		/* We sent all the packets after one or more retries */
		if (nb_tx >= p->tx_buf_count) {
			p->tx_buf_count = 0;
			return;
		}
	}

	/*
	 * We could not send all packets within the maximum allowed number of
	 * attempts, so even this "nodrop" port has to drop the remainder.
	 */
	RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
	for ( ; nb_tx < p->tx_buf_count; nb_tx++)
		rte_pktmbuf_free(p->tx_buf[nb_tx]);

	p->tx_buf_count = 0;
}

static int
rte_port_ethdev_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
{
	struct rte_port_ethdev_writer_nodrop *p =
			(struct rte_port_ethdev_writer_nodrop *) port;

	p->tx_buf[p->tx_buf_count++] = pkt;
	RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
	if (p->tx_buf_count >= p->tx_burst_sz)
		send_burst_nodrop(p);

	return 0;
}

static int
rte_port_ethdev_writer_nodrop_tx_bulk(void *port,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask)
{
	struct rte_port_ethdev_writer_nodrop *p =
			(struct rte_port_ethdev_writer_nodrop *) port;

	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
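	/* Same fast-path test as in rte_port_ethdev_writer_tx_bulk() above */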
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t n_pkts_ok;

		if (tx_buf_count)
			send_burst_nodrop(p);

		RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
		n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts,
			n_pkts);

		if (n_pkts_ok >= n_pkts)
			return 0;

		/*
		 * If we didn't manage to send all packets in a single burst,
		 * move the remaining packets to the buffer and retry them
		 * via send_burst_nodrop().
		 */
		for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
			struct rte_mbuf *pkt = pkts[n_pkts_ok];

			p->tx_buf[p->tx_buf_count++] = pkt;
		}
		send_burst_nodrop(p);
	} else {
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->tx_buf_count = tx_buf_count;
		if (tx_buf_count >= p->tx_burst_sz)
			send_burst_nodrop(p);
	}

	return 0;
}

static int
rte_port_ethdev_writer_nodrop_flush(void *port)
{
	struct rte_port_ethdev_writer_nodrop *p =
			(struct rte_port_ethdev_writer_nodrop *) port;

	if (p->tx_buf_count > 0)
		send_burst_nodrop(p);

	return 0;
}

static int
rte_port_ethdev_writer_nodrop_free(void *port)
{
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
		return -EINVAL;
	}

	rte_port_ethdev_writer_nodrop_flush(port);
	rte_free(port);

	return 0;
}

static int rte_port_ethdev_writer_nodrop_stats_read(void *port,
	struct rte_port_out_stats *stats, int clear)
{
	struct rte_port_ethdev_writer_nodrop *p =
			(struct rte_port_ethdev_writer_nodrop *) port;

	if (stats != NULL)
		memcpy(stats, &p->stats, sizeof(p->stats));

	if (clear)
		memset(&p->stats, 0, sizeof(p->stats));

	return 0;
}

/*
 * Summary of port operations
 */
struct rte_port_in_ops rte_port_ethdev_reader_ops = {
	.f_create = rte_port_ethdev_reader_create,
	.f_free = rte_port_ethdev_reader_free,
	.f_rx = rte_port_ethdev_reader_rx,
	.f_stats = rte_port_ethdev_reader_stats_read,
};

struct rte_port_out_ops rte_port_ethdev_writer_ops = {
	.f_create = rte_port_ethdev_writer_create,
	.f_free = rte_port_ethdev_writer_free,
	.f_tx = rte_port_ethdev_writer_tx,
	.f_tx_bulk = rte_port_ethdev_writer_tx_bulk,
	.f_flush = rte_port_ethdev_writer_flush,
	.f_stats = rte_port_ethdev_writer_stats_read,
};

struct rte_port_out_ops rte_port_ethdev_writer_nodrop_ops = {
	.f_create = rte_port_ethdev_writer_nodrop_create,
	.f_free = rte_port_ethdev_writer_nodrop_free,
	.f_tx = rte_port_ethdev_writer_nodrop_tx,
	.f_tx_bulk = rte_port_ethdev_writer_nodrop_tx_bulk,
	.f_flush = rte_port_ethdev_writer_nodrop_flush,
	.f_stats = rte_port_ethdev_writer_nodrop_stats_read,
};
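
/*
 * Usage sketch (illustrative only, not part of the library): driving the
 * writer port directly through its ops table. It assumes an ethdev port 0
 * with TX queue 0 already configured and started; "conf", "port" and "pkt"
 * below are hypothetical names, and pkt is an already allocated rte_mbuf.
 *
 *	struct rte_port_ethdev_writer_params conf = {
 *		.port_id = 0,
 *		.queue_id = 0,
 *		.tx_burst_sz = 32,
 *	};
 *	void *port = rte_port_ethdev_writer_ops.f_create(&conf,
 *		rte_socket_id());
 *
 *	if (port != NULL) {
 *		rte_port_ethdev_writer_ops.f_tx(port, pkt);
 *		rte_port_ethdev_writer_ops.f_flush(port);
 *		rte_port_ethdev_writer_ops.f_free(port);
 *	}
 */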