4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include <rte_ethdev.h>
38 #include <rte_malloc.h>
40 #include "rte_port_ethdev.h"
/*
 * Reader statistics macros: compile to counter updates only when
 * RTE_PORT_STATS_COLLECT is enabled, otherwise to nothing (zero-cost).
 * The extraction had lost the #else/#endif, which would have caused
 * macro redefinition; restored here.
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_ETHDEV_READER_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_ETHDEV_READER_STATS_PKTS_DROP_ADD(port, val)

#endif
59 struct rte_port_ethdev_reader
{
60 struct rte_port_in_stats stats
;
/*
 * Create a reader port bound to the (port_id, queue_id) given in params.
 *
 * @param params   struct rte_port_ethdev_reader_params * (must not be NULL)
 * @param socket_id NUMA socket for the allocation
 * @return pointer to the new port, or NULL on bad params / allocation failure
 */
static void *
rte_port_ethdev_reader_create(void *params, int socket_id)
{
	struct rte_port_ethdev_reader_params *conf =
			(struct rte_port_ethdev_reader_params *) params;
	struct rte_port_ethdev_reader *port;

	/* Check input parameters */
	if (conf == NULL) {
		RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
		return NULL;
	}

	/* Memory allocation */
	port = rte_zmalloc_socket("PORT", sizeof(*port),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
		return NULL;
	}

	/* Initialization */
	port->port_id = conf->port_id;
	port->queue_id = conf->queue_id;

	return port;
}
95 rte_port_ethdev_reader_rx(void *port
, struct rte_mbuf
**pkts
, uint32_t n_pkts
)
97 struct rte_port_ethdev_reader
*p
=
98 (struct rte_port_ethdev_reader
*) port
;
101 rx_pkt_cnt
= rte_eth_rx_burst(p
->port_id
, p
->queue_id
, pkts
, n_pkts
);
102 RTE_PORT_ETHDEV_READER_STATS_PKTS_IN_ADD(p
, rx_pkt_cnt
);
107 rte_port_ethdev_reader_free(void *port
)
110 RTE_LOG(ERR
, PORT
, "%s: port is NULL\n", __func__
);
119 static int rte_port_ethdev_reader_stats_read(void *port
,
120 struct rte_port_in_stats
*stats
, int clear
)
122 struct rte_port_ethdev_reader
*p
=
123 (struct rte_port_ethdev_reader
*) port
;
126 memcpy(stats
, &p
->stats
, sizeof(p
->stats
));
129 memset(&p
->stats
, 0, sizeof(p
->stats
));
/*
 * Writer statistics macros: counter updates when RTE_PORT_STATS_COLLECT
 * is enabled, no-ops otherwise. Restored the missing #else/#endif.
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(port, val)

#endif
151 struct rte_port_ethdev_writer
{
152 struct rte_port_out_stats stats
;
154 struct rte_mbuf
*tx_buf
[2 * RTE_PORT_IN_BURST_SIZE_MAX
];
155 uint32_t tx_burst_sz
;
156 uint16_t tx_buf_count
;
163 rte_port_ethdev_writer_create(void *params
, int socket_id
)
165 struct rte_port_ethdev_writer_params
*conf
=
166 (struct rte_port_ethdev_writer_params
*) params
;
167 struct rte_port_ethdev_writer
*port
;
169 /* Check input parameters */
170 if ((conf
== NULL
) ||
171 (conf
->tx_burst_sz
== 0) ||
172 (conf
->tx_burst_sz
> RTE_PORT_IN_BURST_SIZE_MAX
) ||
173 (!rte_is_power_of_2(conf
->tx_burst_sz
))) {
174 RTE_LOG(ERR
, PORT
, "%s: Invalid input parameters\n", __func__
);
178 /* Memory allocation */
179 port
= rte_zmalloc_socket("PORT", sizeof(*port
),
180 RTE_CACHE_LINE_SIZE
, socket_id
);
182 RTE_LOG(ERR
, PORT
, "%s: Failed to allocate port\n", __func__
);
187 port
->port_id
= conf
->port_id
;
188 port
->queue_id
= conf
->queue_id
;
189 port
->tx_burst_sz
= conf
->tx_burst_sz
;
190 port
->tx_buf_count
= 0;
191 port
->bsz_mask
= 1LLU << (conf
->tx_burst_sz
- 1);
197 send_burst(struct rte_port_ethdev_writer
*p
)
201 nb_tx
= rte_eth_tx_burst(p
->port_id
, p
->queue_id
,
202 p
->tx_buf
, p
->tx_buf_count
);
204 RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(p
, p
->tx_buf_count
- nb_tx
);
205 for ( ; nb_tx
< p
->tx_buf_count
; nb_tx
++)
206 rte_pktmbuf_free(p
->tx_buf
[nb_tx
]);
212 rte_port_ethdev_writer_tx(void *port
, struct rte_mbuf
*pkt
)
214 struct rte_port_ethdev_writer
*p
=
215 (struct rte_port_ethdev_writer
*) port
;
217 p
->tx_buf
[p
->tx_buf_count
++] = pkt
;
218 RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p
, 1);
219 if (p
->tx_buf_count
>= p
->tx_burst_sz
)
226 rte_port_ethdev_writer_tx_bulk(void *port
,
227 struct rte_mbuf
**pkts
,
230 struct rte_port_ethdev_writer
*p
=
231 (struct rte_port_ethdev_writer
*) port
;
232 uint64_t bsz_mask
= p
->bsz_mask
;
233 uint32_t tx_buf_count
= p
->tx_buf_count
;
234 uint64_t expr
= (pkts_mask
& (pkts_mask
+ 1)) |
235 ((pkts_mask
& bsz_mask
) ^ bsz_mask
);
238 uint64_t n_pkts
= __builtin_popcountll(pkts_mask
);
244 RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p
, n_pkts
);
245 n_pkts_ok
= rte_eth_tx_burst(p
->port_id
, p
->queue_id
, pkts
,
248 RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(p
, n_pkts
- n_pkts_ok
);
249 for ( ; n_pkts_ok
< n_pkts
; n_pkts_ok
++) {
250 struct rte_mbuf
*pkt
= pkts
[n_pkts_ok
];
252 rte_pktmbuf_free(pkt
);
255 for ( ; pkts_mask
; ) {
256 uint32_t pkt_index
= __builtin_ctzll(pkts_mask
);
257 uint64_t pkt_mask
= 1LLU << pkt_index
;
258 struct rte_mbuf
*pkt
= pkts
[pkt_index
];
260 p
->tx_buf
[tx_buf_count
++] = pkt
;
261 RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p
, 1);
262 pkts_mask
&= ~pkt_mask
;
265 p
->tx_buf_count
= tx_buf_count
;
266 if (tx_buf_count
>= p
->tx_burst_sz
)
274 rte_port_ethdev_writer_flush(void *port
)
276 struct rte_port_ethdev_writer
*p
=
277 (struct rte_port_ethdev_writer
*) port
;
279 if (p
->tx_buf_count
> 0)
286 rte_port_ethdev_writer_free(void *port
)
289 RTE_LOG(ERR
, PORT
, "%s: Port is NULL\n", __func__
);
293 rte_port_ethdev_writer_flush(port
);
299 static int rte_port_ethdev_writer_stats_read(void *port
,
300 struct rte_port_out_stats
*stats
, int clear
)
302 struct rte_port_ethdev_writer
*p
=
303 (struct rte_port_ethdev_writer
*) port
;
306 memcpy(stats
, &p
->stats
, sizeof(p
->stats
));
309 memset(&p
->stats
, 0, sizeof(p
->stats
));
/*
 * Port ETHDEV Writer Nodrop
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val)

#endif
331 struct rte_port_ethdev_writer_nodrop
{
332 struct rte_port_out_stats stats
;
334 struct rte_mbuf
*tx_buf
[2 * RTE_PORT_IN_BURST_SIZE_MAX
];
335 uint32_t tx_burst_sz
;
336 uint16_t tx_buf_count
;
344 rte_port_ethdev_writer_nodrop_create(void *params
, int socket_id
)
346 struct rte_port_ethdev_writer_nodrop_params
*conf
=
347 (struct rte_port_ethdev_writer_nodrop_params
*) params
;
348 struct rte_port_ethdev_writer_nodrop
*port
;
350 /* Check input parameters */
351 if ((conf
== NULL
) ||
352 (conf
->tx_burst_sz
== 0) ||
353 (conf
->tx_burst_sz
> RTE_PORT_IN_BURST_SIZE_MAX
) ||
354 (!rte_is_power_of_2(conf
->tx_burst_sz
))) {
355 RTE_LOG(ERR
, PORT
, "%s: Invalid input parameters\n", __func__
);
359 /* Memory allocation */
360 port
= rte_zmalloc_socket("PORT", sizeof(*port
),
361 RTE_CACHE_LINE_SIZE
, socket_id
);
363 RTE_LOG(ERR
, PORT
, "%s: Failed to allocate port\n", __func__
);
368 port
->port_id
= conf
->port_id
;
369 port
->queue_id
= conf
->queue_id
;
370 port
->tx_burst_sz
= conf
->tx_burst_sz
;
371 port
->tx_buf_count
= 0;
372 port
->bsz_mask
= 1LLU << (conf
->tx_burst_sz
- 1);
375 * When n_retries is 0 it means that we should wait for every packet to
376 * send no matter how many retries should it take. To limit number of
377 * branches in fast path, we use UINT64_MAX instead of branching.
379 port
->n_retries
= (conf
->n_retries
== 0) ? UINT64_MAX
: conf
->n_retries
;
385 send_burst_nodrop(struct rte_port_ethdev_writer_nodrop
*p
)
387 uint32_t nb_tx
= 0, i
;
389 nb_tx
= rte_eth_tx_burst(p
->port_id
, p
->queue_id
, p
->tx_buf
,
392 /* We sent all the packets in a first try */
393 if (nb_tx
>= p
->tx_buf_count
) {
398 for (i
= 0; i
< p
->n_retries
; i
++) {
399 nb_tx
+= rte_eth_tx_burst(p
->port_id
, p
->queue_id
,
400 p
->tx_buf
+ nb_tx
, p
->tx_buf_count
- nb_tx
);
402 /* We sent all the packets in more than one try */
403 if (nb_tx
>= p
->tx_buf_count
) {
409 /* We didn't send the packets in maximum allowed attempts */
410 RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_DROP_ADD(p
, p
->tx_buf_count
- nb_tx
);
411 for ( ; nb_tx
< p
->tx_buf_count
; nb_tx
++)
412 rte_pktmbuf_free(p
->tx_buf
[nb_tx
]);
418 rte_port_ethdev_writer_nodrop_tx(void *port
, struct rte_mbuf
*pkt
)
420 struct rte_port_ethdev_writer_nodrop
*p
=
421 (struct rte_port_ethdev_writer_nodrop
*) port
;
423 p
->tx_buf
[p
->tx_buf_count
++] = pkt
;
424 RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p
, 1);
425 if (p
->tx_buf_count
>= p
->tx_burst_sz
)
426 send_burst_nodrop(p
);
432 rte_port_ethdev_writer_nodrop_tx_bulk(void *port
,
433 struct rte_mbuf
**pkts
,
436 struct rte_port_ethdev_writer_nodrop
*p
=
437 (struct rte_port_ethdev_writer_nodrop
*) port
;
439 uint64_t bsz_mask
= p
->bsz_mask
;
440 uint32_t tx_buf_count
= p
->tx_buf_count
;
441 uint64_t expr
= (pkts_mask
& (pkts_mask
+ 1)) |
442 ((pkts_mask
& bsz_mask
) ^ bsz_mask
);
445 uint64_t n_pkts
= __builtin_popcountll(pkts_mask
);
449 send_burst_nodrop(p
);
451 RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p
, n_pkts
);
452 n_pkts_ok
= rte_eth_tx_burst(p
->port_id
, p
->queue_id
, pkts
,
455 if (n_pkts_ok
>= n_pkts
)
459 * If we didnt manage to send all packets in single burst, move
460 * remaining packets to the buffer and call send burst.
462 for (; n_pkts_ok
< n_pkts
; n_pkts_ok
++) {
463 struct rte_mbuf
*pkt
= pkts
[n_pkts_ok
];
464 p
->tx_buf
[p
->tx_buf_count
++] = pkt
;
466 send_burst_nodrop(p
);
468 for ( ; pkts_mask
; ) {
469 uint32_t pkt_index
= __builtin_ctzll(pkts_mask
);
470 uint64_t pkt_mask
= 1LLU << pkt_index
;
471 struct rte_mbuf
*pkt
= pkts
[pkt_index
];
473 p
->tx_buf
[tx_buf_count
++] = pkt
;
474 RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p
, 1);
475 pkts_mask
&= ~pkt_mask
;
478 p
->tx_buf_count
= tx_buf_count
;
479 if (tx_buf_count
>= p
->tx_burst_sz
)
480 send_burst_nodrop(p
);
487 rte_port_ethdev_writer_nodrop_flush(void *port
)
489 struct rte_port_ethdev_writer_nodrop
*p
=
490 (struct rte_port_ethdev_writer_nodrop
*) port
;
492 if (p
->tx_buf_count
> 0)
493 send_burst_nodrop(p
);
499 rte_port_ethdev_writer_nodrop_free(void *port
)
502 RTE_LOG(ERR
, PORT
, "%s: Port is NULL\n", __func__
);
506 rte_port_ethdev_writer_nodrop_flush(port
);
512 static int rte_port_ethdev_writer_nodrop_stats_read(void *port
,
513 struct rte_port_out_stats
*stats
, int clear
)
515 struct rte_port_ethdev_writer_nodrop
*p
=
516 (struct rte_port_ethdev_writer_nodrop
*) port
;
519 memcpy(stats
, &p
->stats
, sizeof(p
->stats
));
522 memset(&p
->stats
, 0, sizeof(p
->stats
));
528 * Summary of port operations
530 struct rte_port_in_ops rte_port_ethdev_reader_ops
= {
531 .f_create
= rte_port_ethdev_reader_create
,
532 .f_free
= rte_port_ethdev_reader_free
,
533 .f_rx
= rte_port_ethdev_reader_rx
,
534 .f_stats
= rte_port_ethdev_reader_stats_read
,
537 struct rte_port_out_ops rte_port_ethdev_writer_ops
= {
538 .f_create
= rte_port_ethdev_writer_create
,
539 .f_free
= rte_port_ethdev_writer_free
,
540 .f_tx
= rte_port_ethdev_writer_tx
,
541 .f_tx_bulk
= rte_port_ethdev_writer_tx_bulk
,
542 .f_flush
= rte_port_ethdev_writer_flush
,
543 .f_stats
= rte_port_ethdev_writer_stats_read
,
546 struct rte_port_out_ops rte_port_ethdev_writer_nodrop_ops
= {
547 .f_create
= rte_port_ethdev_writer_nodrop_create
,
548 .f_free
= rte_port_ethdev_writer_nodrop_free
,
549 .f_tx
= rte_port_ethdev_writer_nodrop_tx
,
550 .f_tx_bulk
= rte_port_ethdev_writer_nodrop_tx_bulk
,
551 .f_flush
= rte_port_ethdev_writer_nodrop_flush
,
552 .f_stats
= rte_port_ethdev_writer_nodrop_stats_read
,