1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Intel Corporation
9 #include <rte_malloc.h>
11 #include "rte_port_fd.h"
#ifdef RTE_PORT_STATS_COLLECT

/* Bump the reader input / drop counters when stats collection is enabled. */
#define RTE_PORT_FD_READER_STATS_PKTS_IN_ADD(port, val) \
	do { port->stats.n_pkts_in += val; } while (0)

#define RTE_PORT_FD_READER_STATS_PKTS_DROP_ADD(port, val) \
	do { port->stats.n_pkts_drop += val; } while (0)

#else

/* Stats collection compiled out: macros expand to nothing. */
#define RTE_PORT_FD_READER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_FD_READER_STATS_PKTS_DROP_ADD(port, val)

#endif
30 struct rte_port_fd_reader
{
31 struct rte_port_in_stats stats
;
34 struct rte_mempool
*mempool
;
38 rte_port_fd_reader_create(void *params
, int socket_id
)
40 struct rte_port_fd_reader_params
*conf
=
42 struct rte_port_fd_reader
*port
;
44 /* Check input parameters */
46 RTE_LOG(ERR
, PORT
, "%s: params is NULL\n", __func__
);
50 RTE_LOG(ERR
, PORT
, "%s: Invalid file descriptor\n", __func__
);
54 RTE_LOG(ERR
, PORT
, "%s: Invalid MTU\n", __func__
);
57 if (conf
->mempool
== NULL
) {
58 RTE_LOG(ERR
, PORT
, "%s: Invalid mempool\n", __func__
);
62 /* Memory allocation */
63 port
= rte_zmalloc_socket("PORT", sizeof(*port
),
64 RTE_CACHE_LINE_SIZE
, socket_id
);
66 RTE_LOG(ERR
, PORT
, "%s: Failed to allocate port\n", __func__
);
72 port
->mtu
= conf
->mtu
;
73 port
->mempool
= conf
->mempool
;
79 rte_port_fd_reader_rx(void *port
, struct rte_mbuf
**pkts
, uint32_t n_pkts
)
81 struct rte_port_fd_reader
*p
= port
;
84 if (rte_pktmbuf_alloc_bulk(p
->mempool
, pkts
, n_pkts
) != 0)
87 for (i
= 0; i
< n_pkts
; i
++) {
88 struct rte_mbuf
*pkt
= pkts
[i
];
89 void *pkt_data
= rte_pktmbuf_mtod(pkt
, void *);
92 n_bytes
= read(p
->fd
, pkt_data
, (size_t) p
->mtu
);
96 pkt
->data_len
= n_bytes
;
97 pkt
->pkt_len
= n_bytes
;
100 for (j
= i
; j
< n_pkts
; j
++)
101 rte_pktmbuf_free(pkts
[j
]);
103 RTE_PORT_FD_READER_STATS_PKTS_IN_ADD(p
, i
);
109 rte_port_fd_reader_free(void *port
)
112 RTE_LOG(ERR
, PORT
, "%s: port is NULL\n", __func__
);
121 static int rte_port_fd_reader_stats_read(void *port
,
122 struct rte_port_in_stats
*stats
, int clear
)
124 struct rte_port_fd_reader
*p
=
128 memcpy(stats
, &p
->stats
, sizeof(p
->stats
));
131 memset(&p
->stats
, 0, sizeof(p
->stats
));
#ifdef RTE_PORT_STATS_COLLECT

/* Bump the writer input / drop counters when stats collection is enabled. */
#define RTE_PORT_FD_WRITER_STATS_PKTS_IN_ADD(port, val) \
	do { port->stats.n_pkts_in += val; } while (0)

#define RTE_PORT_FD_WRITER_STATS_PKTS_DROP_ADD(port, val) \
	do { port->stats.n_pkts_drop += val; } while (0)

#else

/* Stats collection compiled out: macros expand to nothing. */
#define RTE_PORT_FD_WRITER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_FD_WRITER_STATS_PKTS_DROP_ADD(port, val)

#endif
153 struct rte_port_fd_writer
{
154 struct rte_port_out_stats stats
;
156 struct rte_mbuf
*tx_buf
[2 * RTE_PORT_IN_BURST_SIZE_MAX
];
157 uint32_t tx_burst_sz
;
158 uint16_t tx_buf_count
;
163 rte_port_fd_writer_create(void *params
, int socket_id
)
165 struct rte_port_fd_writer_params
*conf
=
167 struct rte_port_fd_writer
*port
;
169 /* Check input parameters */
170 if ((conf
== NULL
) ||
171 (conf
->tx_burst_sz
== 0) ||
172 (conf
->tx_burst_sz
> RTE_PORT_IN_BURST_SIZE_MAX
) ||
173 (!rte_is_power_of_2(conf
->tx_burst_sz
))) {
174 RTE_LOG(ERR
, PORT
, "%s: Invalid input parameters\n", __func__
);
178 /* Memory allocation */
179 port
= rte_zmalloc_socket("PORT", sizeof(*port
),
180 RTE_CACHE_LINE_SIZE
, socket_id
);
182 RTE_LOG(ERR
, PORT
, "%s: Failed to allocate port\n", __func__
);
188 port
->tx_burst_sz
= conf
->tx_burst_sz
;
189 port
->tx_buf_count
= 0;
195 send_burst(struct rte_port_fd_writer
*p
)
199 for (i
= 0; i
< p
->tx_buf_count
; i
++) {
200 struct rte_mbuf
*pkt
= p
->tx_buf
[i
];
201 void *pkt_data
= rte_pktmbuf_mtod(pkt
, void*);
202 size_t n_bytes
= rte_pktmbuf_data_len(pkt
);
205 ret
= write(p
->fd
, pkt_data
, n_bytes
);
210 RTE_PORT_FD_WRITER_STATS_PKTS_DROP_ADD(p
, p
->tx_buf_count
- i
);
212 for (i
= 0; i
< p
->tx_buf_count
; i
++)
213 rte_pktmbuf_free(p
->tx_buf
[i
]);
219 rte_port_fd_writer_tx(void *port
, struct rte_mbuf
*pkt
)
221 struct rte_port_fd_writer
*p
=
224 p
->tx_buf
[p
->tx_buf_count
++] = pkt
;
225 RTE_PORT_FD_WRITER_STATS_PKTS_IN_ADD(p
, 1);
226 if (p
->tx_buf_count
>= p
->tx_burst_sz
)
233 rte_port_fd_writer_tx_bulk(void *port
,
234 struct rte_mbuf
**pkts
,
237 struct rte_port_fd_writer
*p
=
239 uint32_t tx_buf_count
= p
->tx_buf_count
;
241 if ((pkts_mask
& (pkts_mask
+ 1)) == 0) {
242 uint64_t n_pkts
= __builtin_popcountll(pkts_mask
);
245 for (i
= 0; i
< n_pkts
; i
++)
246 p
->tx_buf
[tx_buf_count
++] = pkts
[i
];
247 RTE_PORT_FD_WRITER_STATS_PKTS_IN_ADD(p
, n_pkts
);
249 for ( ; pkts_mask
; ) {
250 uint32_t pkt_index
= __builtin_ctzll(pkts_mask
);
251 uint64_t pkt_mask
= 1LLU << pkt_index
;
252 struct rte_mbuf
*pkt
= pkts
[pkt_index
];
254 p
->tx_buf
[tx_buf_count
++] = pkt
;
255 RTE_PORT_FD_WRITER_STATS_PKTS_IN_ADD(p
, 1);
256 pkts_mask
&= ~pkt_mask
;
259 p
->tx_buf_count
= tx_buf_count
;
260 if (tx_buf_count
>= p
->tx_burst_sz
)
267 rte_port_fd_writer_flush(void *port
)
269 struct rte_port_fd_writer
*p
=
272 if (p
->tx_buf_count
> 0)
279 rte_port_fd_writer_free(void *port
)
282 RTE_LOG(ERR
, PORT
, "%s: Port is NULL\n", __func__
);
286 rte_port_fd_writer_flush(port
);
292 static int rte_port_fd_writer_stats_read(void *port
,
293 struct rte_port_out_stats
*stats
, int clear
)
295 struct rte_port_fd_writer
*p
=
299 memcpy(stats
, &p
->stats
, sizeof(p
->stats
));
302 memset(&p
->stats
, 0, sizeof(p
->stats
));
/*
 * Port FD Writer Nodrop
 */
#ifdef RTE_PORT_STATS_COLLECT

/* Bump the nodrop-writer input / drop counters when stats are enabled. */
#define RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val) \
	do { port->stats.n_pkts_in += val; } while (0)

#define RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val) \
	do { port->stats.n_pkts_drop += val; } while (0)

#else

/* Stats collection compiled out: macros expand to nothing. */
#define RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val)

#endif
324 struct rte_port_fd_writer_nodrop
{
325 struct rte_port_out_stats stats
;
327 struct rte_mbuf
*tx_buf
[2 * RTE_PORT_IN_BURST_SIZE_MAX
];
328 uint32_t tx_burst_sz
;
329 uint16_t tx_buf_count
;
335 rte_port_fd_writer_nodrop_create(void *params
, int socket_id
)
337 struct rte_port_fd_writer_nodrop_params
*conf
=
339 struct rte_port_fd_writer_nodrop
*port
;
341 /* Check input parameters */
342 if ((conf
== NULL
) ||
344 (conf
->tx_burst_sz
== 0) ||
345 (conf
->tx_burst_sz
> RTE_PORT_IN_BURST_SIZE_MAX
) ||
346 (!rte_is_power_of_2(conf
->tx_burst_sz
))) {
347 RTE_LOG(ERR
, PORT
, "%s: Invalid input parameters\n", __func__
);
351 /* Memory allocation */
352 port
= rte_zmalloc_socket("PORT", sizeof(*port
),
353 RTE_CACHE_LINE_SIZE
, socket_id
);
355 RTE_LOG(ERR
, PORT
, "%s: Failed to allocate port\n", __func__
);
361 port
->tx_burst_sz
= conf
->tx_burst_sz
;
362 port
->tx_buf_count
= 0;
365 * When n_retries is 0 it means that we should wait for every packet to
366 * send no matter how many retries should it take. To limit number of
367 * branches in fast path, we use UINT64_MAX instead of branching.
369 port
->n_retries
= (conf
->n_retries
== 0) ? UINT64_MAX
: conf
->n_retries
;
375 send_burst_nodrop(struct rte_port_fd_writer_nodrop
*p
)
381 for (i
= 0; (i
< p
->tx_buf_count
) && (n_retries
< p
->n_retries
); i
++) {
382 struct rte_mbuf
*pkt
= p
->tx_buf
[i
];
383 void *pkt_data
= rte_pktmbuf_mtod(pkt
, void*);
384 size_t n_bytes
= rte_pktmbuf_data_len(pkt
);
386 for ( ; n_retries
< p
->n_retries
; n_retries
++) {
389 ret
= write(p
->fd
, pkt_data
, n_bytes
);
395 RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_DROP_ADD(p
, p
->tx_buf_count
- i
);
397 for (i
= 0; i
< p
->tx_buf_count
; i
++)
398 rte_pktmbuf_free(p
->tx_buf
[i
]);
404 rte_port_fd_writer_nodrop_tx(void *port
, struct rte_mbuf
*pkt
)
406 struct rte_port_fd_writer_nodrop
*p
=
409 p
->tx_buf
[p
->tx_buf_count
++] = pkt
;
410 RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_IN_ADD(p
, 1);
411 if (p
->tx_buf_count
>= p
->tx_burst_sz
)
412 send_burst_nodrop(p
);
418 rte_port_fd_writer_nodrop_tx_bulk(void *port
,
419 struct rte_mbuf
**pkts
,
422 struct rte_port_fd_writer_nodrop
*p
=
424 uint32_t tx_buf_count
= p
->tx_buf_count
;
426 if ((pkts_mask
& (pkts_mask
+ 1)) == 0) {
427 uint64_t n_pkts
= __builtin_popcountll(pkts_mask
);
430 for (i
= 0; i
< n_pkts
; i
++)
431 p
->tx_buf
[tx_buf_count
++] = pkts
[i
];
432 RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_IN_ADD(p
, n_pkts
);
434 for ( ; pkts_mask
; ) {
435 uint32_t pkt_index
= __builtin_ctzll(pkts_mask
);
436 uint64_t pkt_mask
= 1LLU << pkt_index
;
437 struct rte_mbuf
*pkt
= pkts
[pkt_index
];
439 p
->tx_buf
[tx_buf_count
++] = pkt
;
440 RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_IN_ADD(p
, 1);
441 pkts_mask
&= ~pkt_mask
;
444 p
->tx_buf_count
= tx_buf_count
;
445 if (tx_buf_count
>= p
->tx_burst_sz
)
446 send_burst_nodrop(p
);
452 rte_port_fd_writer_nodrop_flush(void *port
)
454 struct rte_port_fd_writer_nodrop
*p
=
457 if (p
->tx_buf_count
> 0)
458 send_burst_nodrop(p
);
464 rte_port_fd_writer_nodrop_free(void *port
)
467 RTE_LOG(ERR
, PORT
, "%s: Port is NULL\n", __func__
);
471 rte_port_fd_writer_nodrop_flush(port
);
477 static int rte_port_fd_writer_nodrop_stats_read(void *port
,
478 struct rte_port_out_stats
*stats
, int clear
)
480 struct rte_port_fd_writer_nodrop
*p
=
484 memcpy(stats
, &p
->stats
, sizeof(p
->stats
));
487 memset(&p
->stats
, 0, sizeof(p
->stats
));
/*
 * Summary of port operations
 */
495 struct rte_port_in_ops rte_port_fd_reader_ops
= {
496 .f_create
= rte_port_fd_reader_create
,
497 .f_free
= rte_port_fd_reader_free
,
498 .f_rx
= rte_port_fd_reader_rx
,
499 .f_stats
= rte_port_fd_reader_stats_read
,
502 struct rte_port_out_ops rte_port_fd_writer_ops
= {
503 .f_create
= rte_port_fd_writer_create
,
504 .f_free
= rte_port_fd_writer_free
,
505 .f_tx
= rte_port_fd_writer_tx
,
506 .f_tx_bulk
= rte_port_fd_writer_tx_bulk
,
507 .f_flush
= rte_port_fd_writer_flush
,
508 .f_stats
= rte_port_fd_writer_stats_read
,
511 struct rte_port_out_ops rte_port_fd_writer_nodrop_ops
= {
512 .f_create
= rte_port_fd_writer_nodrop_create
,
513 .f_free
= rte_port_fd_writer_nodrop_free
,
514 .f_tx
= rte_port_fd_writer_nodrop_tx
,
515 .f_tx_bulk
= rte_port_fd_writer_nodrop_tx_bulk
,
516 .f_flush
= rte_port_fd_writer_nodrop_flush
,
517 .f_stats
= rte_port_fd_writer_nodrop_stats_read
,