1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2018 Intel Corporation
5 #include <rte_memcpy.h>
7 #include <rte_ethdev.h>
10 #include <rte_errno.h>
11 #include <rte_string_fns.h>
13 #include "rte_pdump.h"
15 #define DEVICE_ID_SIZE 64
16 /* Macros for printing using RTE_LOG */
17 #define RTE_LOGTYPE_PDUMP RTE_LOGTYPE_USER1
19 /* Used for the multi-process communication */
20 #define PDUMP_MP "mp_pdump"
22 enum pdump_operation
{
31 struct pdump_request
{
37 char device
[DEVICE_ID_SIZE
];
39 struct rte_ring
*ring
;
40 struct rte_mempool
*mp
;
44 char device
[DEVICE_ID_SIZE
];
46 struct rte_ring
*ring
;
47 struct rte_mempool
*mp
;
53 struct pdump_response
{
/*
 * Per-(port, queue) capture state for the Rx and Tx directions.
 * An entry is active when 'cb' is non-NULL; the data-path callback
 * publishes cloned mbufs from 'mp' onto 'ring'.
 */
static struct pdump_rxtx_cbs {
	struct rte_ring *ring;	/* client-supplied ring for captured packets */
	struct rte_mempool *mp;	/* client-supplied pool for mbuf copies */
	const struct rte_eth_rxtx_callback *cb;	/* registered ethdev callback */
	void *filter;	/* reserved, currently unused */
} rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
68 pdump_pktmbuf_copy_data(struct rte_mbuf
*seg
, const struct rte_mbuf
*m
)
70 if (rte_pktmbuf_tailroom(seg
) < m
->data_len
) {
72 "User mempool: insufficient data_len of mbuf\n");
77 seg
->vlan_tci
= m
->vlan_tci
;
79 seg
->tx_offload
= m
->tx_offload
;
80 seg
->ol_flags
= m
->ol_flags
;
81 seg
->packet_type
= m
->packet_type
;
82 seg
->vlan_tci_outer
= m
->vlan_tci_outer
;
83 seg
->data_len
= m
->data_len
;
84 seg
->pkt_len
= seg
->data_len
;
85 rte_memcpy(rte_pktmbuf_mtod(seg
, void *),
86 rte_pktmbuf_mtod(m
, void *),
87 rte_pktmbuf_data_len(seg
));
92 static inline struct rte_mbuf
*
93 pdump_pktmbuf_copy(struct rte_mbuf
*m
, struct rte_mempool
*mp
)
95 struct rte_mbuf
*m_dup
, *seg
, **prev
;
99 m_dup
= rte_pktmbuf_alloc(mp
);
100 if (unlikely(m_dup
== NULL
))
110 if (pdump_pktmbuf_copy_data(seg
, m
) < 0) {
112 rte_pktmbuf_free_seg(seg
);
113 rte_pktmbuf_free(m_dup
);
118 } while ((m
= m
->next
) != NULL
&&
119 (seg
= rte_pktmbuf_alloc(mp
)) != NULL
);
122 m_dup
->nb_segs
= nseg
;
123 m_dup
->pkt_len
= pktlen
;
125 /* Allocation of new indirect segment failed */
126 if (unlikely(seg
== NULL
)) {
127 rte_pktmbuf_free(m_dup
);
131 __rte_mbuf_sanity_check(m_dup
, 1);
136 pdump_copy(struct rte_mbuf
**pkts
, uint16_t nb_pkts
, void *user_params
)
141 struct rte_mbuf
*dup_bufs
[nb_pkts
];
142 struct pdump_rxtx_cbs
*cbs
;
143 struct rte_ring
*ring
;
144 struct rte_mempool
*mp
;
150 for (i
= 0; i
< nb_pkts
; i
++) {
151 p
= pdump_pktmbuf_copy(pkts
[i
], mp
);
153 dup_bufs
[d_pkts
++] = p
;
156 ring_enq
= rte_ring_enqueue_burst(ring
, (void *)dup_bufs
, d_pkts
, NULL
);
157 if (unlikely(ring_enq
< d_pkts
)) {
158 RTE_LOG(DEBUG
, PDUMP
,
159 "only %d of packets enqueued to ring\n", ring_enq
);
161 rte_pktmbuf_free(dup_bufs
[ring_enq
]);
162 } while (++ring_enq
< d_pkts
);
167 pdump_rx(uint16_t port __rte_unused
, uint16_t qidx __rte_unused
,
168 struct rte_mbuf
**pkts
, uint16_t nb_pkts
,
169 uint16_t max_pkts __rte_unused
,
172 pdump_copy(pkts
, nb_pkts
, user_params
);
177 pdump_tx(uint16_t port __rte_unused
, uint16_t qidx __rte_unused
,
178 struct rte_mbuf
**pkts
, uint16_t nb_pkts
, void *user_params
)
180 pdump_copy(pkts
, nb_pkts
, user_params
);
185 pdump_register_rx_callbacks(uint16_t end_q
, uint16_t port
, uint16_t queue
,
186 struct rte_ring
*ring
, struct rte_mempool
*mp
,
190 struct pdump_rxtx_cbs
*cbs
= NULL
;
192 qid
= (queue
== RTE_PDUMP_ALL_QUEUES
) ? 0 : queue
;
193 for (; qid
< end_q
; qid
++) {
194 cbs
= &rx_cbs
[port
][qid
];
195 if (cbs
&& operation
== ENABLE
) {
198 "failed to add rx callback for port=%d "
199 "and queue=%d, callback already exists\n",
205 cbs
->cb
= rte_eth_add_first_rx_callback(port
, qid
,
207 if (cbs
->cb
== NULL
) {
209 "failed to add rx callback, errno=%d\n",
214 if (cbs
&& operation
== DISABLE
) {
217 if (cbs
->cb
== NULL
) {
219 "failed to delete non existing rx "
220 "callback for port=%d and queue=%d\n",
224 ret
= rte_eth_remove_rx_callback(port
, qid
, cbs
->cb
);
227 "failed to remove rx callback, errno=%d\n",
239 pdump_register_tx_callbacks(uint16_t end_q
, uint16_t port
, uint16_t queue
,
240 struct rte_ring
*ring
, struct rte_mempool
*mp
,
245 struct pdump_rxtx_cbs
*cbs
= NULL
;
247 qid
= (queue
== RTE_PDUMP_ALL_QUEUES
) ? 0 : queue
;
248 for (; qid
< end_q
; qid
++) {
249 cbs
= &tx_cbs
[port
][qid
];
250 if (cbs
&& operation
== ENABLE
) {
253 "failed to add tx callback for port=%d "
254 "and queue=%d, callback already exists\n",
260 cbs
->cb
= rte_eth_add_tx_callback(port
, qid
, pdump_tx
,
262 if (cbs
->cb
== NULL
) {
264 "failed to add tx callback, errno=%d\n",
269 if (cbs
&& operation
== DISABLE
) {
272 if (cbs
->cb
== NULL
) {
274 "failed to delete non existing tx "
275 "callback for port=%d and queue=%d\n",
279 ret
= rte_eth_remove_tx_callback(port
, qid
, cbs
->cb
);
282 "failed to remove tx callback, errno=%d\n",
294 set_pdump_rxtx_cbs(const struct pdump_request
*p
)
296 uint16_t nb_rx_q
= 0, nb_tx_q
= 0, end_q
, queue
;
301 struct rte_ring
*ring
;
302 struct rte_mempool
*mp
;
306 if (operation
== ENABLE
) {
307 ret
= rte_eth_dev_get_port_by_name(p
->data
.en_v1
.device
,
311 "failed to get port id for device id=%s\n",
312 p
->data
.en_v1
.device
);
315 queue
= p
->data
.en_v1
.queue
;
316 ring
= p
->data
.en_v1
.ring
;
317 mp
= p
->data
.en_v1
.mp
;
319 ret
= rte_eth_dev_get_port_by_name(p
->data
.dis_v1
.device
,
323 "failed to get port id for device id=%s\n",
324 p
->data
.dis_v1
.device
);
327 queue
= p
->data
.dis_v1
.queue
;
328 ring
= p
->data
.dis_v1
.ring
;
329 mp
= p
->data
.dis_v1
.mp
;
332 /* validation if packet capture is for all queues */
333 if (queue
== RTE_PDUMP_ALL_QUEUES
) {
334 struct rte_eth_dev_info dev_info
;
336 rte_eth_dev_info_get(port
, &dev_info
);
337 nb_rx_q
= dev_info
.nb_rx_queues
;
338 nb_tx_q
= dev_info
.nb_tx_queues
;
339 if (nb_rx_q
== 0 && flags
& RTE_PDUMP_FLAG_RX
) {
341 "number of rx queues cannot be 0\n");
344 if (nb_tx_q
== 0 && flags
& RTE_PDUMP_FLAG_TX
) {
346 "number of tx queues cannot be 0\n");
349 if ((nb_tx_q
== 0 || nb_rx_q
== 0) &&
350 flags
== RTE_PDUMP_FLAG_RXTX
) {
352 "both tx&rx queues must be non zero\n");
357 /* register RX callback */
358 if (flags
& RTE_PDUMP_FLAG_RX
) {
359 end_q
= (queue
== RTE_PDUMP_ALL_QUEUES
) ? nb_rx_q
: queue
+ 1;
360 ret
= pdump_register_rx_callbacks(end_q
, port
, queue
, ring
, mp
,
366 /* register TX callback */
367 if (flags
& RTE_PDUMP_FLAG_TX
) {
368 end_q
= (queue
== RTE_PDUMP_ALL_QUEUES
) ? nb_tx_q
: queue
+ 1;
369 ret
= pdump_register_tx_callbacks(end_q
, port
, queue
, ring
, mp
,
379 pdump_server(const struct rte_mp_msg
*mp_msg
, const void *peer
)
381 struct rte_mp_msg mp_resp
;
382 const struct pdump_request
*cli_req
;
383 struct pdump_response
*resp
= (struct pdump_response
*)&mp_resp
.param
;
385 /* recv client requests */
386 if (mp_msg
->len_param
!= sizeof(*cli_req
)) {
387 RTE_LOG(ERR
, PDUMP
, "failed to recv from client\n");
388 resp
->err_value
= -EINVAL
;
390 cli_req
= (const struct pdump_request
*)mp_msg
->param
;
391 resp
->ver
= cli_req
->ver
;
392 resp
->res_op
= cli_req
->op
;
393 resp
->err_value
= set_pdump_rxtx_cbs(cli_req
);
396 strlcpy(mp_resp
.name
, PDUMP_MP
, RTE_MP_MAX_NAME_LEN
);
397 mp_resp
.len_param
= sizeof(*resp
);
399 if (rte_mp_reply(&mp_resp
, peer
) < 0) {
400 RTE_LOG(ERR
, PDUMP
, "failed to send to client:%s, %s:%d\n",
401 strerror(rte_errno
), __func__
, __LINE__
);
/* Body of rte_pdump_init(): register pdump_server() as the handler for
 * PDUMP_MP messages on the multi-process channel (server side, primary
 * process); the registration result is returned to the caller.
 * NOTE(review): the function's signature lines are missing from this
 * extraction — confirm against the upstream header. */
411 return rte_mp_action_register(PDUMP_MP
, pdump_server
);
415 rte_pdump_uninit(void)
417 rte_mp_action_unregister(PDUMP_MP
);
423 pdump_validate_ring_mp(struct rte_ring
*ring
, struct rte_mempool
*mp
)
425 if (ring
== NULL
|| mp
== NULL
) {
426 RTE_LOG(ERR
, PDUMP
, "NULL ring or mempool are passed %s:%d\n",
431 if (mp
->flags
& MEMPOOL_F_SP_PUT
|| mp
->flags
& MEMPOOL_F_SC_GET
) {
432 RTE_LOG(ERR
, PDUMP
, "mempool with either SP or SC settings"
433 " is not valid for pdump, should have MP and MC settings\n");
437 if (ring
->prod
.single
|| ring
->cons
.single
) {
438 RTE_LOG(ERR
, PDUMP
, "ring with either SP or SC settings"
439 " is not valid for pdump, should have MP and MC settings\n");
448 pdump_validate_flags(uint32_t flags
)
450 if (flags
!= RTE_PDUMP_FLAG_RX
&& flags
!= RTE_PDUMP_FLAG_TX
&&
451 flags
!= RTE_PDUMP_FLAG_RXTX
) {
453 "invalid flags, should be either rx/tx/rxtx\n");
462 pdump_validate_port(uint16_t port
, char *name
)
466 if (port
>= RTE_MAX_ETHPORTS
) {
467 RTE_LOG(ERR
, PDUMP
, "Invalid port id %u, %s:%d\n", port
,
473 ret
= rte_eth_dev_get_name_by_port(port
, name
);
476 "port id to name mapping failed for port id=%u, %s:%d\n",
477 port
, __func__
, __LINE__
);
486 pdump_prepare_client_request(char *device
, uint16_t queue
,
489 struct rte_ring
*ring
,
490 struct rte_mempool
*mp
,
494 struct rte_mp_msg mp_req
, *mp_rep
;
495 struct rte_mp_reply mp_reply
;
496 struct timespec ts
= {.tv_sec
= 5, .tv_nsec
= 0};
497 struct pdump_request
*req
= (struct pdump_request
*)mp_req
.param
;
498 struct pdump_response
*resp
;
503 if ((operation
& ENABLE
) != 0) {
504 strlcpy(req
->data
.en_v1
.device
, device
,
505 sizeof(req
->data
.en_v1
.device
));
506 req
->data
.en_v1
.queue
= queue
;
507 req
->data
.en_v1
.ring
= ring
;
508 req
->data
.en_v1
.mp
= mp
;
509 req
->data
.en_v1
.filter
= filter
;
511 strlcpy(req
->data
.dis_v1
.device
, device
,
512 sizeof(req
->data
.dis_v1
.device
));
513 req
->data
.dis_v1
.queue
= queue
;
514 req
->data
.dis_v1
.ring
= NULL
;
515 req
->data
.dis_v1
.mp
= NULL
;
516 req
->data
.dis_v1
.filter
= NULL
;
519 strlcpy(mp_req
.name
, PDUMP_MP
, RTE_MP_MAX_NAME_LEN
);
520 mp_req
.len_param
= sizeof(*req
);
522 if (rte_mp_request_sync(&mp_req
, &mp_reply
, &ts
) == 0) {
523 mp_rep
= &mp_reply
.msgs
[0];
524 resp
= (struct pdump_response
*)mp_rep
->param
;
525 rte_errno
= resp
->err_value
;
526 if (!resp
->err_value
)
533 "client request for pdump enable/disable failed\n");
538 rte_pdump_enable(uint16_t port
, uint16_t queue
, uint32_t flags
,
539 struct rte_ring
*ring
,
540 struct rte_mempool
*mp
,
545 char name
[DEVICE_ID_SIZE
];
547 ret
= pdump_validate_port(port
, name
);
550 ret
= pdump_validate_ring_mp(ring
, mp
);
553 ret
= pdump_validate_flags(flags
);
557 ret
= pdump_prepare_client_request(name
, queue
, flags
,
558 ENABLE
, ring
, mp
, filter
);
564 rte_pdump_enable_by_deviceid(char *device_id
, uint16_t queue
,
566 struct rte_ring
*ring
,
567 struct rte_mempool
*mp
,
572 ret
= pdump_validate_ring_mp(ring
, mp
);
575 ret
= pdump_validate_flags(flags
);
579 ret
= pdump_prepare_client_request(device_id
, queue
, flags
,
580 ENABLE
, ring
, mp
, filter
);
586 rte_pdump_disable(uint16_t port
, uint16_t queue
, uint32_t flags
)
589 char name
[DEVICE_ID_SIZE
];
591 ret
= pdump_validate_port(port
, name
);
594 ret
= pdump_validate_flags(flags
);
598 ret
= pdump_prepare_client_request(name
, queue
, flags
,
599 DISABLE
, NULL
, NULL
, NULL
);
/* rte_pdump_disable_by_deviceid(): disable a capture identified by
 * device name — validates the flags, then sends a DISABLE request with
 * NULL ring/mempool/filter.  NOTE(review): the function's closing
 * lines fall outside this extraction; presumably it returns the
 * pdump_prepare_client_request() result — confirm upstream. */
605 rte_pdump_disable_by_deviceid(char *device_id
, uint16_t queue
,
610 ret
= pdump_validate_flags(flags
);
614 ret
= pdump_prepare_client_request(device_id
, queue
, flags
,
615 DISABLE
, NULL
, NULL
, NULL
);