/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation
 */

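/*
 * Packet-capture support: a pdump client (secondary process) sends
 * enable/disable requests over the DPDK multi-process channel; the
 * primary process answers by (un)registering ethdev rx/tx callbacks
 * that duplicate each burst into a client-supplied ring.
 */
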
#include <rte_memcpy.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_string_fns.h>

#include "rte_pdump.h"

#define DEVICE_ID_SIZE 64
/* Macro for printing using RTE_LOG */
#define RTE_LOGTYPE_PDUMP RTE_LOGTYPE_USER1

/* Used for the multi-process communication */
#define PDUMP_MP "mp_pdump"

enum pdump_operation {
	DISABLE = 1,
	ENABLE = 2
};

enum pdump_version {
	V1 = 1
};

struct pdump_request {
	uint16_t ver;
	uint16_t op;
	uint32_t flags;
	union pdump_data {
		struct enable_v1 {
			char device[DEVICE_ID_SIZE];
			uint16_t queue;
			struct rte_ring *ring;
			struct rte_mempool *mp;
			void *filter;
		} en_v1;
		struct disable_v1 {
			char device[DEVICE_ID_SIZE];
			uint16_t queue;
			struct rte_ring *ring;
			struct rte_mempool *mp;
			void *filter;
		} dis_v1;
	} data;
};

struct pdump_response {
	uint16_t ver;
	uint16_t res_op;
	int32_t err_value;
};

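/* Per-port, per-queue capture state for the rx and tx directions */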
static struct pdump_rxtx_cbs {
	struct rte_ring *ring;
	struct rte_mempool *mp;
	const struct rte_eth_rxtx_callback *cb;
	void *filter;
} rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
	tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];

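/* Copy one segment's data and metadata from m into seg */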
static inline int
pdump_pktmbuf_copy_data(struct rte_mbuf *seg, const struct rte_mbuf *m)
{
	if (rte_pktmbuf_tailroom(seg) < m->data_len) {
		RTE_LOG(ERR, PDUMP,
			"User mempool: insufficient tailroom for mbuf data\n");
		return -EINVAL;
	}

	seg->port = m->port;
	seg->vlan_tci = m->vlan_tci;
	seg->hash = m->hash;
	seg->tx_offload = m->tx_offload;
	seg->ol_flags = m->ol_flags;
	seg->packet_type = m->packet_type;
	seg->vlan_tci_outer = m->vlan_tci_outer;
	seg->data_len = m->data_len;
	seg->pkt_len = seg->data_len;
	rte_memcpy(rte_pktmbuf_mtod(seg, void *),
		   rte_pktmbuf_mtod(m, void *),
		   rte_pktmbuf_data_len(seg));

	return 0;
}

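/*
 * Deep-copy a (possibly multi-segment) mbuf chain into mbufs allocated
 * from the capture mempool; returns NULL if allocation fails or a
 * segment does not fit.
 */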
static inline struct rte_mbuf *
pdump_pktmbuf_copy(struct rte_mbuf *m, struct rte_mempool *mp)
{
	struct rte_mbuf *m_dup, *seg, **prev;
	uint32_t pktlen;
	uint16_t nseg;

	m_dup = rte_pktmbuf_alloc(mp);
	if (unlikely(m_dup == NULL))
		return NULL;

	seg = m_dup;
	prev = &seg->next;
	pktlen = m->pkt_len;
	nseg = 0;

	do {
		nseg++;
		if (pdump_pktmbuf_copy_data(seg, m) < 0) {
			if (seg != m_dup)
				rte_pktmbuf_free_seg(seg);
			rte_pktmbuf_free(m_dup);
			return NULL;
		}
		*prev = seg;
		prev = &seg->next;
	} while ((m = m->next) != NULL &&
			(seg = rte_pktmbuf_alloc(mp)) != NULL);

	*prev = NULL;
	m_dup->nb_segs = nseg;
	m_dup->pkt_len = pktlen;

	/* Allocation of a new segment failed */
	if (unlikely(seg == NULL)) {
		rte_pktmbuf_free(m_dup);
		return NULL;
	}

	__rte_mbuf_sanity_check(m_dup, 1);
	return m_dup;
}

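/*
 * Duplicate a burst and enqueue the copies onto the capture ring;
 * copies that do not fit in the ring are freed. The original burst is
 * never modified.
 */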
static inline void
pdump_copy(struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
{
	unsigned int i;
	int ring_enq;
	uint16_t d_pkts = 0;
	struct rte_mbuf *dup_bufs[nb_pkts];
	struct pdump_rxtx_cbs *cbs;
	struct rte_ring *ring;
	struct rte_mempool *mp;
	struct rte_mbuf *p;

	cbs = user_params;
	ring = cbs->ring;
	mp = cbs->mp;
	for (i = 0; i < nb_pkts; i++) {
		p = pdump_pktmbuf_copy(pkts[i], mp);
		if (p)
			dup_bufs[d_pkts++] = p;
	}

	ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
	if (unlikely(ring_enq < d_pkts)) {
		RTE_LOG(DEBUG, PDUMP,
			"only %d of %d packets enqueued to ring\n",
			ring_enq, d_pkts);
		do {
			rte_pktmbuf_free(dup_bufs[ring_enq]);
		} while (++ring_enq < d_pkts);
	}
}

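/* Rx callback: runs before the application sees the burst */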
static uint16_t
pdump_rx(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
	struct rte_mbuf **pkts, uint16_t nb_pkts,
	uint16_t max_pkts __rte_unused,
	void *user_params)
{
	pdump_copy(pkts, nb_pkts, user_params);
	return nb_pkts;
}

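/* Tx callback: runs as the application hands the burst to the PMD */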
static uint16_t
pdump_tx(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
	struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
{
	pdump_copy(pkts, nb_pkts, user_params);
	return nb_pkts;
}

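/*
 * Add or remove the rx capture callback on queues [queue, end_q) of a
 * port; with RTE_PDUMP_ALL_QUEUES the scan starts from queue 0.
 */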
static int
pdump_register_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
				struct rte_ring *ring, struct rte_mempool *mp,
				uint16_t operation)
{
	uint16_t qid;
	struct pdump_rxtx_cbs *cbs = NULL;

	qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
	for (; qid < end_q; qid++) {
		cbs = &rx_cbs[port][qid];
		if (cbs && operation == ENABLE) {
			if (cbs->cb) {
				RTE_LOG(ERR, PDUMP,
					"failed to add rx callback for port=%d "
					"and queue=%d, callback already exists\n",
					port, qid);
				return -EEXIST;
			}
			cbs->ring = ring;
			cbs->mp = mp;
			cbs->cb = rte_eth_add_first_rx_callback(port, qid,
								pdump_rx, cbs);
			if (cbs->cb == NULL) {
				RTE_LOG(ERR, PDUMP,
					"failed to add rx callback, errno=%d\n",
					rte_errno);
				return rte_errno;
			}
		}
		if (cbs && operation == DISABLE) {
			int ret;

			if (cbs->cb == NULL) {
				RTE_LOG(ERR, PDUMP,
					"failed to delete non-existent rx "
					"callback for port=%d and queue=%d\n",
					port, qid);
				return -EINVAL;
			}
			ret = rte_eth_remove_rx_callback(port, qid, cbs->cb);
			if (ret < 0) {
				RTE_LOG(ERR, PDUMP,
					"failed to remove rx callback, errno=%d\n",
					-ret);
				return ret;
			}
			cbs->cb = NULL;
		}
	}

	return 0;
}

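/* Tx-side counterpart of pdump_register_rx_callbacks() */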
static int
pdump_register_tx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
				struct rte_ring *ring, struct rte_mempool *mp,
				uint16_t operation)
{
	uint16_t qid;
	struct pdump_rxtx_cbs *cbs = NULL;

	qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
	for (; qid < end_q; qid++) {
		cbs = &tx_cbs[port][qid];
		if (cbs && operation == ENABLE) {
			if (cbs->cb) {
				RTE_LOG(ERR, PDUMP,
					"failed to add tx callback for port=%d "
					"and queue=%d, callback already exists\n",
					port, qid);
				return -EEXIST;
			}
			cbs->ring = ring;
			cbs->mp = mp;
			cbs->cb = rte_eth_add_tx_callback(port, qid, pdump_tx,
								cbs);
			if (cbs->cb == NULL) {
				RTE_LOG(ERR, PDUMP,
					"failed to add tx callback, errno=%d\n",
					rte_errno);
				return rte_errno;
			}
		}
		if (cbs && operation == DISABLE) {
			int ret;

			if (cbs->cb == NULL) {
				RTE_LOG(ERR, PDUMP,
					"failed to delete non-existent tx "
					"callback for port=%d and queue=%d\n",
					port, qid);
				return -EINVAL;
			}
			ret = rte_eth_remove_tx_callback(port, qid, cbs->cb);
			if (ret < 0) {
				RTE_LOG(ERR, PDUMP,
					"failed to remove tx callback, errno=%d\n",
					-ret);
				return ret;
			}
			cbs->cb = NULL;
		}
	}

	return 0;
}

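/*
 * Handle one client request in the primary process: resolve the device
 * name to a port, validate the queue counts, and register or unregister
 * the capture callbacks indicated by the request flags.
 */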
static int
set_pdump_rxtx_cbs(const struct pdump_request *p)
{
	uint16_t nb_rx_q = 0, nb_tx_q = 0, end_q, queue;
	uint16_t port;
	int ret = 0;
	uint32_t flags;
	uint16_t operation;
	struct rte_ring *ring;
	struct rte_mempool *mp;

	flags = p->flags;
	operation = p->op;
	if (operation == ENABLE) {
		ret = rte_eth_dev_get_port_by_name(p->data.en_v1.device,
				&port);
		if (ret < 0) {
			RTE_LOG(ERR, PDUMP,
				"failed to get port id for device id=%s\n",
				p->data.en_v1.device);
			return -EINVAL;
		}
		queue = p->data.en_v1.queue;
		ring = p->data.en_v1.ring;
		mp = p->data.en_v1.mp;
	} else {
		ret = rte_eth_dev_get_port_by_name(p->data.dis_v1.device,
				&port);
		if (ret < 0) {
			RTE_LOG(ERR, PDUMP,
				"failed to get port id for device id=%s\n",
				p->data.dis_v1.device);
			return -EINVAL;
		}
		queue = p->data.dis_v1.queue;
		ring = p->data.dis_v1.ring;
		mp = p->data.dis_v1.mp;
	}

	/* validate queue counts when capture applies to all queues */
	if (queue == RTE_PDUMP_ALL_QUEUES) {
		struct rte_eth_dev_info dev_info;

		rte_eth_dev_info_get(port, &dev_info);
		nb_rx_q = dev_info.nb_rx_queues;
		nb_tx_q = dev_info.nb_tx_queues;
		if (nb_rx_q == 0 && flags & RTE_PDUMP_FLAG_RX) {
			RTE_LOG(ERR, PDUMP,
				"number of rx queues cannot be 0\n");
			return -EINVAL;
		}
		if (nb_tx_q == 0 && flags & RTE_PDUMP_FLAG_TX) {
			RTE_LOG(ERR, PDUMP,
				"number of tx queues cannot be 0\n");
			return -EINVAL;
		}
		if ((nb_tx_q == 0 || nb_rx_q == 0) &&
			flags == RTE_PDUMP_FLAG_RXTX) {
			RTE_LOG(ERR, PDUMP,
				"both rx and tx queue counts must be non-zero\n");
			return -EINVAL;
		}
	}

	/* register RX callback */
	if (flags & RTE_PDUMP_FLAG_RX) {
		end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;
		ret = pdump_register_rx_callbacks(end_q, port, queue, ring, mp,
							operation);
		if (ret < 0)
			return ret;
	}

	/* register TX callback */
	if (flags & RTE_PDUMP_FLAG_TX) {
		end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;
		ret = pdump_register_tx_callbacks(end_q, port, queue, ring, mp,
							operation);
		if (ret < 0)
			return ret;
	}

	return ret;
}

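/* MP channel handler: service one pdump request and send the reply */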
static int
pdump_server(const struct rte_mp_msg *mp_msg, const void *peer)
{
	struct rte_mp_msg mp_resp;
	const struct pdump_request *cli_req;
	struct pdump_response *resp = (struct pdump_response *)&mp_resp.param;

	/* sanity-check the client request */
	if (mp_msg->len_param != sizeof(*cli_req)) {
		RTE_LOG(ERR, PDUMP, "failed to recv from client\n");
		resp->err_value = -EINVAL;
	} else {
		cli_req = (const struct pdump_request *)mp_msg->param;
		resp->ver = cli_req->ver;
		resp->res_op = cli_req->op;
		resp->err_value = set_pdump_rxtx_cbs(cli_req);
	}

	strlcpy(mp_resp.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
	mp_resp.len_param = sizeof(*resp);
	mp_resp.num_fds = 0;
	if (rte_mp_reply(&mp_resp, peer) < 0) {
		RTE_LOG(ERR, PDUMP, "failed to send to client:%s, %s:%d\n",
			strerror(rte_errno), __func__, __LINE__);
		return -1;
	}

	return 0;
}

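/*
 * Called by the primary process to start serving pdump requests. The
 * socket path argument is unused: since requests moved to the generic
 * multi-process channel, only the action name matters.
 */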
int
rte_pdump_init(const char *path __rte_unused)
{
	return rte_mp_action_register(PDUMP_MP, pdump_server);
}

int
rte_pdump_uninit(void)
{
	rte_mp_action_unregister(PDUMP_MP);

	return 0;
}

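/*
 * The ring and mempool are shared between the capture callbacks in the
 * primary process and the client in the secondary process, so both
 * must be multi-producer and multi-consumer.
 */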
static int
pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
{
	if (ring == NULL || mp == NULL) {
		RTE_LOG(ERR, PDUMP, "NULL ring or mempool passed, %s:%d\n",
			__func__, __LINE__);
		rte_errno = EINVAL;
		return -1;
	}
	if (mp->flags & MEMPOOL_F_SP_PUT || mp->flags & MEMPOOL_F_SC_GET) {
		RTE_LOG(ERR, PDUMP,
			"mempool with SP or SC settings is not valid for pdump; "
			"it must have MP and MC settings\n");
		rte_errno = EINVAL;
		return -1;
	}
	if (ring->prod.single || ring->cons.single) {
		RTE_LOG(ERR, PDUMP,
			"ring with SP or SC settings is not valid for pdump; "
			"it must have MP and MC settings\n");
		rte_errno = EINVAL;
		return -1;
	}

	return 0;
}

static int
pdump_validate_flags(uint32_t flags)
{
	if (flags != RTE_PDUMP_FLAG_RX && flags != RTE_PDUMP_FLAG_TX &&
			flags != RTE_PDUMP_FLAG_RXTX) {
		RTE_LOG(ERR, PDUMP,
			"invalid flags, should be either rx/tx/rxtx\n");
		rte_errno = EINVAL;
		return -1;
	}

	return 0;
}

static int
pdump_validate_port(uint16_t port, char *name)
{
	int ret = 0;

	if (port >= RTE_MAX_ETHPORTS) {
		RTE_LOG(ERR, PDUMP, "Invalid port id %u, %s:%d\n", port,
			__func__, __LINE__);
		rte_errno = EINVAL;
		return -1;
	}

	ret = rte_eth_dev_get_name_by_port(port, name);
	if (ret < 0) {
		RTE_LOG(ERR, PDUMP,
			"port id to name mapping failed for port id=%u, %s:%d\n",
			port, __func__, __LINE__);
		rte_errno = EINVAL;
		return -1;
	}

	return 0;
}

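/*
 * Build a v1 request, send it over the MP channel, and translate the
 * server's err_value into rte_errno and the return code.
 */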
static int
pdump_prepare_client_request(char *device, uint16_t queue,
				uint32_t flags,
				uint16_t operation,
				struct rte_ring *ring,
				struct rte_mempool *mp,
				void *filter)
{
	int ret = -1;
	struct rte_mp_msg mp_req, *mp_rep;
	struct rte_mp_reply mp_reply;
	struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
	struct pdump_request *req = (struct pdump_request *)mp_req.param;
	struct pdump_response *resp;

	req->ver = V1;
	req->flags = flags;
	req->op = operation;
	if (operation == ENABLE) {
		snprintf(req->data.en_v1.device,
			 sizeof(req->data.en_v1.device), "%s", device);
		req->data.en_v1.queue = queue;
		req->data.en_v1.ring = ring;
		req->data.en_v1.mp = mp;
		req->data.en_v1.filter = filter;
	} else {
		snprintf(req->data.dis_v1.device,
			 sizeof(req->data.dis_v1.device), "%s", device);
		req->data.dis_v1.queue = queue;
		req->data.dis_v1.ring = NULL;
		req->data.dis_v1.mp = NULL;
		req->data.dis_v1.filter = NULL;
	}

	strlcpy(mp_req.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
	mp_req.len_param = sizeof(*req);
	mp_req.num_fds = 0;
	if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0) {
		mp_rep = &mp_reply.msgs[0];
		resp = (struct pdump_response *)mp_rep->param;
		rte_errno = resp->err_value;
		if (!resp->err_value)
			ret = 0;
		free(mp_reply.msgs);
	}

	if (ret < 0)
		RTE_LOG(ERR, PDUMP,
			"client request for pdump enable/disable failed\n");
	return ret;
}

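/*
 * Minimal usage sketch from the capturing (secondary) process; the
 * names and sizes below are illustrative, not part of this API. The
 * ring is created with flags == 0 so it is multi-producer and
 * multi-consumer, as pdump_validate_ring_mp() requires:
 *
 *	struct rte_ring *r = rte_ring_create("pdump_ring", 1024,
 *			rte_socket_id(), 0);
 *	struct rte_mempool *pool = rte_pktmbuf_pool_create("pdump_pool",
 *			8191, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *			rte_socket_id());
 *	rte_pdump_enable(port_id, RTE_PDUMP_ALL_QUEUES,
 *			RTE_PDUMP_FLAG_RXTX, r, pool, NULL);
 *	... dequeue the duplicated mbufs from r and persist them ...
 *	rte_pdump_disable(port_id, RTE_PDUMP_ALL_QUEUES,
 *			RTE_PDUMP_FLAG_RXTX);
 */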
int
rte_pdump_enable(uint16_t port, uint16_t queue, uint32_t flags,
		struct rte_ring *ring,
		struct rte_mempool *mp,
		void *filter)
{
	int ret = 0;
	char name[DEVICE_ID_SIZE];

	ret = pdump_validate_port(port, name);
	if (ret < 0)
		return ret;
	ret = pdump_validate_ring_mp(ring, mp);
	if (ret < 0)
		return ret;
	ret = pdump_validate_flags(flags);
	if (ret < 0)
		return ret;

	ret = pdump_prepare_client_request(name, queue, flags,
						ENABLE, ring, mp, filter);

	return ret;
}

int
rte_pdump_enable_by_deviceid(char *device_id, uint16_t queue,
				uint32_t flags,
				struct rte_ring *ring,
				struct rte_mempool *mp,
				void *filter)
{
	int ret = 0;

	ret = pdump_validate_ring_mp(ring, mp);
	if (ret < 0)
		return ret;
	ret = pdump_validate_flags(flags);
	if (ret < 0)
		return ret;

	ret = pdump_prepare_client_request(device_id, queue, flags,
						ENABLE, ring, mp, filter);

	return ret;
}

int
rte_pdump_disable(uint16_t port, uint16_t queue, uint32_t flags)
{
	int ret = 0;
	char name[DEVICE_ID_SIZE];

	ret = pdump_validate_port(port, name);
	if (ret < 0)
		return ret;
	ret = pdump_validate_flags(flags);
	if (ret < 0)
		return ret;

	ret = pdump_prepare_client_request(name, queue, flags,
						DISABLE, NULL, NULL, NULL);

	return ret;
}

int
rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
				uint32_t flags)
{
	int ret = 0;

	ret = pdump_validate_flags(flags);
	if (ret < 0)
		return ret;

	ret = pdump_prepare_client_request(device_id, queue, flags,
						DISABLE, NULL, NULL, NULL);

	return ret;
}

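/*
 * No-op kept for backward compatibility: pdump requests now travel
 * over the generic multi-process channel rather than a private socket.
 */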
int
rte_pdump_set_socket_dir(const char *path __rte_unused,
			 enum rte_pdump_socktype type __rte_unused)
{
	return 0;
}