/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>
#include <errno.h>
#include <ctype.h>
#include <sys/queue.h>

#include <rte_compat.h>
#include <rte_debug.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_string_fns.h>
#include <rte_hexdump.h>
#include <rte_errno.h>
#include <rte_memcpy.h>

/*
 * pktmbuf pool constructor, given as a callback function to
 * rte_mempool_create(), or called directly if using
 * rte_mempool_create_empty()/rte_mempool_populate().
 */
void
rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
{
	struct rte_pktmbuf_pool_private *user_mbp_priv, *mbp_priv;
	struct rte_pktmbuf_pool_private default_mbp_priv;
	uint16_t roomsz;

	RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));

	/* if no structure is provided, assume no mbuf private area */
	user_mbp_priv = opaque_arg;
	if (user_mbp_priv == NULL) {
		default_mbp_priv.mbuf_priv_size = 0;
		if (mp->elt_size > sizeof(struct rte_mbuf))
			roomsz = mp->elt_size - sizeof(struct rte_mbuf);
		else
			roomsz = 0;
		default_mbp_priv.mbuf_data_room_size = roomsz;
		user_mbp_priv = &default_mbp_priv;
	}

	RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
		user_mbp_priv->mbuf_data_room_size +
		user_mbp_priv->mbuf_priv_size);

	mbp_priv = rte_mempool_get_priv(mp);
	memcpy(mbp_priv, user_mbp_priv, sizeof(*mbp_priv));
}

/*
 * pktmbuf constructor, given as a callback function to
 * rte_mempool_obj_iter() or rte_mempool_create().
 * Set the fields of a packet mbuf to their default values.
 */
void
rte_pktmbuf_init(struct rte_mempool *mp,
		 __attribute__((unused)) void *opaque_arg,
		 void *_m,
		 __attribute__((unused)) unsigned i)
{
	struct rte_mbuf *m = _m;
	uint32_t mbuf_size, buf_len, priv_size;

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = sizeof(struct rte_mbuf) + priv_size;
	buf_len = rte_pktmbuf_data_room_size(mp);

	RTE_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
	RTE_ASSERT(mp->elt_size >= mbuf_size);
	RTE_ASSERT(buf_len <= UINT16_MAX);

	memset(m, 0, mbuf_size);
	/* start of buffer is after mbuf structure and priv data */
	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;

	/* keep some headroom between start of buffer and data */
	m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);

	/* init some constant fields */
	m->pool = mp;
	m->nb_segs = 1;
	m->port = MBUF_INVALID_PORT;
	rte_mbuf_refcnt_set(m, 1);
	m->next = NULL;
}

/* Helper to create a mbuf pool with given mempool ops name */
struct rte_mempool *
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id, const char *ops_name)
{
	struct rte_mempool *mp;
	struct rte_pktmbuf_pool_private mbp_priv;
	const char *mp_ops_name = ops_name;
	unsigned elt_size;
	int ret;

	if (RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) != priv_size) {
		RTE_LOG(ERR, MBUF, "mbuf priv_size=%u is not aligned\n",
			priv_size);
		rte_errno = EINVAL;
		return NULL;
	}
	elt_size = sizeof(struct rte_mbuf) + (unsigned)priv_size +
		(unsigned)data_room_size;
	mbp_priv.mbuf_data_room_size = data_room_size;
	mbp_priv.mbuf_priv_size = priv_size;

	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
		sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
	if (mp == NULL)
		return NULL;

	if (mp_ops_name == NULL)
		mp_ops_name = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, MBUF, "error setting mempool handler\n");
		rte_mempool_free(mp);
		rte_errno = -ret;
		return NULL;
	}
	rte_pktmbuf_pool_init(mp, &mbp_priv);

	ret = rte_mempool_populate_default(mp);
	if (ret < 0) {
		rte_mempool_free(mp);
		rte_errno = -ret;
		return NULL;
	}

	rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);

	return mp;
}

/* helper to create a mbuf pool */
struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id)
{
	return rte_pktmbuf_pool_create_by_ops(name, n, cache_size, priv_size,
			data_room_size, socket_id, NULL);
}
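
/*
 * Illustrative usage sketch (not part of this file): an application would
 * typically create one mbuf pool per NUMA socket with this helper and then
 * allocate/free mbufs from it. The pool name and sizing parameters below
 * are hypothetical.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("mbuf_pool", 8191, 256, 0,
 *		RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mbuf pool: %s\n",
 *			rte_strerror(rte_errno));
 *
 *	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
 *	if (m != NULL)
 *		rte_pktmbuf_free(m);
 */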

/* do some sanity checks on a mbuf: panic if it fails */
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
{
	const char *reason;

	if (rte_mbuf_check(m, is_header, &reason))
		rte_panic("%s\n", reason);
}

__rte_experimental
int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
		   const char **reason)
{
	unsigned int nb_segs, pkt_len;

	if (m == NULL) {
		*reason = "mbuf is NULL";
		return -1;
	}

	/* generic checks */
	if (m->pool == NULL) {
		*reason = "bad mbuf pool";
		return -1;
	}
	if (m->buf_iova == 0) {
		*reason = "bad IO addr";
		return -1;
	}
	if (m->buf_addr == NULL) {
		*reason = "bad virt addr";
		return -1;
	}

	uint16_t cnt = rte_mbuf_refcnt_read(m);
	if ((cnt == 0) || (cnt == UINT16_MAX)) {
		*reason = "bad ref cnt";
		return -1;
	}

	/* nothing to check for sub-segments */
	if (is_header == 0)
		return 0;

	/* data_len is supposed to be not more than pkt_len */
	if (m->data_len > m->pkt_len) {
		*reason = "bad data_len";
		return -1;
	}

	nb_segs = m->nb_segs;
	pkt_len = m->pkt_len;

	do {
		if (m->data_off > m->buf_len) {
			*reason = "data offset too big in mbuf segment";
			return -1;
		}
		if (m->data_off + m->data_len > m->buf_len) {
			*reason = "data length too big in mbuf segment";
			return -1;
		}
		nb_segs -= 1;
		pkt_len -= m->data_len;
	} while ((m = m->next) != NULL);

	if (nb_segs) {
		*reason = "bad nb_segs";
		return -1;
	}
	if (pkt_len) {
		*reason = "bad pkt_len";
		return -1;
	}

	return 0;
}
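
/*
 * Illustrative sketch (not part of this file): rte_mbuf_check() is the
 * non-panicking counterpart of rte_mbuf_sanity_check(), e.g. usable in a
 * debug path of an application (it is experimental API at this DPDK level):
 *
 *	const char *reason;
 *
 *	if (rte_mbuf_check(m, 1, &reason) != 0)
 *		RTE_LOG(WARNING, MBUF, "invalid mbuf: %s\n", reason);
 */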

/* dump a mbuf on console */
void
rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
{
	unsigned int len;
	unsigned int nb_segs;

	__rte_mbuf_sanity_check(m, 1);

	fprintf(f, "dump mbuf at %p, iova=%"PRIx64", buf_len=%u\n",
		m, (uint64_t)m->buf_iova, (unsigned)m->buf_len);
	fprintf(f, "  pkt_len=%"PRIu32", ol_flags=%"PRIx64", nb_segs=%u, "
		"in_port=%u\n", m->pkt_len, m->ol_flags,
		(unsigned)m->nb_segs, (unsigned)m->port);
	nb_segs = m->nb_segs;

	while (m && nb_segs != 0) {
		__rte_mbuf_sanity_check(m, 0);

		fprintf(f, "  segment at %p, data=%p, data_len=%u\n",
			m, rte_pktmbuf_mtod(m, void *), (unsigned)m->data_len);
		len = dump_len;
		if (len > m->data_len)
			len = m->data_len;
		if (len != 0)
			rte_hexdump(f, NULL, rte_pktmbuf_mtod(m, void *), len);
		dump_len -= len;
		m = m->next;
		nb_segs--;
	}
}
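
/*
 * Illustrative sketch (not part of this file): dump_len is the number of
 * data bytes to hexdump, so dumping a whole received packet to stdout while
 * debugging could look like:
 *
 *	rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
 */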

/* read len data bytes in a mbuf at specified offset (internal) */
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf)
{
	const struct rte_mbuf *seg = m;
	uint32_t buf_off = 0, copy_len;

	if (off + len > rte_pktmbuf_pkt_len(m))
		return NULL;

	while (off >= rte_pktmbuf_data_len(seg)) {
		off -= rte_pktmbuf_data_len(seg);
		seg = seg->next;
	}

	if (off + len <= rte_pktmbuf_data_len(seg))
		return rte_pktmbuf_mtod_offset(seg, char *, off);

	/* rare case: header is split among several segments */
	while (len > 0) {
		copy_len = rte_pktmbuf_data_len(seg) - off;
		if (copy_len > len)
			copy_len = len;
		rte_memcpy((char *)buf + buf_off,
			rte_pktmbuf_mtod_offset(seg, char *, off), copy_len);
		off = 0;
		buf_off += copy_len;
		len -= copy_len;
		seg = seg->next;
	}

	return buf;
}
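
/*
 * Illustrative sketch (not part of this file): callers normally use the
 * public rte_pktmbuf_read() wrapper around this helper. The supplied buffer
 * is only written to when the requested region spans several segments;
 * otherwise a pointer into the mbuf data is returned directly. The buffer
 * size and offset below are hypothetical.
 *
 *	uint8_t hdr_buf[64];
 *	const void *hdr;
 *
 *	hdr = rte_pktmbuf_read(m, 0, sizeof(hdr_buf), hdr_buf);
 *	if (hdr == NULL)
 *		return;		// packet shorter than requested region
 */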

/*
 * Get the name of a RX offload flag. Must be kept synchronized with flag
 * definitions in rte_mbuf.h.
 */
const char *rte_get_rx_ol_flag_name(uint64_t mask)
{
	switch (mask) {
	case PKT_RX_VLAN: return "PKT_RX_VLAN";
	case PKT_RX_RSS_HASH: return "PKT_RX_RSS_HASH";
	case PKT_RX_FDIR: return "PKT_RX_FDIR";
	case PKT_RX_L4_CKSUM_BAD: return "PKT_RX_L4_CKSUM_BAD";
	case PKT_RX_L4_CKSUM_GOOD: return "PKT_RX_L4_CKSUM_GOOD";
	case PKT_RX_L4_CKSUM_NONE: return "PKT_RX_L4_CKSUM_NONE";
	case PKT_RX_IP_CKSUM_BAD: return "PKT_RX_IP_CKSUM_BAD";
	case PKT_RX_IP_CKSUM_GOOD: return "PKT_RX_IP_CKSUM_GOOD";
	case PKT_RX_IP_CKSUM_NONE: return "PKT_RX_IP_CKSUM_NONE";
	case PKT_RX_EIP_CKSUM_BAD: return "PKT_RX_EIP_CKSUM_BAD";
	case PKT_RX_VLAN_STRIPPED: return "PKT_RX_VLAN_STRIPPED";
	case PKT_RX_IEEE1588_PTP: return "PKT_RX_IEEE1588_PTP";
	case PKT_RX_IEEE1588_TMST: return "PKT_RX_IEEE1588_TMST";
	case PKT_RX_FDIR_ID: return "PKT_RX_FDIR_ID";
	case PKT_RX_FDIR_FLX: return "PKT_RX_FDIR_FLX";
	case PKT_RX_QINQ_STRIPPED: return "PKT_RX_QINQ_STRIPPED";
	case PKT_RX_QINQ: return "PKT_RX_QINQ";
	case PKT_RX_LRO: return "PKT_RX_LRO";
	case PKT_RX_TIMESTAMP: return "PKT_RX_TIMESTAMP";
	case PKT_RX_SEC_OFFLOAD: return "PKT_RX_SEC_OFFLOAD";
	case PKT_RX_SEC_OFFLOAD_FAILED: return "PKT_RX_SEC_OFFLOAD_FAILED";
	case PKT_RX_OUTER_L4_CKSUM_BAD: return "PKT_RX_OUTER_L4_CKSUM_BAD";
	case PKT_RX_OUTER_L4_CKSUM_GOOD: return "PKT_RX_OUTER_L4_CKSUM_GOOD";
	case PKT_RX_OUTER_L4_CKSUM_INVALID:
		return "PKT_RX_OUTER_L4_CKSUM_INVALID";

	default: return NULL;
	}
}

struct flag_mask {
	uint64_t flag;
	uint64_t mask;
	const char *default_name;
};

/* write the list of rx ol flags in buffer buf */
int
rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
{
	const struct flag_mask rx_flags[] = {
		{ PKT_RX_VLAN, PKT_RX_VLAN, NULL },
		{ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, NULL },
		{ PKT_RX_FDIR, PKT_RX_FDIR, NULL },
		{ PKT_RX_L4_CKSUM_BAD, PKT_RX_L4_CKSUM_MASK, NULL },
		{ PKT_RX_L4_CKSUM_GOOD, PKT_RX_L4_CKSUM_MASK, NULL },
		{ PKT_RX_L4_CKSUM_NONE, PKT_RX_L4_CKSUM_MASK, NULL },
		{ PKT_RX_L4_CKSUM_UNKNOWN, PKT_RX_L4_CKSUM_MASK,
		  "PKT_RX_L4_CKSUM_UNKNOWN" },
		{ PKT_RX_IP_CKSUM_BAD, PKT_RX_IP_CKSUM_MASK, NULL },
		{ PKT_RX_IP_CKSUM_GOOD, PKT_RX_IP_CKSUM_MASK, NULL },
		{ PKT_RX_IP_CKSUM_NONE, PKT_RX_IP_CKSUM_MASK, NULL },
		{ PKT_RX_IP_CKSUM_UNKNOWN, PKT_RX_IP_CKSUM_MASK,
		  "PKT_RX_IP_CKSUM_UNKNOWN" },
		{ PKT_RX_EIP_CKSUM_BAD, PKT_RX_EIP_CKSUM_BAD, NULL },
		{ PKT_RX_VLAN_STRIPPED, PKT_RX_VLAN_STRIPPED, NULL },
		{ PKT_RX_IEEE1588_PTP, PKT_RX_IEEE1588_PTP, NULL },
		{ PKT_RX_IEEE1588_TMST, PKT_RX_IEEE1588_TMST, NULL },
		{ PKT_RX_FDIR_ID, PKT_RX_FDIR_ID, NULL },
		{ PKT_RX_FDIR_FLX, PKT_RX_FDIR_FLX, NULL },
		{ PKT_RX_QINQ_STRIPPED, PKT_RX_QINQ_STRIPPED, NULL },
		{ PKT_RX_LRO, PKT_RX_LRO, NULL },
		{ PKT_RX_TIMESTAMP, PKT_RX_TIMESTAMP, NULL },
		{ PKT_RX_SEC_OFFLOAD, PKT_RX_SEC_OFFLOAD, NULL },
		{ PKT_RX_SEC_OFFLOAD_FAILED, PKT_RX_SEC_OFFLOAD_FAILED, NULL },
		{ PKT_RX_QINQ, PKT_RX_QINQ, NULL },
		{ PKT_RX_OUTER_L4_CKSUM_BAD, PKT_RX_OUTER_L4_CKSUM_MASK, NULL },
		{ PKT_RX_OUTER_L4_CKSUM_GOOD, PKT_RX_OUTER_L4_CKSUM_MASK,
		  NULL },
		{ PKT_RX_OUTER_L4_CKSUM_INVALID, PKT_RX_OUTER_L4_CKSUM_MASK,
		  NULL },
		{ PKT_RX_OUTER_L4_CKSUM_UNKNOWN, PKT_RX_OUTER_L4_CKSUM_MASK,
		  "PKT_RX_OUTER_L4_CKSUM_UNKNOWN" },
	};
	const char *name;
	unsigned int i;
	int ret;

	if (buflen == 0)
		return -1;

	buf[0] = '\0';
	for (i = 0; i < RTE_DIM(rx_flags); i++) {
		if ((mask & rx_flags[i].mask) != rx_flags[i].flag)
			continue;
		name = rte_get_rx_ol_flag_name(rx_flags[i].flag);
		if (name == NULL)
			name = rx_flags[i].default_name;
		ret = snprintf(buf, buflen, "%s ", name);
		if (ret < 0)
			return -1;
		if ((size_t)ret >= buflen)
			return -1;
		buf += ret;
		buflen -= ret;
	}

	return 0;
}
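
/*
 * Illustrative sketch (not part of this file): printing the RX offload
 * flags of a received mbuf in human-readable form. The buffer size below
 * is hypothetical.
 *
 *	char flags_str[256];
 *
 *	if (rte_get_rx_ol_flag_list(m->ol_flags, flags_str,
 *			sizeof(flags_str)) == 0)
 *		printf("rx ol_flags: %s\n", flags_str);
 */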

/*
 * Get the name of a TX offload flag. Must be kept synchronized with flag
 * definitions in rte_mbuf.h.
 */
const char *rte_get_tx_ol_flag_name(uint64_t mask)
{
	switch (mask) {
	case PKT_TX_VLAN: return "PKT_TX_VLAN";
	case PKT_TX_IP_CKSUM: return "PKT_TX_IP_CKSUM";
	case PKT_TX_TCP_CKSUM: return "PKT_TX_TCP_CKSUM";
	case PKT_TX_SCTP_CKSUM: return "PKT_TX_SCTP_CKSUM";
	case PKT_TX_UDP_CKSUM: return "PKT_TX_UDP_CKSUM";
	case PKT_TX_IEEE1588_TMST: return "PKT_TX_IEEE1588_TMST";
	case PKT_TX_TCP_SEG: return "PKT_TX_TCP_SEG";
	case PKT_TX_IPV4: return "PKT_TX_IPV4";
	case PKT_TX_IPV6: return "PKT_TX_IPV6";
	case PKT_TX_OUTER_IP_CKSUM: return "PKT_TX_OUTER_IP_CKSUM";
	case PKT_TX_OUTER_IPV4: return "PKT_TX_OUTER_IPV4";
	case PKT_TX_OUTER_IPV6: return "PKT_TX_OUTER_IPV6";
	case PKT_TX_TUNNEL_VXLAN: return "PKT_TX_TUNNEL_VXLAN";
	case PKT_TX_TUNNEL_GRE: return "PKT_TX_TUNNEL_GRE";
	case PKT_TX_TUNNEL_IPIP: return "PKT_TX_TUNNEL_IPIP";
	case PKT_TX_TUNNEL_GENEVE: return "PKT_TX_TUNNEL_GENEVE";
	case PKT_TX_TUNNEL_MPLSINUDP: return "PKT_TX_TUNNEL_MPLSINUDP";
	case PKT_TX_TUNNEL_VXLAN_GPE: return "PKT_TX_TUNNEL_VXLAN_GPE";
	case PKT_TX_TUNNEL_IP: return "PKT_TX_TUNNEL_IP";
	case PKT_TX_TUNNEL_UDP: return "PKT_TX_TUNNEL_UDP";
	case PKT_TX_QINQ: return "PKT_TX_QINQ";
	case PKT_TX_MACSEC: return "PKT_TX_MACSEC";
	case PKT_TX_SEC_OFFLOAD: return "PKT_TX_SEC_OFFLOAD";
	case PKT_TX_UDP_SEG: return "PKT_TX_UDP_SEG";
	case PKT_TX_OUTER_UDP_CKSUM: return "PKT_TX_OUTER_UDP_CKSUM";
	case PKT_TX_METADATA: return "PKT_TX_METADATA";
	default: return NULL;
	}
}

/* write the list of tx ol flags in buffer buf */
int
rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
{
	const struct flag_mask tx_flags[] = {
		{ PKT_TX_VLAN, PKT_TX_VLAN, NULL },
		{ PKT_TX_IP_CKSUM, PKT_TX_IP_CKSUM, NULL },
		{ PKT_TX_TCP_CKSUM, PKT_TX_L4_MASK, NULL },
		{ PKT_TX_SCTP_CKSUM, PKT_TX_L4_MASK, NULL },
		{ PKT_TX_UDP_CKSUM, PKT_TX_L4_MASK, NULL },
		{ PKT_TX_L4_NO_CKSUM, PKT_TX_L4_MASK, "PKT_TX_L4_NO_CKSUM" },
		{ PKT_TX_IEEE1588_TMST, PKT_TX_IEEE1588_TMST, NULL },
		{ PKT_TX_TCP_SEG, PKT_TX_TCP_SEG, NULL },
		{ PKT_TX_IPV4, PKT_TX_IPV4, NULL },
		{ PKT_TX_IPV6, PKT_TX_IPV6, NULL },
		{ PKT_TX_OUTER_IP_CKSUM, PKT_TX_OUTER_IP_CKSUM, NULL },
		{ PKT_TX_OUTER_IPV4, PKT_TX_OUTER_IPV4, NULL },
		{ PKT_TX_OUTER_IPV6, PKT_TX_OUTER_IPV6, NULL },
		{ PKT_TX_TUNNEL_VXLAN, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_GRE, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_IPIP, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_GENEVE, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_MPLSINUDP, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_VXLAN_GPE, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_IP, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_TUNNEL_UDP, PKT_TX_TUNNEL_MASK, NULL },
		{ PKT_TX_QINQ, PKT_TX_QINQ, NULL },
		{ PKT_TX_MACSEC, PKT_TX_MACSEC, NULL },
		{ PKT_TX_SEC_OFFLOAD, PKT_TX_SEC_OFFLOAD, NULL },
		{ PKT_TX_UDP_SEG, PKT_TX_UDP_SEG, NULL },
		{ PKT_TX_OUTER_UDP_CKSUM, PKT_TX_OUTER_UDP_CKSUM, NULL },
		{ PKT_TX_METADATA, PKT_TX_METADATA, NULL },
	};
	const char *name;
	unsigned int i;
	int ret;

	if (buflen == 0)
		return -1;

	buf[0] = '\0';
	for (i = 0; i < RTE_DIM(tx_flags); i++) {
		if ((mask & tx_flags[i].mask) != tx_flags[i].flag)
			continue;
		name = rte_get_tx_ol_flag_name(tx_flags[i].flag);
		if (name == NULL)
			name = tx_flags[i].default_name;
		ret = snprintf(buf, buflen, "%s ", name);
		if (ret < 0)
			return -1;
		if ((size_t)ret >= buflen)
			return -1;
		buf += ret;
		buflen -= ret;
	}

	return 0;
}