/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */
5 #ifndef __OCTEONTX_RXTX_H__
6 #define __OCTEONTX_RXTX_H__
8 #include <rte_ethdev_driver.h>
/* Per-queue Rx/Tx offload flag members; embedded verbatim into the
 * rxq/txq structures so both carry the same pair of fields.
 */
#define OFFLOAD_FLAGS					\
	uint16_t rx_offload_flags;			\
	uint16_t tx_offload_flags

#define BIT(nr) (1UL << (nr))

/* Rx fastpath offload flag bits */
#define OCCTX_RX_OFFLOAD_NONE		(0)
#define OCCTX_RX_MULTI_SEG_F		BIT(0)
#define OCCTX_RX_OFFLOAD_CSUM_F		BIT(1)
#define OCCTX_RX_VLAN_FLTR_F		BIT(2)

/* Tx fastpath offload flag bits */
#define OCCTX_TX_OFFLOAD_NONE		(0)
#define OCCTX_TX_MULTI_SEG_F		BIT(0)
#define OCCTX_TX_OFFLOAD_L3_L4_CSUM_F	BIT(1)
#define OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F	BIT(2)
#define OCCTX_TX_OFFLOAD_MBUF_NOFF_F	BIT(3)

/* Packet type table is indexed by the PKI parser layer-type codes */
#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST
30 /* octeontx send header sub descriptor structure */
32 union octeontx_send_hdr_w0_u
{
57 union octeontx_send_hdr_w1_u
{
60 uint64_t tso_mss
: 14;
71 struct octeontx_send_hdr_s
{
72 union octeontx_send_hdr_w0_u w0
;
73 union octeontx_send_hdr_w1_u w1
;
76 static const uint32_t __rte_cache_aligned
77 ptype_table
[PTYPE_SIZE
][PTYPE_SIZE
][PTYPE_SIZE
] = {
78 [LC_NONE
][LE_NONE
][LF_NONE
] = RTE_PTYPE_UNKNOWN
,
79 [LC_NONE
][LE_NONE
][LF_IPSEC_ESP
] = RTE_PTYPE_UNKNOWN
,
80 [LC_NONE
][LE_NONE
][LF_IPFRAG
] = RTE_PTYPE_L4_FRAG
,
81 [LC_NONE
][LE_NONE
][LF_IPCOMP
] = RTE_PTYPE_UNKNOWN
,
82 [LC_NONE
][LE_NONE
][LF_TCP
] = RTE_PTYPE_L4_TCP
,
83 [LC_NONE
][LE_NONE
][LF_UDP
] = RTE_PTYPE_L4_UDP
,
84 [LC_NONE
][LE_NONE
][LF_GRE
] = RTE_PTYPE_TUNNEL_GRE
,
85 [LC_NONE
][LE_NONE
][LF_UDP_GENEVE
] = RTE_PTYPE_TUNNEL_GENEVE
,
86 [LC_NONE
][LE_NONE
][LF_UDP_VXLAN
] = RTE_PTYPE_TUNNEL_VXLAN
,
87 [LC_NONE
][LE_NONE
][LF_NVGRE
] = RTE_PTYPE_TUNNEL_NVGRE
,
89 [LC_IPV4
][LE_NONE
][LF_NONE
] = RTE_PTYPE_L3_IPV4
| RTE_PTYPE_UNKNOWN
,
90 [LC_IPV4
][LE_NONE
][LF_IPSEC_ESP
] =
91 RTE_PTYPE_L3_IPV4
| RTE_PTYPE_L3_IPV4
,
92 [LC_IPV4
][LE_NONE
][LF_IPFRAG
] = RTE_PTYPE_L3_IPV4
| RTE_PTYPE_L4_FRAG
,
93 [LC_IPV4
][LE_NONE
][LF_IPCOMP
] = RTE_PTYPE_L3_IPV4
| RTE_PTYPE_UNKNOWN
,
94 [LC_IPV4
][LE_NONE
][LF_TCP
] = RTE_PTYPE_L3_IPV4
| RTE_PTYPE_L4_TCP
,
95 [LC_IPV4
][LE_NONE
][LF_UDP
] = RTE_PTYPE_L3_IPV4
| RTE_PTYPE_L4_UDP
,
96 [LC_IPV4
][LE_NONE
][LF_GRE
] = RTE_PTYPE_L3_IPV4
| RTE_PTYPE_TUNNEL_GRE
,
97 [LC_IPV4
][LE_NONE
][LF_UDP_GENEVE
] =
98 RTE_PTYPE_L3_IPV4
| RTE_PTYPE_TUNNEL_GENEVE
,
99 [LC_IPV4
][LE_NONE
][LF_UDP_VXLAN
] =
100 RTE_PTYPE_L3_IPV4
| RTE_PTYPE_TUNNEL_VXLAN
,
101 [LC_IPV4
][LE_NONE
][LF_NVGRE
] =
102 RTE_PTYPE_L3_IPV4
| RTE_PTYPE_TUNNEL_NVGRE
,
104 [LC_IPV4_OPT
][LE_NONE
][LF_NONE
] =
105 RTE_PTYPE_L3_IPV4_EXT
| RTE_PTYPE_UNKNOWN
,
106 [LC_IPV4_OPT
][LE_NONE
][LF_IPSEC_ESP
] =
107 RTE_PTYPE_L3_IPV4_EXT
| RTE_PTYPE_L3_IPV4
,
108 [LC_IPV4_OPT
][LE_NONE
][LF_IPFRAG
] =
109 RTE_PTYPE_L3_IPV4_EXT
| RTE_PTYPE_L4_FRAG
,
110 [LC_IPV4_OPT
][LE_NONE
][LF_IPCOMP
] =
111 RTE_PTYPE_L3_IPV4_EXT
| RTE_PTYPE_UNKNOWN
,
112 [LC_IPV4_OPT
][LE_NONE
][LF_TCP
] =
113 RTE_PTYPE_L3_IPV4_EXT
| RTE_PTYPE_L4_TCP
,
114 [LC_IPV4_OPT
][LE_NONE
][LF_UDP
] =
115 RTE_PTYPE_L3_IPV4_EXT
| RTE_PTYPE_L4_UDP
,
116 [LC_IPV4_OPT
][LE_NONE
][LF_GRE
] =
117 RTE_PTYPE_L3_IPV4_EXT
| RTE_PTYPE_TUNNEL_GRE
,
118 [LC_IPV4_OPT
][LE_NONE
][LF_UDP_GENEVE
] =
119 RTE_PTYPE_L3_IPV4_EXT
| RTE_PTYPE_TUNNEL_GENEVE
,
120 [LC_IPV4_OPT
][LE_NONE
][LF_UDP_VXLAN
] =
121 RTE_PTYPE_L3_IPV4_EXT
| RTE_PTYPE_TUNNEL_VXLAN
,
122 [LC_IPV4_OPT
][LE_NONE
][LF_NVGRE
] =
123 RTE_PTYPE_L3_IPV4_EXT
| RTE_PTYPE_TUNNEL_NVGRE
,
125 [LC_IPV6
][LE_NONE
][LF_NONE
] = RTE_PTYPE_L3_IPV6
| RTE_PTYPE_UNKNOWN
,
126 [LC_IPV6
][LE_NONE
][LF_IPSEC_ESP
] =
127 RTE_PTYPE_L3_IPV6
| RTE_PTYPE_L3_IPV4
,
128 [LC_IPV6
][LE_NONE
][LF_IPFRAG
] = RTE_PTYPE_L3_IPV6
| RTE_PTYPE_L4_FRAG
,
129 [LC_IPV6
][LE_NONE
][LF_IPCOMP
] = RTE_PTYPE_L3_IPV6
| RTE_PTYPE_UNKNOWN
,
130 [LC_IPV6
][LE_NONE
][LF_TCP
] = RTE_PTYPE_L3_IPV6
| RTE_PTYPE_L4_TCP
,
131 [LC_IPV6
][LE_NONE
][LF_UDP
] = RTE_PTYPE_L3_IPV6
| RTE_PTYPE_L4_UDP
,
132 [LC_IPV6
][LE_NONE
][LF_GRE
] = RTE_PTYPE_L3_IPV6
| RTE_PTYPE_TUNNEL_GRE
,
133 [LC_IPV6
][LE_NONE
][LF_UDP_GENEVE
] =
134 RTE_PTYPE_L3_IPV6
| RTE_PTYPE_TUNNEL_GENEVE
,
135 [LC_IPV6
][LE_NONE
][LF_UDP_VXLAN
] =
136 RTE_PTYPE_L3_IPV6
| RTE_PTYPE_TUNNEL_VXLAN
,
137 [LC_IPV6
][LE_NONE
][LF_NVGRE
] =
138 RTE_PTYPE_L3_IPV4
| RTE_PTYPE_TUNNEL_NVGRE
,
139 [LC_IPV6_OPT
][LE_NONE
][LF_NONE
] =
140 RTE_PTYPE_L3_IPV6_EXT
| RTE_PTYPE_UNKNOWN
,
141 [LC_IPV6_OPT
][LE_NONE
][LF_IPSEC_ESP
] =
142 RTE_PTYPE_L3_IPV6_EXT
| RTE_PTYPE_L3_IPV4
,
143 [LC_IPV6_OPT
][LE_NONE
][LF_IPFRAG
] =
144 RTE_PTYPE_L3_IPV6_EXT
| RTE_PTYPE_L4_FRAG
,
145 [LC_IPV6_OPT
][LE_NONE
][LF_IPCOMP
] =
146 RTE_PTYPE_L3_IPV6_EXT
| RTE_PTYPE_UNKNOWN
,
147 [LC_IPV6_OPT
][LE_NONE
][LF_TCP
] =
148 RTE_PTYPE_L3_IPV6_EXT
| RTE_PTYPE_L4_TCP
,
149 [LC_IPV6_OPT
][LE_NONE
][LF_UDP
] =
150 RTE_PTYPE_L3_IPV6_EXT
| RTE_PTYPE_L4_UDP
,
151 [LC_IPV6_OPT
][LE_NONE
][LF_GRE
] =
152 RTE_PTYPE_L3_IPV6_EXT
| RTE_PTYPE_TUNNEL_GRE
,
153 [LC_IPV6_OPT
][LE_NONE
][LF_UDP_GENEVE
] =
154 RTE_PTYPE_L3_IPV6_EXT
| RTE_PTYPE_TUNNEL_GENEVE
,
155 [LC_IPV6_OPT
][LE_NONE
][LF_UDP_VXLAN
] =
156 RTE_PTYPE_L3_IPV6_EXT
| RTE_PTYPE_TUNNEL_VXLAN
,
157 [LC_IPV6_OPT
][LE_NONE
][LF_NVGRE
] =
158 RTE_PTYPE_L3_IPV6_EXT
| RTE_PTYPE_TUNNEL_NVGRE
,
163 static __rte_always_inline
uint64_t
164 octeontx_pktmbuf_detach(struct rte_mbuf
*m
)
166 struct rte_mempool
*mp
= m
->pool
;
167 uint32_t mbuf_size
, buf_len
;
172 /* Update refcount of direct mbuf */
173 md
= rte_mbuf_from_indirect(m
);
174 refcount
= rte_mbuf_refcnt_update(md
, -1);
176 priv_size
= rte_pktmbuf_priv_size(mp
);
177 mbuf_size
= (uint32_t)(sizeof(struct rte_mbuf
) + priv_size
);
178 buf_len
= rte_pktmbuf_data_room_size(mp
);
180 m
->priv_size
= priv_size
;
181 m
->buf_addr
= (char *)m
+ mbuf_size
;
182 m
->buf_iova
= rte_mempool_virt2iova(m
) + mbuf_size
;
183 m
->buf_len
= (uint16_t)buf_len
;
184 rte_pktmbuf_reset_headroom(m
);
190 /* Now indirect mbuf is safe to free */
194 rte_mbuf_refcnt_set(md
, 1);
205 static __rte_always_inline
uint64_t
206 octeontx_prefree_seg(struct rte_mbuf
*m
)
208 if (likely(rte_mbuf_refcnt_read(m
) == 1)) {
209 if (!RTE_MBUF_DIRECT(m
))
210 return octeontx_pktmbuf_detach(m
);
215 } else if (rte_mbuf_refcnt_update(m
, -1) == 0) {
216 if (!RTE_MBUF_DIRECT(m
))
217 return octeontx_pktmbuf_detach(m
);
219 rte_mbuf_refcnt_set(m
, 1);
225 /* Mbuf is having refcount more than 1 so need not to be freed */
229 static __rte_always_inline
void
230 octeontx_tx_checksum_offload(uint64_t *cmd_buf
, const uint16_t flags
,
233 struct octeontx_send_hdr_s
*send_hdr
=
234 (struct octeontx_send_hdr_s
*)cmd_buf
;
235 uint64_t ol_flags
= m
->ol_flags
;
237 /* PKO Checksum L4 Algorithm Enumeration
239 * 0x1 - UDP L4 checksum
240 * 0x2 - TCP L4 checksum
241 * 0x3 - SCTP L4 checksum
243 const uint8_t csum
= (!(((ol_flags
^ PKT_TX_UDP_CKSUM
) >> 52) & 0x3) +
244 (!(((ol_flags
^ PKT_TX_TCP_CKSUM
) >> 52) & 0x3) * 2) +
245 (!(((ol_flags
^ PKT_TX_SCTP_CKSUM
) >> 52) & 0x3) * 3));
247 const uint8_t is_tunnel_parsed
= (!!(ol_flags
& PKT_TX_TUNNEL_GTP
) ||
248 !!(ol_flags
& PKT_TX_TUNNEL_VXLAN_GPE
) ||
249 !!(ol_flags
& PKT_TX_TUNNEL_VXLAN
) ||
250 !!(ol_flags
& PKT_TX_TUNNEL_GRE
) ||
251 !!(ol_flags
& PKT_TX_TUNNEL_GENEVE
) ||
252 !!(ol_flags
& PKT_TX_TUNNEL_IP
) ||
253 !!(ol_flags
& PKT_TX_TUNNEL_IPIP
));
255 const uint8_t csum_outer
= (!!(ol_flags
& PKT_TX_OUTER_UDP_CKSUM
) ||
256 !!(ol_flags
& PKT_TX_TUNNEL_UDP
));
257 const uint8_t outer_l2_len
= m
->outer_l2_len
;
258 const uint8_t l2_len
= m
->l2_len
;
260 if ((flags
& OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F
) &&
261 (flags
& OCCTX_TX_OFFLOAD_L3_L4_CSUM_F
)) {
262 if (is_tunnel_parsed
) {
264 send_hdr
->w0
.l3ptr
= outer_l2_len
;
265 send_hdr
->w0
.l4ptr
= outer_l2_len
+ m
->outer_l3_len
;
266 /* Set clk3 for PKO to calculate IPV4 header checksum */
267 send_hdr
->w0
.ckl3
= !!(ol_flags
& PKT_TX_OUTER_IPV4
);
270 send_hdr
->w0
.ckl4
= csum_outer
;
273 send_hdr
->w1
.leptr
= send_hdr
->w0
.l4ptr
+ l2_len
;
274 send_hdr
->w1
.lfptr
= send_hdr
->w1
.leptr
+ m
->l3_len
;
275 /* Set clke for PKO to calculate inner IPV4 header
278 send_hdr
->w0
.ckle
= !!(ol_flags
& PKT_TX_IPV4
);
281 send_hdr
->w0
.cklf
= csum
;
284 send_hdr
->w0
.l3ptr
= l2_len
;
285 send_hdr
->w0
.l4ptr
= l2_len
+ m
->l3_len
;
286 /* Set clk3 for PKO to calculate IPV4 header checksum */
287 send_hdr
->w0
.ckl3
= !!(ol_flags
& PKT_TX_IPV4
);
290 send_hdr
->w0
.ckl4
= csum
;
292 } else if (flags
& OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F
) {
294 send_hdr
->w0
.l3ptr
= outer_l2_len
;
295 send_hdr
->w0
.l4ptr
= outer_l2_len
+ m
->outer_l3_len
;
296 /* Set clk3 for PKO to calculate IPV4 header checksum */
297 send_hdr
->w0
.ckl3
= !!(ol_flags
& PKT_TX_OUTER_IPV4
);
300 send_hdr
->w0
.ckl4
= csum_outer
;
301 } else if (flags
& OCCTX_TX_OFFLOAD_L3_L4_CSUM_F
) {
303 send_hdr
->w0
.l3ptr
= l2_len
;
304 send_hdr
->w0
.l4ptr
= l2_len
+ m
->l3_len
;
305 /* Set clk3 for PKO to calculate IPV4 header checksum */
306 send_hdr
->w0
.ckl3
= !!(ol_flags
& PKT_TX_IPV4
);
309 send_hdr
->w0
.ckl4
= csum
;
313 static __rte_always_inline
uint16_t
314 __octeontx_xmit_prepare(struct rte_mbuf
*tx_pkt
, uint64_t *cmd_buf
,
317 uint16_t gaura_id
, nb_desc
= 0;
319 /* Setup PKO_SEND_HDR_S */
320 cmd_buf
[nb_desc
++] = tx_pkt
->data_len
& 0xffff;
321 cmd_buf
[nb_desc
++] = 0x0;
323 /* Enable tx checksum offload */
324 if ((flag
& OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F
) ||
325 (flag
& OCCTX_TX_OFFLOAD_L3_L4_CSUM_F
))
326 octeontx_tx_checksum_offload(cmd_buf
, flag
, tx_pkt
);
328 /* SEND_HDR[DF] bit controls if buffer is to be freed or
329 * not, as SG_DESC[I] and SEND_HDR[II] are clear.
331 if (flag
& OCCTX_TX_OFFLOAD_MBUF_NOFF_F
)
332 cmd_buf
[0] |= (octeontx_prefree_seg(tx_pkt
) <<
335 /* Mark mempool object as "put" since it is freed by PKO */
336 if (!(cmd_buf
[0] & (1ULL << 58)))
337 __mempool_check_cookies(tx_pkt
->pool
, (void **)&tx_pkt
,
339 /* Get the gaura Id */
340 gaura_id
= octeontx_fpa_bufpool_gpool((uintptr_t)
341 tx_pkt
->pool
->pool_id
);
343 /* Setup PKO_SEND_BUFLINK_S */
344 cmd_buf
[nb_desc
++] = PKO_SEND_BUFLINK_SUBDC
|
345 PKO_SEND_BUFLINK_LDTYPE(0x1ull
) |
346 PKO_SEND_BUFLINK_GAUAR((long)gaura_id
) |
348 cmd_buf
[nb_desc
++] = rte_mbuf_data_iova(tx_pkt
);
353 static __rte_always_inline
uint16_t
354 __octeontx_xmit_mseg_prepare(struct rte_mbuf
*tx_pkt
, uint64_t *cmd_buf
,
357 uint16_t nb_segs
, nb_desc
= 0;
358 uint16_t gaura_id
, len
= 0;
359 struct rte_mbuf
*m_next
= NULL
;
361 nb_segs
= tx_pkt
->nb_segs
;
362 /* Setup PKO_SEND_HDR_S */
363 cmd_buf
[nb_desc
++] = tx_pkt
->pkt_len
& 0xffff;
364 cmd_buf
[nb_desc
++] = 0x0;
366 /* Enable tx checksum offload */
367 if ((flag
& OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F
) ||
368 (flag
& OCCTX_TX_OFFLOAD_L3_L4_CSUM_F
))
369 octeontx_tx_checksum_offload(cmd_buf
, flag
, tx_pkt
);
372 m_next
= tx_pkt
->next
;
373 /* To handle case where mbufs belong to diff pools, like
376 gaura_id
= octeontx_fpa_bufpool_gpool((uintptr_t)
377 tx_pkt
->pool
->pool_id
);
379 /* Setup PKO_SEND_GATHER_S */
380 cmd_buf
[nb_desc
] = PKO_SEND_GATHER_SUBDC
|
381 PKO_SEND_GATHER_LDTYPE(0x1ull
) |
382 PKO_SEND_GATHER_GAUAR((long)gaura_id
) |
385 /* SG_DESC[I] bit controls if buffer is to be freed or
386 * not, as SEND_HDR[DF] and SEND_HDR[II] are clear.
388 if (flag
& OCCTX_TX_OFFLOAD_MBUF_NOFF_F
) {
390 (octeontx_prefree_seg(tx_pkt
) << 57);
393 /* Mark mempool object as "put" since it is freed by
396 if (!(cmd_buf
[nb_desc
] & (1ULL << 57))) {
398 __mempool_check_cookies(tx_pkt
->pool
,
399 (void **)&tx_pkt
, 1, 0);
403 cmd_buf
[nb_desc
++] = rte_mbuf_data_iova(tx_pkt
);
406 len
+= tx_pkt
->data_len
;
413 static __rte_always_inline
uint16_t
414 __octeontx_xmit_pkts(void *tx_queue
, struct rte_mbuf
**tx_pkts
,
415 uint16_t nb_pkts
, uint64_t *cmd_buf
,
416 const uint16_t flags
)
418 struct octeontx_txq
*txq
= tx_queue
;
419 octeontx_dq_t
*dq
= &txq
->dq
;
420 uint16_t count
= 0, nb_desc
;
423 while (count
< nb_pkts
) {
424 if (unlikely(*((volatile int64_t *)dq
->fc_status_va
) < 0))
427 if (flags
& OCCTX_TX_MULTI_SEG_F
) {
428 nb_desc
= __octeontx_xmit_mseg_prepare(tx_pkts
[count
],
431 nb_desc
= __octeontx_xmit_prepare(tx_pkts
[count
],
435 octeontx_reg_lmtst(dq
->lmtline_va
, dq
->ioreg_va
, cmd_buf
,
/* Rx burst handler; defined in octeontx_rxtx.c.  Returns the number of
 * packets received into rx_pkts (standard rx-burst contract).
 */
uint16_t
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts);
/* Short aliases for the Tx offload flags, used by the mode table */
#define L3L4CSUM_F	OCCTX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F	OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define NOFF_F		OCCTX_TX_OFFLOAD_MBUF_NOFF_F
#define MULT_F		OCCTX_TX_MULTI_SEG_F

/* [L3L4CSUM_F] [OL3OL4CSUM_F] [NOFF] [MULTI_SEG]
 * T(name, bit3, bit2, bit1, bit0, max_desc, flags) — expands to one Tx
 * burst function per offload combination.
 */
#define OCCTX_TX_FASTPATH_MODES						\
T(no_offload,			0, 0, 0, 0, 4,				\
	OCCTX_TX_OFFLOAD_NONE)						\
T(mseg,				0, 0, 0, 1, 14,				\
	MULT_F)								\
T(l3l4csum,			0, 0, 1, 0, 4,				\
	L3L4CSUM_F)							\
T(l3l4csum_mseg,		0, 0, 1, 1, 14,				\
	L3L4CSUM_F | MULT_F)						\
T(ol3ol4csum,			0, 1, 0, 0, 4,				\
	OL3OL4CSUM_F)							\
T(ol3l4csum_mseg,		0, 1, 0, 1, 14,				\
	OL3OL4CSUM_F | MULT_F)						\
T(ol3l4csum_l3l4csum,		0, 1, 1, 0, 4,				\
	OL3OL4CSUM_F | L3L4CSUM_F)					\
T(ol3l4csum_l3l4csum_mseg,	0, 1, 1, 1, 14,				\
	OL3OL4CSUM_F | L3L4CSUM_F | MULT_F)				\
T(noff,				1, 0, 0, 0, 4,				\
	NOFF_F)								\
T(noff_mseg,			1, 0, 0, 1, 14,				\
	NOFF_F | MULT_F)						\
T(noff_l3l4csum,		1, 0, 1, 0, 4,				\
	NOFF_F | L3L4CSUM_F)						\
T(noff_l3l4csum_mseg,		1, 0, 1, 1, 14,				\
	NOFF_F | L3L4CSUM_F | MULT_F)					\
T(noff_ol3ol4csum,		1, 1, 0, 0, 4,				\
	NOFF_F | OL3OL4CSUM_F)						\
T(noff_ol3ol4csum_mseg,		1, 1, 0, 1, 14,				\
	NOFF_F | OL3OL4CSUM_F | MULT_F)					\
T(noff_ol3ol4csum_l3l4csum,	1, 1, 1, 0, 4,				\
	NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)				\
T(noff_ol3ol4csum_l3l4csum_mseg, 1, 1, 1, 1, 14,			\
	NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F |				\
	MULT_F)
/* RX offload macros */
#define VLAN_FLTR_F	OCCTX_RX_VLAN_FLTR_F
#define CSUM_F		OCCTX_RX_OFFLOAD_CSUM_F
#define MULT_RX_F	OCCTX_RX_MULTI_SEG_F

/* [VLAN_FLTR] [CSUM_F] [MULTI_SEG]
 * R(name, bit2, bit1, bit0, flags) — expands to one Rx burst function
 * per offload combination.
 */
#define OCCTX_RX_FASTPATH_MODES						\
R(no_offload,		0, 0, 0,  OCCTX_RX_OFFLOAD_NONE)		\
R(mseg,			0, 0, 1,  MULT_RX_F)				\
R(csum,			0, 1, 0,  CSUM_F)				\
R(csum_mseg,		0, 1, 1,  CSUM_F | MULT_RX_F)			\
R(vlan,			1, 0, 0,  VLAN_FLTR_F)				\
R(vlan_mseg,		1, 0, 1,  VLAN_FLTR_F | MULT_RX_F)		\
R(vlan_csum,		1, 1, 0,  VLAN_FLTR_F | CSUM_F)			\
R(vlan_csum_mseg,	1, 1, 1,  CSUM_F | VLAN_FLTR_F |		\
			MULT_RX_F)
504 #endif /* __OCTEONTX_RXTX_H__ */