1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
8 #include <rte_mbuf_ptype.h>
9 #include <rte_byteorder.h>
10 #include <rte_ether.h>
18 /* get l3 packet type from ip6 next protocol */
20 ptype_l3_ip6(uint8_t ip6_proto
)
22 static const uint32_t ip6_ext_proto_map
[256] = {
23 [IPPROTO_HOPOPTS
] = RTE_PTYPE_L3_IPV6_EXT
- RTE_PTYPE_L3_IPV6
,
24 [IPPROTO_ROUTING
] = RTE_PTYPE_L3_IPV6_EXT
- RTE_PTYPE_L3_IPV6
,
25 [IPPROTO_FRAGMENT
] = RTE_PTYPE_L3_IPV6_EXT
- RTE_PTYPE_L3_IPV6
,
26 [IPPROTO_ESP
] = RTE_PTYPE_L3_IPV6_EXT
- RTE_PTYPE_L3_IPV6
,
27 [IPPROTO_AH
] = RTE_PTYPE_L3_IPV6_EXT
- RTE_PTYPE_L3_IPV6
,
28 [IPPROTO_DSTOPTS
] = RTE_PTYPE_L3_IPV6_EXT
- RTE_PTYPE_L3_IPV6
,
31 return RTE_PTYPE_L3_IPV6
+ ip6_ext_proto_map
[ip6_proto
];
34 /* get l3 packet type from ip version and header length */
36 ptype_l3_ip(uint8_t ipv_ihl
)
38 static const uint32_t ptype_l3_ip_proto_map
[256] = {
39 [0x45] = RTE_PTYPE_L3_IPV4
,
40 [0x46] = RTE_PTYPE_L3_IPV4_EXT
,
41 [0x47] = RTE_PTYPE_L3_IPV4_EXT
,
42 [0x48] = RTE_PTYPE_L3_IPV4_EXT
,
43 [0x49] = RTE_PTYPE_L3_IPV4_EXT
,
44 [0x4A] = RTE_PTYPE_L3_IPV4_EXT
,
45 [0x4B] = RTE_PTYPE_L3_IPV4_EXT
,
46 [0x4C] = RTE_PTYPE_L3_IPV4_EXT
,
47 [0x4D] = RTE_PTYPE_L3_IPV4_EXT
,
48 [0x4E] = RTE_PTYPE_L3_IPV4_EXT
,
49 [0x4F] = RTE_PTYPE_L3_IPV4_EXT
,
52 return ptype_l3_ip_proto_map
[ipv_ihl
];
55 /* get l4 packet type from proto */
57 ptype_l4(uint8_t proto
)
59 static const uint32_t ptype_l4_proto
[256] = {
60 [IPPROTO_UDP
] = RTE_PTYPE_L4_UDP
,
61 [IPPROTO_TCP
] = RTE_PTYPE_L4_TCP
,
62 [IPPROTO_SCTP
] = RTE_PTYPE_L4_SCTP
,
65 return ptype_l4_proto
[proto
];
68 /* get inner l3 packet type from ip6 next protocol */
70 ptype_inner_l3_ip6(uint8_t ip6_proto
)
72 static const uint32_t ptype_inner_ip6_ext_proto_map
[256] = {
73 [IPPROTO_HOPOPTS
] = RTE_PTYPE_INNER_L3_IPV6_EXT
-
74 RTE_PTYPE_INNER_L3_IPV6
,
75 [IPPROTO_ROUTING
] = RTE_PTYPE_INNER_L3_IPV6_EXT
-
76 RTE_PTYPE_INNER_L3_IPV6
,
77 [IPPROTO_FRAGMENT
] = RTE_PTYPE_INNER_L3_IPV6_EXT
-
78 RTE_PTYPE_INNER_L3_IPV6
,
79 [IPPROTO_ESP
] = RTE_PTYPE_INNER_L3_IPV6_EXT
-
80 RTE_PTYPE_INNER_L3_IPV6
,
81 [IPPROTO_AH
] = RTE_PTYPE_INNER_L3_IPV6_EXT
-
82 RTE_PTYPE_INNER_L3_IPV6
,
83 [IPPROTO_DSTOPTS
] = RTE_PTYPE_INNER_L3_IPV6_EXT
-
84 RTE_PTYPE_INNER_L3_IPV6
,
87 return RTE_PTYPE_INNER_L3_IPV6
+
88 ptype_inner_ip6_ext_proto_map
[ip6_proto
];
91 /* get inner l3 packet type from ip version and header length */
93 ptype_inner_l3_ip(uint8_t ipv_ihl
)
95 static const uint32_t ptype_inner_l3_ip_proto_map
[256] = {
96 [0x45] = RTE_PTYPE_INNER_L3_IPV4
,
97 [0x46] = RTE_PTYPE_INNER_L3_IPV4_EXT
,
98 [0x47] = RTE_PTYPE_INNER_L3_IPV4_EXT
,
99 [0x48] = RTE_PTYPE_INNER_L3_IPV4_EXT
,
100 [0x49] = RTE_PTYPE_INNER_L3_IPV4_EXT
,
101 [0x4A] = RTE_PTYPE_INNER_L3_IPV4_EXT
,
102 [0x4B] = RTE_PTYPE_INNER_L3_IPV4_EXT
,
103 [0x4C] = RTE_PTYPE_INNER_L3_IPV4_EXT
,
104 [0x4D] = RTE_PTYPE_INNER_L3_IPV4_EXT
,
105 [0x4E] = RTE_PTYPE_INNER_L3_IPV4_EXT
,
106 [0x4F] = RTE_PTYPE_INNER_L3_IPV4_EXT
,
109 return ptype_inner_l3_ip_proto_map
[ipv_ihl
];
112 /* get inner l4 packet type from proto */
114 ptype_inner_l4(uint8_t proto
)
116 static const uint32_t ptype_inner_l4_proto
[256] = {
117 [IPPROTO_UDP
] = RTE_PTYPE_INNER_L4_UDP
,
118 [IPPROTO_TCP
] = RTE_PTYPE_INNER_L4_TCP
,
119 [IPPROTO_SCTP
] = RTE_PTYPE_INNER_L4_SCTP
,
122 return ptype_inner_l4_proto
[proto
];
/* get the tunnel packet type if any, update proto and off. */
/*
 * NOTE(review): this function is truncated in this view — the dispatch on
 * *proto, the opt_len[] initializer contents, the declaration/derivation
 * of 'flags' and several statements are not visible. Comments below only
 * describe what the visible fragments show; verify against the full file.
 */
127 ptype_tunnel(uint16_t *proto
, const struct rte_mbuf
*m
,
/* Option-length table with 16 entries, indexed by 'flags' below.
 * Initializer contents not visible in this view. */
132 static const uint8_t opt_len
[16] = {
142 const struct gre_hdr
*gh
;
143 struct gre_hdr gh_copy
;
/* Read the GRE header at *off; rte_pktmbuf_read() may copy it into
 * gh_copy when the header straddles mbuf segments. */
146 gh
= rte_pktmbuf_read(m
, *off
, sizeof(*gh
), &gh_copy
);
147 if (unlikely(gh
== NULL
))
/* First 16 bits of the GRE header, converted from network order.
 * Since opt_len has only 16 entries, 'flags' is presumably reduced to
 * 4 bits by code not visible here — TODO confirm. */
150 flags
= rte_be_to_cpu_16(*(const uint16_t *)gh
);
/* A zero table entry marks an unsupported flag combination. */
152 if (opt_len
[flags
] == 0)
/* Advance past the GRE header and its optional fields. */
155 *off
+= opt_len
[flags
];
/* Transparent Ethernet Bridging payload => NVGRE; otherwise plain GRE. */
157 if (*proto
== rte_cpu_to_be_16(ETHER_TYPE_TEB
))
158 return RTE_PTYPE_TUNNEL_NVGRE
;
160 return RTE_PTYPE_TUNNEL_GRE
;
/* IP-in-IP tunnel: rewrite *proto to the encapsulated ethertype so the
 * caller can parse the inner header. */
163 *proto
= rte_cpu_to_be_16(ETHER_TYPE_IPv4
);
164 return RTE_PTYPE_TUNNEL_IP
;
166 *proto
= rte_cpu_to_be_16(ETHER_TYPE_IPv6
);
167 return RTE_PTYPE_TUNNEL_IP
; /* IP is also valid for IPv6 */
173 /* get the ipv4 header length */
175 ip4_hlen(const struct ipv4_hdr
*hdr
)
177 return (hdr
->version_ihl
& 0xf) * 4;
/* parse ipv6 extended headers, update offset and return next proto */
/*
 * NOTE(review): truncated in this view — the switch header, the braces,
 * the 4th parameter, NULL-check bodies and the loop-exhaustion return
 * path are not visible. Comments describe only the visible fragments.
 */
181 int __rte_experimental
182 rte_net_skip_ip6_ext(uint16_t proto
, const struct rte_mbuf
*m
, uint32_t *off
,
189 const struct ext_hdr
*xh
;
190 struct ext_hdr xh_copy
;
/* Hard cap on chained extension headers, to bound work on hostile input. */
195 #define MAX_EXT_HDRS 5
196 for (i
= 0; i
< MAX_EXT_HDRS
; i
++) {
/* Variable-length extension headers: hop-by-hop, routing, dest options. */
198 case IPPROTO_HOPOPTS
:
199 case IPPROTO_ROUTING
:
200 case IPPROTO_DSTOPTS
:
201 xh
= rte_pktmbuf_read(m
, *off
, sizeof(*xh
),
/* xh->len counts 8-byte units beyond the first, so the full header is
 * (len + 1) * 8 bytes. */
205 *off
+= (xh
->len
+ 1) * 8;
206 proto
= xh
->next_hdr
;
/* Fragment header: fixed size; the chain stops here. */
208 case IPPROTO_FRAGMENT
:
209 xh
= rte_pktmbuf_read(m
, *off
, sizeof(*xh
),
214 proto
= xh
->next_hdr
;
216 return proto
; /* this is always the last ext hdr */
/* parse mbuf data to get packet type */
/*
 * NOTE(review): this function is heavily truncated in this view — early
 * returns after NULL checks, 'goto' targets/labels, several closing
 * braces, the declarations of off/proto/ret/frag and the final return
 * are not visible. Comments describe only the visible fragments.
 */
227 uint32_t rte_net_get_ptype(const struct rte_mbuf
*m
,
228 struct rte_net_hdr_lens
*hdr_lens
, uint32_t layers
)
230 struct rte_net_hdr_lens local_hdr_lens
;
231 const struct ether_hdr
*eh
;
232 struct ether_hdr eh_copy
;
/* Every parsed packet starts as plain Ethernet at L2. */
233 uint32_t pkt_type
= RTE_PTYPE_L2_ETHER
;
/* Caller may pass NULL for hdr_lens; use a local scratch struct then. */
238 if (hdr_lens
== NULL
)
239 hdr_lens
= &local_hdr_lens
;
/* --- outer L2: read the Ethernet header --- */
241 eh
= rte_pktmbuf_read(m
, off
, sizeof(*eh
), &eh_copy
);
242 if (unlikely(eh
== NULL
))
244 proto
= eh
->ether_type
;
246 hdr_lens
->l2_len
= off
;
/* Stop here if the caller did not request L2 classification. */
248 if ((layers
& RTE_PTYPE_L2_MASK
) == 0)
251 if (proto
== rte_cpu_to_be_16(ETHER_TYPE_IPv4
))
252 goto l3
; /* fast path if packet is IPv4 */
/* Single VLAN tag: extend l2_len and pick up the inner ethertype. */
254 if (proto
== rte_cpu_to_be_16(ETHER_TYPE_VLAN
)) {
255 const struct vlan_hdr
*vh
;
256 struct vlan_hdr vh_copy
;
258 pkt_type
= RTE_PTYPE_L2_ETHER_VLAN
;
259 vh
= rte_pktmbuf_read(m
, off
, sizeof(*vh
), &vh_copy
);
260 if (unlikely(vh
== NULL
))
263 hdr_lens
->l2_len
+= sizeof(*vh
);
264 proto
= vh
->eth_proto
;
/* QinQ: two stacked VLAN tags; read the second tag's ethertype. */
265 } else if (proto
== rte_cpu_to_be_16(ETHER_TYPE_QINQ
)) {
266 const struct vlan_hdr
*vh
;
267 struct vlan_hdr vh_copy
;
269 pkt_type
= RTE_PTYPE_L2_ETHER_QINQ
;
270 vh
= rte_pktmbuf_read(m
, off
+ sizeof(*vh
), sizeof(*vh
),
272 if (unlikely(vh
== NULL
))
274 off
+= 2 * sizeof(*vh
);
275 hdr_lens
->l2_len
+= 2 * sizeof(*vh
);
276 proto
= vh
->eth_proto
;
/* --- outer L3 --- */
280 if ((layers
& RTE_PTYPE_L3_MASK
) == 0)
283 if (proto
== rte_cpu_to_be_16(ETHER_TYPE_IPv4
)) {
284 const struct ipv4_hdr
*ip4h
;
285 struct ipv4_hdr ip4h_copy
;
287 ip4h
= rte_pktmbuf_read(m
, off
, sizeof(*ip4h
), &ip4h_copy
);
288 if (unlikely(ip4h
== NULL
))
291 pkt_type
|= ptype_l3_ip(ip4h
->version_ihl
);
292 hdr_lens
->l3_len
= ip4_hlen(ip4h
);
293 off
+= hdr_lens
->l3_len
;
295 if ((layers
& RTE_PTYPE_L4_MASK
) == 0)
/* Any fragment (non-zero offset or MF flag) is classified L4_FRAG
 * and not parsed further at L4. */
298 if (ip4h
->fragment_offset
& rte_cpu_to_be_16(
299 IPV4_HDR_OFFSET_MASK
| IPV4_HDR_MF_FLAG
)) {
300 pkt_type
|= RTE_PTYPE_L4_FRAG
;
301 hdr_lens
->l4_len
= 0;
304 proto
= ip4h
->next_proto_id
;
305 pkt_type
|= ptype_l4(proto
);
306 } else if (proto
== rte_cpu_to_be_16(ETHER_TYPE_IPv6
)) {
307 const struct ipv6_hdr
*ip6h
;
308 struct ipv6_hdr ip6h_copy
;
311 ip6h
= rte_pktmbuf_read(m
, off
, sizeof(*ip6h
), &ip6h_copy
);
312 if (unlikely(ip6h
== NULL
))
316 hdr_lens
->l3_len
= sizeof(*ip6h
);
317 off
+= hdr_lens
->l3_len
;
318 pkt_type
|= ptype_l3_ip6(proto
);
/* Walk the IPv6 extension-header chain and fold its length into
 * l3_len. */
319 if ((pkt_type
& RTE_PTYPE_L3_MASK
) == RTE_PTYPE_L3_IPV6_EXT
) {
320 ret
= rte_net_skip_ip6_ext(proto
, m
, &off
, &frag
);
324 hdr_lens
->l3_len
= off
- hdr_lens
->l2_len
;
329 if ((layers
& RTE_PTYPE_L4_MASK
) == 0)
333 pkt_type
|= RTE_PTYPE_L4_FRAG
;
334 hdr_lens
->l4_len
= 0;
337 pkt_type
|= ptype_l4(proto
);
/* --- outer L4 length --- */
340 if ((pkt_type
& RTE_PTYPE_L4_MASK
) == RTE_PTYPE_L4_UDP
) {
341 hdr_lens
->l4_len
= sizeof(struct udp_hdr
);
343 } else if ((pkt_type
& RTE_PTYPE_L4_MASK
) == RTE_PTYPE_L4_TCP
) {
344 const struct tcp_hdr
*th
;
345 struct tcp_hdr th_copy
;
347 th
= rte_pktmbuf_read(m
, off
, sizeof(*th
), &th_copy
);
348 if (unlikely(th
== NULL
))
349 return pkt_type
& (RTE_PTYPE_L2_MASK
|
/* TCP data offset is the high nibble, in 32-bit words => *4 bytes. */
351 hdr_lens
->l4_len
= (th
->data_off
& 0xf0) >> 2;
353 } else if ((pkt_type
& RTE_PTYPE_L4_MASK
) == RTE_PTYPE_L4_SCTP
) {
354 hdr_lens
->l4_len
= sizeof(struct sctp_hdr
);
/* --- tunnel detection: remember off to compute tunnel_len --- */
357 uint32_t prev_off
= off
;
359 hdr_lens
->l4_len
= 0;
361 if ((layers
& RTE_PTYPE_TUNNEL_MASK
) == 0)
364 pkt_type
|= ptype_tunnel(&proto
, m
, &off
);
365 hdr_lens
->tunnel_len
= off
- prev_off
;
368 /* same job for inner header: we need to duplicate the code
369 * because the packet types do not have the same value.
/* --- inner L2 (after a tunnel) --- */
371 if ((layers
& RTE_PTYPE_INNER_L2_MASK
) == 0)
374 hdr_lens
->inner_l2_len
= 0;
/* TEB tunnels carry a full inner Ethernet header. */
375 if (proto
== rte_cpu_to_be_16(ETHER_TYPE_TEB
)) {
376 eh
= rte_pktmbuf_read(m
, off
, sizeof(*eh
), &eh_copy
);
377 if (unlikely(eh
== NULL
))
379 pkt_type
|= RTE_PTYPE_INNER_L2_ETHER
;
380 proto
= eh
->ether_type
;
382 hdr_lens
->inner_l2_len
= sizeof(*eh
);
/* Inner VLAN tag: replace the inner-L2 ptype and extend inner_l2_len. */
385 if (proto
== rte_cpu_to_be_16(ETHER_TYPE_VLAN
)) {
386 const struct vlan_hdr
*vh
;
387 struct vlan_hdr vh_copy
;
389 pkt_type
&= ~RTE_PTYPE_INNER_L2_MASK
;
390 pkt_type
|= RTE_PTYPE_INNER_L2_ETHER_VLAN
;
391 vh
= rte_pktmbuf_read(m
, off
, sizeof(*vh
), &vh_copy
);
392 if (unlikely(vh
== NULL
))
395 hdr_lens
->inner_l2_len
+= sizeof(*vh
);
396 proto
= vh
->eth_proto
;
/* Inner QinQ: two stacked tags. */
397 } else if (proto
== rte_cpu_to_be_16(ETHER_TYPE_QINQ
)) {
398 const struct vlan_hdr
*vh
;
399 struct vlan_hdr vh_copy
;
401 pkt_type
&= ~RTE_PTYPE_INNER_L2_MASK
;
402 pkt_type
|= RTE_PTYPE_INNER_L2_ETHER_QINQ
;
403 vh
= rte_pktmbuf_read(m
, off
+ sizeof(*vh
), sizeof(*vh
),
405 if (unlikely(vh
== NULL
))
407 off
+= 2 * sizeof(*vh
);
408 hdr_lens
->inner_l2_len
+= 2 * sizeof(*vh
);
409 proto
= vh
->eth_proto
;
/* --- inner L3 --- */
412 if ((layers
& RTE_PTYPE_INNER_L3_MASK
) == 0)
415 if (proto
== rte_cpu_to_be_16(ETHER_TYPE_IPv4
)) {
416 const struct ipv4_hdr
*ip4h
;
417 struct ipv4_hdr ip4h_copy
;
419 ip4h
= rte_pktmbuf_read(m
, off
, sizeof(*ip4h
), &ip4h_copy
);
420 if (unlikely(ip4h
== NULL
))
423 pkt_type
|= ptype_inner_l3_ip(ip4h
->version_ihl
);
424 hdr_lens
->inner_l3_len
= ip4_hlen(ip4h
);
425 off
+= hdr_lens
->inner_l3_len
;
427 if ((layers
& RTE_PTYPE_INNER_L4_MASK
) == 0)
/* Inner IPv4 fragment: classify as INNER_L4_FRAG, stop L4 parse. */
429 if (ip4h
->fragment_offset
&
430 rte_cpu_to_be_16(IPV4_HDR_OFFSET_MASK
|
432 pkt_type
|= RTE_PTYPE_INNER_L4_FRAG
;
433 hdr_lens
->inner_l4_len
= 0;
436 proto
= ip4h
->next_proto_id
;
437 pkt_type
|= ptype_inner_l4(proto
);
438 } else if (proto
== rte_cpu_to_be_16(ETHER_TYPE_IPv6
)) {
439 const struct ipv6_hdr
*ip6h
;
440 struct ipv6_hdr ip6h_copy
;
443 ip6h
= rte_pktmbuf_read(m
, off
, sizeof(*ip6h
), &ip6h_copy
);
444 if (unlikely(ip6h
== NULL
))
448 hdr_lens
->inner_l3_len
= sizeof(*ip6h
);
449 off
+= hdr_lens
->inner_l3_len
;
450 pkt_type
|= ptype_inner_l3_ip6(proto
);
/* Walk inner IPv6 extension headers, folding length into
 * inner_l3_len. */
451 if ((pkt_type
& RTE_PTYPE_INNER_L3_MASK
) ==
452 RTE_PTYPE_INNER_L3_IPV6_EXT
) {
456 ret
= rte_net_skip_ip6_ext(proto
, m
, &off
, &frag
);
460 hdr_lens
->inner_l3_len
+= off
- prev_off
;
465 if ((layers
& RTE_PTYPE_INNER_L4_MASK
) == 0)
469 pkt_type
|= RTE_PTYPE_INNER_L4_FRAG
;
470 hdr_lens
->inner_l4_len
= 0;
473 pkt_type
|= ptype_inner_l4(proto
);
/* --- inner L4 length --- */
476 if ((pkt_type
& RTE_PTYPE_INNER_L4_MASK
) == RTE_PTYPE_INNER_L4_UDP
) {
477 hdr_lens
->inner_l4_len
= sizeof(struct udp_hdr
);
478 } else if ((pkt_type
& RTE_PTYPE_INNER_L4_MASK
) ==
479 RTE_PTYPE_INNER_L4_TCP
) {
480 const struct tcp_hdr
*th
;
481 struct tcp_hdr th_copy
;
483 th
= rte_pktmbuf_read(m
, off
, sizeof(*th
), &th_copy
);
484 if (unlikely(th
== NULL
))
/* On a truncated inner TCP header, keep only the already-valid
 * inner L2/L3 classification. */
485 return pkt_type
& (RTE_PTYPE_INNER_L2_MASK
|
486 RTE_PTYPE_INNER_L3_MASK
);
/* TCP data offset is the high nibble, in 32-bit words => *4 bytes. */
487 hdr_lens
->inner_l4_len
= (th
->data_off
& 0xf0) >> 2;
488 } else if ((pkt_type
& RTE_PTYPE_INNER_L4_MASK
) ==
489 RTE_PTYPE_INNER_L4_SCTP
) {
490 hdr_lens
->inner_l4_len
= sizeof(struct sctp_hdr
);
492 hdr_lens
->inner_l4_len
= 0;