/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include "testpmd.h"
#define IP_DEFTTL  64   /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN  0x05 /* default IP header length == five 32-bit words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)

#define GRE_KEY_PRESENT 0x2000
#define GRE_KEY_LEN     4
#define GRE_SUPPORTED_FIELDS GRE_KEY_PRESENT
/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
#else
#define _htons(x) (x)
#endif
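/*
 * Illustration (not from the original sources): a case label must be an
 * integer constant expression, so a function call such as
 * rte_cpu_to_be_16() cannot appear there, while the macro above can:
 *
 *	switch (eth_hdr->ether_type) {
 *	case _htons(ETHER_TYPE_IPv4):
 *		...
 *	}
 */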
/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
	uint16_t ethertype;
	uint16_t l2_len;
	uint16_t l3_len;
	uint16_t l4_len;
	uint8_t l4_proto;
	uint8_t is_tunnel;
	uint16_t outer_ethertype;
	uint16_t outer_l2_len;
	uint16_t outer_l3_len;
	uint8_t outer_l4_proto;
	uint16_t tso_segsz;
	uint16_t tunnel_tso_segsz;
	uint32_t pkt_len;
};
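/*
 * The l2/l3/l4 fields above always describe the innermost headers: when
 * a tunnel is recognized, the values parsed so far are first saved into
 * the outer_* fields, then overwritten by the inner headers (see
 * parse_vxlan(), parse_gre() and parse_encap_ip() below).
 */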
/* simplified GRE header */
struct simple_gre_hdr {
	uint16_t flags;
	uint16_t proto;
} __attribute__((__packed__));
static uint16_t
get_psd_sum(void *l3_hdr, uint16_t ethertype, uint64_t ol_flags)
{
	if (ethertype == _htons(ETHER_TYPE_IPv4))
		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}
static uint16_t
get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
{
	if (ethertype == _htons(ETHER_TYPE_IPv4))
		return rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
}
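/*
 * Background note (not from the original sources): when the L4 checksum
 * is offloaded to the NIC, the checksum field must be seeded with the
 * pseudo-header sum only (get_psd_sum()), and the hardware completes it
 * over the payload. When computed in software, the full checksum over
 * pseudo-header, L4 header and payload is written at once
 * (get_udptcp_checksum()).
 */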
/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv4(struct ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
{
	struct tcp_hdr *tcp_hdr;

	info->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
	info->l4_proto = ipv4_hdr->next_proto_id;

	/* only fill l4_len for TCP, it's useful for TSO */
	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + info->l3_len);
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else
		info->l4_len = 0;
}
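/*
 * Worked example (illustrative): data_off keeps the TCP header length
 * in its upper 4 bits, counted in 32-bit words. Masking with 0xf0 and
 * shifting right by 2 multiplies that word count by 4, yielding bytes:
 * data_off = 0x50 -> (0x50 & 0xf0) >> 2 = 20 bytes (no TCP options).
 */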
/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv6(struct ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
{
	struct tcp_hdr *tcp_hdr;

	info->l3_len = sizeof(struct ipv6_hdr);
	info->l4_proto = ipv6_hdr->proto;

	/* only fill l4_len for TCP, it's useful for TSO */
	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)ipv6_hdr + info->l3_len);
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else
		info->l4_len = 0;
}
/*
 * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
 * ipproto. This function is able to recognize IPv4/IPv6 with one optional vlan
 * header. The l4_len argument is only set in case of TCP (useful for TSO).
 */
static void
parse_ethernet(struct ether_hdr *eth_hdr, struct testpmd_offload_info *info)
{
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;

	info->l2_len = sizeof(struct ether_hdr);
	info->ethertype = eth_hdr->ether_type;

	if (info->ethertype == _htons(ETHER_TYPE_VLAN)) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		info->l2_len += sizeof(struct vlan_hdr);
		info->ethertype = vlan_hdr->eth_proto;
	}

	switch (info->ethertype) {
	case _htons(ETHER_TYPE_IPv4):
		ipv4_hdr = (struct ipv4_hdr *) ((char *)eth_hdr + info->l2_len);
		parse_ipv4(ipv4_hdr, info);
		break;
	case _htons(ETHER_TYPE_IPv6):
		ipv6_hdr = (struct ipv6_hdr *) ((char *)eth_hdr + info->l2_len);
		parse_ipv6(ipv6_hdr, info);
		break;
	default:
		info->l4_len = 0;
		info->l3_len = 0;
		info->l4_proto = 0;
		break;
	}
}
/* Parse a vxlan header */
static void
parse_vxlan(struct udp_hdr *udp_hdr,
	    struct testpmd_offload_info *info,
	    uint32_t pkt_type)
{
	struct ether_hdr *eth_hdr;

	/* check udp destination port, 4789 is the default vxlan port
	 * (rfc7348) or that the rx offload flag is set (i40e only
	 * currently) */
	if (udp_hdr->dst_port != _htons(4789) &&
	    RTE_ETH_IS_TUNNEL_PKT(pkt_type) == 0)
		return;

	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;
	info->outer_l4_proto = info->l4_proto;

	eth_hdr = (struct ether_hdr *)((char *)udp_hdr +
		sizeof(struct udp_hdr) +
		sizeof(struct vxlan_hdr));

	parse_ethernet(eth_hdr, info);
	info->l2_len += ETHER_VXLAN_HLEN; /* add udp + vxlan */
}
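/*
 * Resulting layout for a recognized VXLAN packet (illustrative):
 *
 *   outer Ether / outer IP / outer UDP / VXLAN / inner Ether / inner IP / L4
 *
 * outer_l2_len and outer_l3_len describe the outer Ether and IP headers,
 * while l2_len ends up covering outer UDP + VXLAN + inner Ether
 * (ETHER_VXLAN_HLEN adds the UDP and VXLAN parts), i.e. everything
 * between the outer and inner L3 headers.
 */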
/* Parse a gre header */
static void
parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	uint8_t gre_len = 0;

	/* check which fields are supported */
	if ((gre_hdr->flags & _htons(~GRE_SUPPORTED_FIELDS)) != 0)
		return;

	gre_len += sizeof(struct simple_gre_hdr);

	if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
		gre_len += GRE_KEY_LEN;

	if (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) {
		info->is_tunnel = 1;
		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		ipv4_hdr = (struct ipv4_hdr *)((char *)gre_hdr + gre_len);

		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv4);
		info->l2_len = 0;

	} else if (gre_hdr->proto == _htons(ETHER_TYPE_IPv6)) {
		info->is_tunnel = 1;
		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		ipv6_hdr = (struct ipv6_hdr *)((char *)gre_hdr + gre_len);

		info->ethertype = _htons(ETHER_TYPE_IPv6);
		parse_ipv6(ipv6_hdr, info);
		info->l2_len = 0;

	} else if (gre_hdr->proto == _htons(ETHER_TYPE_TEB)) {
		info->is_tunnel = 1;
		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		eth_hdr = (struct ether_hdr *)((char *)gre_hdr + gre_len);

		parse_ethernet(eth_hdr, info);
	} else
		return;

	info->l2_len += gre_len;
}
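/*
 * Arithmetic example (illustrative): the simplified GRE header is
 * sizeof(struct simple_gre_hdr) = 4 bytes (flags + proto). With the
 * key-present bit set, gre_len = 4 + GRE_KEY_LEN = 8 bytes, which is
 * then charged to the inner l2_len. Any other GRE option bit makes the
 * function bail out early, leaving the packet untouched.
 */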
/* Parse an encapsulated ip or ipv6 header */
static void
parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
{
	struct ipv4_hdr *ipv4_hdr = encap_ip;
	struct ipv6_hdr *ipv6_hdr = encap_ip;
	uint8_t ip_version;

	ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;

	if (ip_version != 4 && ip_version != 6)
		return;

	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;

	if (ip_version == 4) {
		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv4);
	} else {
		parse_ipv6(ipv6_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv6);
	}
	info->l2_len = 0;
}
/* if possible, calculate the checksum of a packet in hw or sw,
 * depending on the testpmd command line configuration */
static uint64_t
process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
	uint16_t testpmd_ol_flags)
{
	struct ipv4_hdr *ipv4_hdr = l3_hdr;
	struct udp_hdr *udp_hdr;
	struct tcp_hdr *tcp_hdr;
	struct sctp_hdr *sctp_hdr;
	uint64_t ol_flags = 0;
	uint32_t max_pkt_len, tso_segsz = 0;

	/* ensure packet is large enough to require tso */
	if (!info->is_tunnel) {
		max_pkt_len = info->l2_len + info->l3_len + info->l4_len +
			info->tso_segsz;
		if (info->tso_segsz != 0 && info->pkt_len > max_pkt_len)
			tso_segsz = info->tso_segsz;
	} else {
		max_pkt_len = info->outer_l2_len + info->outer_l3_len +
			info->l2_len + info->l3_len + info->l4_len +
			info->tunnel_tso_segsz;
		if (info->tunnel_tso_segsz != 0 && info->pkt_len > max_pkt_len)
			tso_segsz = info->tunnel_tso_segsz;
	}

	if (info->ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->hdr_checksum = 0;

		ol_flags |= PKT_TX_IPV4;
		if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
			ol_flags |= PKT_TX_IP_CKSUM;
		} else {
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
				ol_flags |= PKT_TX_IP_CKSUM;
			else
				ipv4_hdr->hdr_checksum =
					rte_ipv4_cksum(ipv4_hdr);
		}
	} else if (info->ethertype == _htons(ETHER_TYPE_IPv6))
		ol_flags |= PKT_TX_IPV6;
	else
		return 0; /* packet type not supported, nothing to do */

	if (info->l4_proto == IPPROTO_UDP) {
		udp_hdr = (struct udp_hdr *)((char *)l3_hdr + info->l3_len);
		/* do not recalculate udp cksum if it was 0 */
		if (udp_hdr->dgram_cksum != 0) {
			udp_hdr->dgram_cksum = 0;
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) {
				ol_flags |= PKT_TX_UDP_CKSUM;
				udp_hdr->dgram_cksum = get_psd_sum(l3_hdr,
					info->ethertype, ol_flags);
			} else {
				udp_hdr->dgram_cksum =
					get_udptcp_checksum(l3_hdr, udp_hdr,
						info->ethertype);
			}
		}
	} else if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
		tcp_hdr->cksum = 0;
		if (tso_segsz) {
			ol_flags |= PKT_TX_TCP_SEG;
			tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
				ol_flags);
		} else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) {
			ol_flags |= PKT_TX_TCP_CKSUM;
			tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
				ol_flags);
		} else {
			tcp_hdr->cksum =
				get_udptcp_checksum(l3_hdr, tcp_hdr,
					info->ethertype);
		}
	} else if (info->l4_proto == IPPROTO_SCTP) {
		sctp_hdr = (struct sctp_hdr *)((char *)l3_hdr + info->l3_len);
		sctp_hdr->cksum = 0;
		/* sctp payload must be a multiple of 4 to be
		 * allowed by hardware */
		if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) &&
			((ipv4_hdr->total_length & 0x3) == 0)) {
			ol_flags |= PKT_TX_SCTP_CKSUM;
		} else {
			/* XXX implement CRC32c, example available in
			 * RFC3309 */
		}
	}

	return ol_flags;
}
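/*
 * Example decision flow (illustrative) for an inner TCP segment when
 * TSO is active: the IP checksum is offloaded (PKT_TX_IP_CKSUM) because
 * each segment produced by the NIC gets its own IP header, and the TCP
 * checksum field is seeded with only the pseudo-header sum via
 * get_psd_sum(), letting the hardware finalize one checksum per
 * segment.
 */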
/* Calculate the checksum of outer header */
static uint64_t
process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
	uint16_t testpmd_ol_flags, int tso_enabled)
{
	struct ipv4_hdr *ipv4_hdr = outer_l3_hdr;
	struct ipv6_hdr *ipv6_hdr = outer_l3_hdr;
	struct udp_hdr *udp_hdr;
	uint64_t ol_flags = 0;

	if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->hdr_checksum = 0;
		ol_flags |= PKT_TX_OUTER_IPV4;

		if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
			ol_flags |= PKT_TX_OUTER_IP_CKSUM;
		else
			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	} else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
		ol_flags |= PKT_TX_OUTER_IPV6;

	if (info->outer_l4_proto != IPPROTO_UDP)
		return ol_flags;

	udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + info->outer_l3_len);

	/* outer UDP checksum is done in software as we have no hardware
	 * supporting it today, and no API for it. On the other hand, for
	 * UDP tunneling, like VXLAN or Geneve, the outer UDP checksum can
	 * be set to zero.
	 *
	 * If a packet will be TSOed into small packets by the NIC, we cannot
	 * set/calculate a non-zero checksum, because it will be a wrong
	 * value after the packet is split into several small packets.
	 */
	if (tso_enabled)
		udp_hdr->dgram_cksum = 0;

	/* do not recalculate udp cksum if it was 0 */
	if (udp_hdr->dgram_cksum != 0) {
		udp_hdr->dgram_cksum = 0;
		if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4))
			udp_hdr->dgram_cksum =
				rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);
		else
			udp_hdr->dgram_cksum =
				rte_ipv6_udptcp_cksum(ipv6_hdr, udp_hdr);
	}

	return ol_flags;
}
/*
 * Performs actual copying.
 * Returns number of segments in the destination mbuf on success,
 * or negative error code on failure.
 */
static int
mbuf_copy_split(const struct rte_mbuf *ms, struct rte_mbuf *md[],
	uint16_t seglen[], uint8_t nb_seg)
{
	uint32_t dlen, slen, tlen;
	uint32_t i, len;
	const struct rte_mbuf *m;
	const uint8_t *src;
	uint8_t *dst;

	dst = NULL;
	dlen = 0;
	slen = 0;
	src = NULL;

	m = ms;
	i = 0;
	tlen = 0;

	while (ms != NULL && i != nb_seg) {

		if (slen == 0) {
			slen = rte_pktmbuf_data_len(ms);
			src = rte_pktmbuf_mtod(ms, const uint8_t *);
		}

		if (dlen == 0) {
			dlen = RTE_MIN(seglen[i], slen);
			md[i]->data_len = dlen;
			md[i]->next = (i + 1 == nb_seg) ? NULL : md[i + 1];
			dst = rte_pktmbuf_mtod(md[i], uint8_t *);
		}

		len = RTE_MIN(slen, dlen);
		memcpy(dst, src, len);
		tlen += len;
		slen -= len;
		dlen -= len;
		dst += len;
		src += len;

		if (slen == 0)
			ms = ms->next;
		if (dlen == 0)
			i++;
	}

	if (ms != NULL)
		return -ENOBUFS;
	else if (tlen != m->pkt_len)
		return -EINVAL;

	md[0]->nb_segs = nb_seg;
	md[0]->pkt_len = tlen;
	md[0]->vlan_tci = m->vlan_tci;
	md[0]->vlan_tci_outer = m->vlan_tci_outer;
	md[0]->ol_flags = m->ol_flags;
	md[0]->tx_offload = m->tx_offload;

	return nb_seg;
}
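/*
 * Worked example (illustrative): copying a single 100-byte source mbuf
 * into seglen = {64, 64} yields two destination segments with data_len
 * 64 and 36, tlen = 100 == pkt_len, and a return value of nb_seg = 2.
 * If the destination segments cannot hold the whole source chain,
 * -ENOBUFS is returned instead.
 */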
/*
 * Allocate a new mbuf with up to tx_pkt_nb_segs segments.
 * Copy packet contents and offload information into the new segmented mbuf.
 */
static struct rte_mbuf *
pkt_copy_split(const struct rte_mbuf *pkt)
{
	int32_t n, rc;
	uint32_t i, len, nb_seg;
	struct rte_mempool *mp;
	uint16_t seglen[RTE_MAX_SEGS_PER_PKT];
	struct rte_mbuf *p, *md[RTE_MAX_SEGS_PER_PKT];

	mp = current_fwd_lcore()->mbp;

	if (tx_pkt_split == TX_PKT_SPLIT_RND)
		nb_seg = random() % tx_pkt_nb_segs + 1;
	else
		nb_seg = tx_pkt_nb_segs;

	memcpy(seglen, tx_pkt_seg_lengths, nb_seg * sizeof(seglen[0]));

	/* calculate number of segments to use and their length. */
	len = 0;
	for (i = 0; i != nb_seg && len < pkt->pkt_len; i++) {
		len += seglen[i];
		md[i] = NULL;
	}

	n = pkt->pkt_len - len;

	/* update size of the last segment to fit rest of the packet */
	if (n >= 0) {
		seglen[i - 1] += n;
		len += n;
	}

	nb_seg = i;
	while (i != 0) {
		p = rte_pktmbuf_alloc(mp);
		if (p == NULL) {
			RTE_LOG(ERR, USER1,
				"failed to allocate %u-th of %u mbuf "
				"from mempool: %s\n",
				nb_seg - i, nb_seg, mp->name);
			break;
		}

		md[--i] = p;
		if (rte_pktmbuf_tailroom(md[i]) < seglen[i]) {
			RTE_LOG(ERR, USER1, "mempool %s, %u-th segment: "
				"expected seglen: %u, "
				"actual mbuf tailroom: %u\n",
				mp->name, i, seglen[i],
				rte_pktmbuf_tailroom(md[i]));
			break;
		}
	}

	/* all mbufs successfully allocated, do copy */
	if (i == 0) {
		rc = mbuf_copy_split(pkt, md, seglen, nb_seg);
		if (rc < 0)
			RTE_LOG(ERR, USER1,
				"mbuf_copy_split for %p(len=%u, nb_seg=%hhu) "
				"into %u segments failed with error code: %d\n",
				pkt, pkt->pkt_len, pkt->nb_segs, nb_seg, rc);

		/* figure out how many mbufs to free. */
		i = RTE_MAX(rc, 0);
	}

	/* free unused mbufs */
	for (; i != nb_seg; i++) {
		rte_pktmbuf_free_seg(md[i]);
		md[i] = NULL;
	}

	return md[0];
}
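/*
 * Usage note (illustrative): with tx_pkt_seg_lengths = {64, 64} and
 * tx_pkt_nb_segs = 2, a 150-byte packet selects both segments
 * (len = 128 < 150) and the remainder n = 22 is added to the last one,
 * giving seglen = {64, 86}. On any allocation or copy failure the
 * function returns NULL and the caller keeps the original, unsplit
 * mbuf.
 */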
/*
 * Receive a burst of packets, and for each packet:
 *  - parse packet, and try to recognize a supported packet type (1)
 *  - if it's not a supported packet type, don't touch the packet, else:
 *  - reprocess the checksum of all supported layers. This is done in SW
 *    or HW, depending on testpmd command line configuration
 *  - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
 *    segmentation offload (this implies HW TCP checksum)
 * Then transmit packets on the output port.
 *
 * (1) Supported packets are:
 *   Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
 *
 * The testpmd command line for this forward engine sets the flags
 * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
 * whether a checksum must be calculated in software or in hardware. The
 * IP, UDP, TCP and SCTP flags always concern the inner layer. The
 * OUTER_IP is only useful for tunnel packets.
 */
static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_port *txp;
	struct rte_mbuf *m, *p;
	struct ether_hdr *eth_hdr;
	void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t i;
	uint64_t rx_ol_flags, tx_ol_flags;
	uint16_t testpmd_ol_flags;
	uint32_t retry;
	uint32_t rx_bad_ip_csum;
	uint32_t rx_bad_l4_csum;
	struct testpmd_offload_info info;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/* receive a burst of packet */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
	fs->rx_packets += nb_rx;
	rx_bad_ip_csum = 0;
	rx_bad_l4_csum = 0;

	txp = &ports[fs->tx_port];
	testpmd_ol_flags = txp->tx_ol_flags;
	memset(&info, 0, sizeof(info));
	info.tso_segsz = txp->tso_segsz;
	info.tunnel_tso_segsz = txp->tunnel_tso_segsz;

	for (i = 0; i < nb_rx; i++) {
		if (likely(i < nb_rx - 1))
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
						       void *));

		m = pkts_burst[i];
		info.is_tunnel = 0;
		info.pkt_len = rte_pktmbuf_pkt_len(m);
		tx_ol_flags = 0;
		rx_ol_flags = m->ol_flags;

		/* Update the L3/L4 checksum error packet statistics */
		if ((rx_ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
			rx_bad_ip_csum += 1;
		if ((rx_ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
			rx_bad_l4_csum += 1;

		/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
		 * and inner headers */

		eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
				&eth_hdr->d_addr);
		ether_addr_copy(&ports[fs->tx_port].eth_addr,
				&eth_hdr->s_addr);
		parse_ethernet(eth_hdr, &info);
		l3_hdr = (char *)eth_hdr + info.l2_len;

		/* check if it's a supported tunnel */
		if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) {
			if (info.l4_proto == IPPROTO_UDP) {
				struct udp_hdr *udp_hdr;

				udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
					info.l3_len);
				parse_vxlan(udp_hdr, &info, m->packet_type);
				if (info.is_tunnel)
					tx_ol_flags |= PKT_TX_TUNNEL_VXLAN;
			} else if (info.l4_proto == IPPROTO_GRE) {
				struct simple_gre_hdr *gre_hdr;

				gre_hdr = (struct simple_gre_hdr *)
					((char *)l3_hdr + info.l3_len);
				parse_gre(gre_hdr, &info);
				if (info.is_tunnel)
					tx_ol_flags |= PKT_TX_TUNNEL_GRE;
			} else if (info.l4_proto == IPPROTO_IPIP) {
				void *encap_ip_hdr;

				encap_ip_hdr = (char *)l3_hdr + info.l3_len;
				parse_encap_ip(encap_ip_hdr, &info);
				if (info.is_tunnel)
					tx_ol_flags |= PKT_TX_TUNNEL_IPIP;
			}
		}

		/* update l3_hdr and outer_l3_hdr if a tunnel was parsed */
		if (info.is_tunnel) {
			outer_l3_hdr = l3_hdr;
			l3_hdr = (char *)l3_hdr + info.outer_l3_len + info.l2_len;
		}

		/* step 2: depending on user command line configuration,
		 * recompute checksum either in software or flag the
		 * mbuf to offload the calculation to the NIC. If TSO
		 * is configured, prepare the mbuf for TCP segmentation. */

		/* process checksums of inner headers first */
		tx_ol_flags |= process_inner_cksums(l3_hdr, &info,
			testpmd_ol_flags);

		/* Then process outer headers if any. Note that the software
		 * checksum will be wrong if one of the inner checksums is
		 * processed in hardware. */
		if (info.is_tunnel == 1) {
			tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
					testpmd_ol_flags,
					!!(tx_ol_flags & PKT_TX_TCP_SEG));
		}

		/* step 3: fill the mbuf meta data (flags and header lengths) */

		if (info.is_tunnel == 1) {
			if (info.tunnel_tso_segsz ||
			    testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) {
				m->outer_l2_len = info.outer_l2_len;
				m->outer_l3_len = info.outer_l3_len;
				m->l2_len = info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
				m->tso_segsz = info.tunnel_tso_segsz;
			} else {
				/* if there is a outer UDP cksum
				 * processed in sw and the inner in hw,
				 * the outer checksum will be wrong as
				 * the payload will be modified by the
				 * hardware */
				m->l2_len = info.outer_l2_len +
					info.outer_l3_len + info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
			}
		} else {
			/* this is only useful if an offload flag is
			 * set, but it does not hurt to fill it in any
			 * case */
			m->l2_len = info.l2_len;
			m->l3_len = info.l3_len;
			m->l4_len = info.l4_len;
			m->tso_segsz = info.tso_segsz;
		}
		m->ol_flags = tx_ol_flags;

		/* Do split & copy for the packet. */
		if (tx_pkt_split != TX_PKT_SPLIT_OFF) {
			p = pkt_copy_split(m);
			if (p != NULL) {
				rte_pktmbuf_free(m);
				m = p;
				pkts_burst[i] = m;
			}
		}

		/* if verbose mode is enabled, dump debug info */
		if (verbose_level > 0) {
			char buf[256];

			printf("-----------------\n");
			printf("port=%u, mbuf=%p, pkt_len=%u, nb_segs=%hhu:\n",
				fs->rx_port, m, m->pkt_len, m->nb_segs);
			/* dump rx parsed packet info */
			rte_get_rx_ol_flag_list(rx_ol_flags, buf, sizeof(buf));
			printf("rx: l2_len=%d ethertype=%x l3_len=%d "
				"l4_proto=%d l4_len=%d flags=%s\n",
				info.l2_len, rte_be_to_cpu_16(info.ethertype),
				info.l3_len, info.l4_proto, info.l4_len, buf);
			if (rx_ol_flags & PKT_RX_LRO)
				printf("rx: m->lro_segsz=%u\n", m->tso_segsz);
			if (info.is_tunnel == 1)
				printf("rx: outer_l2_len=%d outer_ethertype=%x "
					"outer_l3_len=%d\n", info.outer_l2_len,
					rte_be_to_cpu_16(info.outer_ethertype),
					info.outer_l3_len);
			/* dump tx packet info */
			if ((testpmd_ol_flags & (TESTPMD_TX_OFFLOAD_IP_CKSUM |
						TESTPMD_TX_OFFLOAD_UDP_CKSUM |
						TESTPMD_TX_OFFLOAD_TCP_CKSUM |
						TESTPMD_TX_OFFLOAD_SCTP_CKSUM)) ||
				info.tso_segsz != 0)
				printf("tx: m->l2_len=%d m->l3_len=%d "
					"m->l4_len=%d\n",
					m->l2_len, m->l3_len, m->l4_len);
			if (info.is_tunnel == 1) {
				if (testpmd_ol_flags &
				    TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
					printf("tx: m->outer_l2_len=%d "
						"m->outer_l3_len=%d\n",
						m->outer_l2_len,
						m->outer_l3_len);
				if (info.tunnel_tso_segsz != 0 &&
						(m->ol_flags & PKT_TX_TCP_SEG))
					printf("tx: m->tso_segsz=%d\n",
						m->tso_segsz);
			} else if (info.tso_segsz != 0 &&
					(m->ol_flags & PKT_TX_TCP_SEG))
				printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
			rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf));
			printf("tx: flags=%s", buf);
			printf("\n");
		}
	}

	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);

	/*
	 * Retry if necessary
	 */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;
	fs->rx_bad_ip_csum += rx_bad_ip_csum;
	fs->rx_bad_l4_csum += rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
struct fwd_engine csum_fwd_engine = {
	.fwd_mode_name  = "csum",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_checksum_forward,
};
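/*
 * Usage note (not from the original sources): this engine is selected
 * at the testpmd prompt with "set fwd csum"; the TESTPMD_TX_OFFLOAD_*
 * bits it honors are configured with the "csum set ..." commands.
 */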