/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <netinet/in.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_cycles.h>

#include "l3fwd.h"
/* Use a hardware CRC32-based hash when the CPU supports it. */
#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
#define EM_HASH_CRC 1
#endif

#ifdef EM_HASH_CRC
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC       rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC       rte_jhash
#endif
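/*
 * Note: rte_hash_crc maps to the SSE4.2/ARMv8 CRC32 instructions on CPUs
 * that provide them, while rte_jhash is the portable software fallback.
 * Both are seedable 32-bit hash functions, so either can sit behind
 * DEFAULT_HASH_FUNC without further changes.
 */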
#define IPV6_ADDR_LEN 16
struct ipv4_5tuple {
	uint32_t ip_dst;
	uint32_t ip_src;
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));

union ipv4_5tuple_host {
	struct {
		uint8_t  pad0;
		uint8_t  proto;
		uint16_t pad1;
		uint32_t ip_src;
		uint32_t ip_dst;
		uint16_t port_src;
		uint16_t port_dst;
	};
	xmm_t xmm;
};

#define XMM_NUM_IN_IPV6_5TUPLE 3
struct ipv6_5tuple {
	uint8_t  ip_dst[IPV6_ADDR_LEN];
	uint8_t  ip_src[IPV6_ADDR_LEN];
	uint16_t port_dst;
	uint16_t port_src;
	uint8_t  proto;
} __attribute__((__packed__));
union ipv6_5tuple_host {
	struct {
		uint16_t pad0;
		uint8_t  proto;
		uint8_t  pad1;
		uint8_t  ip_src[IPV6_ADDR_LEN];
		uint8_t  ip_dst[IPV6_ADDR_LEN];
		uint16_t port_src;
		uint16_t port_dst;
		uint64_t reserve;
	};
	xmm_t xmm[XMM_NUM_IN_IPV6_5TUPLE];
};
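/*
 * Both "host" key unions mirror the on-wire header bytes (including the
 * pad fields around "proto") so a lookup key can be built with masked
 * vector loads straight from the packet instead of a field-by-field
 * copy: one xmm_t covers the IPv4 5-tuple, three cover the IPv6 one.
 */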
struct ipv4_l3fwd_em_route {
	struct ipv4_5tuple key;
	uint8_t if_out;
};

struct ipv6_l3fwd_em_route {
	struct ipv6_5tuple key;
	uint8_t if_out;
};
static struct ipv4_l3fwd_em_route ipv4_l3fwd_em_route_array[] = {
	{{IPv4(101, 0, 0, 0), IPv4(100, 10, 0, 1), 101, 11, IPPROTO_TCP}, 0},
	{{IPv4(201, 0, 0, 0), IPv4(200, 20, 0, 1), 102, 12, IPPROTO_TCP}, 1},
	{{IPv4(111, 0, 0, 0), IPv4(100, 30, 0, 1), 101, 11, IPPROTO_TCP}, 2},
	{{IPv4(211, 0, 0, 0), IPv4(200, 40, 0, 1), 102, 12, IPPROTO_TCP}, 3},
};
static struct ipv6_l3fwd_em_route ipv6_l3fwd_em_route_array[] = {
	{{
	{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	101, 11, IPPROTO_TCP}, 0},

	{{
	{0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0x90, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	102, 12, IPPROTO_TCP}, 1},

	{{
	{0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0xa0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	101, 11, IPPROTO_TCP}, 2},

	{{
	{0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1e, 0x67, 0xff, 0xfe, 0, 0, 0},
	{0xfe, 0xb0, 0, 0, 0, 0, 0, 0, 0x02, 0x1b, 0x21, 0xff, 0xfe, 0x91, 0x38, 0x05},
	102, 12, IPPROTO_TCP}, 3},
};
struct rte_hash *ipv4_l3fwd_em_lookup_struct[NB_SOCKETS];
struct rte_hash *ipv6_l3fwd_em_lookup_struct[NB_SOCKETS];
static inline uint32_t
ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len,
		uint32_t init_val)
{
	const union ipv4_5tuple_host *k;
	uint32_t t;
	const uint32_t *p;

	k = data;
	t = k->proto;
	p = (const uint32_t *)&k->port_src;

#ifdef EM_HASH_CRC
	init_val = rte_hash_crc_4byte(t, init_val);
	init_val = rte_hash_crc_4byte(k->ip_src, init_val);
	init_val = rte_hash_crc_4byte(k->ip_dst, init_val);
	init_val = rte_hash_crc_4byte(*p, init_val);
#else
	init_val = rte_jhash_1word(t, init_val);
	init_val = rte_jhash_1word(k->ip_src, init_val);
	init_val = rte_jhash_1word(k->ip_dst, init_val);
	init_val = rte_jhash_1word(*p, init_val);
#endif

	return init_val;
}
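/*
 * port_src and port_dst are adjacent 16-bit fields, so the 32-bit word
 * read through *p above folds both ports into the hash in a single step.
 */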
static inline uint32_t
ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len,
		uint32_t init_val)
{
	const union ipv6_5tuple_host *k;
	uint32_t t;
	const uint32_t *p;
#ifdef EM_HASH_CRC
	const uint32_t *ip_src0, *ip_src1, *ip_src2, *ip_src3;
	const uint32_t *ip_dst0, *ip_dst1, *ip_dst2, *ip_dst3;
#endif

	k = data;
	t = k->proto;
	p = (const uint32_t *)&k->port_src;

#ifdef EM_HASH_CRC
	ip_src0 = (const uint32_t *) k->ip_src;
	ip_src1 = (const uint32_t *)(k->ip_src + 4);
	ip_src2 = (const uint32_t *)(k->ip_src + 8);
	ip_src3 = (const uint32_t *)(k->ip_src + 12);
	ip_dst0 = (const uint32_t *) k->ip_dst;
	ip_dst1 = (const uint32_t *)(k->ip_dst + 4);
	ip_dst2 = (const uint32_t *)(k->ip_dst + 8);
	ip_dst3 = (const uint32_t *)(k->ip_dst + 12);
	init_val = rte_hash_crc_4byte(t, init_val);
	init_val = rte_hash_crc_4byte(*ip_src0, init_val);
	init_val = rte_hash_crc_4byte(*ip_src1, init_val);
	init_val = rte_hash_crc_4byte(*ip_src2, init_val);
	init_val = rte_hash_crc_4byte(*ip_src3, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst0, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst1, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst2, init_val);
	init_val = rte_hash_crc_4byte(*ip_dst3, init_val);
	init_val = rte_hash_crc_4byte(*p, init_val);
#else
	init_val = rte_jhash_1word(t, init_val);
	init_val = rte_jhash(k->ip_src,
			sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
	init_val = rte_jhash(k->ip_dst,
			sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
	init_val = rte_jhash_1word(*p, init_val);
#endif

	return init_val;
}
#define IPV4_L3FWD_EM_NUM_ROUTES \
	(sizeof(ipv4_l3fwd_em_route_array) / sizeof(ipv4_l3fwd_em_route_array[0]))

#define IPV6_L3FWD_EM_NUM_ROUTES \
	(sizeof(ipv6_l3fwd_em_route_array) / sizeof(ipv6_l3fwd_em_route_array[0]))
static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;

static rte_xmm_t mask0;
static rte_xmm_t mask1;
static rte_xmm_t mask2;
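/*
 * mask0 (IPv4) and mask1/mask2 (IPv6) zero the header bytes that are not
 * part of the 5-tuple (TTL and checksum for IPv4, payload length and hop
 * limit for IPv6, plus the trailing reserve bytes). They are initialized
 * by the populate_*_flow_into_table() helpers before lookups start.
 */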
/* Apply the key mask to 16 bytes of packet data in one vector operation. */
#if defined(__SSE2__)
static inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
	__m128i data = _mm_loadu_si128((__m128i *)(key));

	return _mm_and_si128(data, mask);
}
#elif defined(RTE_MACHINE_CPUFLAG_NEON)
static inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
	int32x4_t data = vld1q_s32((int32_t *)key);

	return vandq_s32(data, mask);
}
#elif defined(RTE_MACHINE_CPUFLAG_ALTIVEC)
static inline xmm_t
em_mask_key(void *key, xmm_t mask)
{
	xmm_t data = vec_ld(0, (xmm_t *)(key));

	return vec_and(data, mask);
}
#else
#error No vector engine (SSE, NEON, ALTIVEC) available, check your toolchain
#endif
static inline uint8_t
em_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
{
	int ret = 0;
	union ipv4_5tuple_host key;
	struct rte_hash *ipv4_l3fwd_lookup_struct =
		(struct rte_hash *)lookup_struct;

	ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);

	/*
	 * Get 5 tuple: dst port, src port, dst IP address,
	 * src IP address and protocol.
	 */
	key.xmm = em_mask_key(ipv4_hdr, mask0.x);

	/* Find destination port */
	ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
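/*
 * The offsetof(time_to_live) adjustment above lines the packet up so the
 * next 16 bytes cover ttl|proto|checksum|ip_src|ip_dst plus the first
 * four bytes of the L4 header (both ports): exactly the masked key
 * layout. On a lookup miss the packet is echoed back out its RX port
 * rather than dropped.
 */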
static inline uint8_t
em_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
{
	int ret = 0;
	union ipv6_5tuple_host key;
	struct rte_hash *ipv6_l3fwd_lookup_struct =
		(struct rte_hash *)lookup_struct;

	ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
	void *data0 = ipv6_hdr;
	void *data1 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t);
	void *data2 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t) + sizeof(xmm_t);

	/* Get part of 5 tuple: src IP address lower 96 bits and protocol */
	key.xmm[0] = em_mask_key(data0, mask1.x);

	/*
	 * Get part of 5 tuple: dst IP address lower 96 bits
	 * and src IP address higher 32 bits.
	 */
	key.xmm[1] = *(xmm_t *)data1;

	/*
	 * Get part of 5 tuple: dst port and src port
	 * and dst IP address higher 32 bits.
	 */
	key.xmm[2] = em_mask_key(data2, mask2.x);

	/* Find destination port */
	ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
	return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
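/*
 * The IPv6 key spans 48 bytes, hence three vector loads: data0/mask1
 * keep the protocol byte and the low 96 bits of the source address,
 * data1 is taken unmasked, and data2/mask2 keep the high 32 bits of the
 * destination address plus both ports while zeroing the reserve field.
 */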
#if defined(__SSE4_1__)
#if defined(NO_HASH_MULTI_LOOKUP)
#include "l3fwd_em_sse.h"
#else
#include "l3fwd_em_hlm_sse.h"
#endif
#else
#include "l3fwd_em.h"
#endif
static void
convert_ipv4_5tuple(struct ipv4_5tuple *key1,
		union ipv4_5tuple_host *key2)
{
	key2->ip_dst = rte_cpu_to_be_32(key1->ip_dst);
	key2->ip_src = rte_cpu_to_be_32(key1->ip_src);
	key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
	key2->port_src = rte_cpu_to_be_16(key1->port_src);
	key2->proto = key1->proto;
	key2->pad0 = 0;
	key2->pad1 = 0;
}
static void
convert_ipv6_5tuple(struct ipv6_5tuple *key1,
		union ipv6_5tuple_host *key2)
{
	uint32_t i;

	for (i = 0; i < 16; i++) {
		key2->ip_dst[i] = key1->ip_dst[i];
		key2->ip_src[i] = key1->ip_src[i];
	}
	key2->port_dst = rte_cpu_to_be_16(key1->port_dst);
	key2->port_src = rte_cpu_to_be_16(key1->port_src);
	key2->proto = key1->proto;
	key2->pad0 = 0;
	key2->pad1 = 0;
	key2->reserve = 0;
}
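/*
 * The convert_*_5tuple() helpers run only at setup time: routes are kept
 * in host byte order for readability and converted to network byte order
 * here, so the per-packet path can match raw header bytes without any
 * byte swapping.
 */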
#define BYTE_VALUE_MAX 256
#define ALL_32_BITS 0xffffffff
#define BIT_8_TO_15 0x0000ff00
static inline void
populate_ipv4_few_flow_into_table(const struct rte_hash *h)
{
	uint32_t i;
	int32_t ret;

	mask0 = (rte_xmm_t){.u32 = {BIT_8_TO_15, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };

	for (i = 0; i < IPV4_L3FWD_EM_NUM_ROUTES; i++) {
		struct ipv4_l3fwd_em_route entry;
		union ipv4_5tuple_host newkey;

		entry = ipv4_l3fwd_em_route_array[i];
		convert_ipv4_5tuple(&entry.key, &newkey);
		ret = rte_hash_add_key(h, (void *) &newkey);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
				" to the l3fwd hash.\n", i);
		ipv4_l3fwd_out_if[ret] = entry.if_out;
	}
	printf("Hash: Adding 0x%" PRIx64 " keys\n",
		(uint64_t)IPV4_L3FWD_EM_NUM_ROUTES);
}
#define BIT_16_TO_23 0x00ff0000
static inline void
populate_ipv6_few_flow_into_table(const struct rte_hash *h)
{
	uint32_t i;
	int32_t ret;

	mask1 = (rte_xmm_t){.u32 = {BIT_16_TO_23, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };

	mask2 = (rte_xmm_t){.u32 = {ALL_32_BITS, ALL_32_BITS, 0, 0} };

	for (i = 0; i < IPV6_L3FWD_EM_NUM_ROUTES; i++) {
		struct ipv6_l3fwd_em_route entry;
		union ipv6_5tuple_host newkey;

		entry = ipv6_l3fwd_em_route_array[i];
		convert_ipv6_5tuple(&entry.key, &newkey);
		ret = rte_hash_add_key(h, (void *) &newkey);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %" PRIu32
				" to the l3fwd hash.\n", i);
		ipv6_l3fwd_out_if[ret] = entry.if_out;
	}
	printf("Hash: Adding 0x%" PRIx64 " keys\n",
		(uint64_t)IPV6_L3FWD_EM_NUM_ROUTES);
}
#define NUMBER_PORT_USED 4
static inline void
populate_ipv4_many_flow_into_table(const struct rte_hash *h,
		unsigned int nr_flow)
{
	unsigned i;

	mask0 = (rte_xmm_t){.u32 = {BIT_8_TO_15, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };

	for (i = 0; i < nr_flow; i++) {
		struct ipv4_l3fwd_em_route entry;
		union ipv4_5tuple_host newkey;

		uint8_t a = (uint8_t)
			((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
		uint8_t b = (uint8_t)
			(((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
		uint8_t c = (uint8_t)
			((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));

		/* Create the ipv4 exact match flow */
		memset(&entry, 0, sizeof(entry));
		switch (i & (NUMBER_PORT_USED - 1)) {
		case 0:
			entry = ipv4_l3fwd_em_route_array[0];
			entry.key.ip_dst = IPv4(101, c, b, a);
			break;
		case 1:
			entry = ipv4_l3fwd_em_route_array[1];
			entry.key.ip_dst = IPv4(201, c, b, a);
			break;
		case 2:
			entry = ipv4_l3fwd_em_route_array[2];
			entry.key.ip_dst = IPv4(111, c, b, a);
			break;
		case 3:
			entry = ipv4_l3fwd_em_route_array[3];
			entry.key.ip_dst = IPv4(211, c, b, a);
			break;
		}
		convert_ipv4_5tuple(&entry.key, &newkey);
		int32_t ret = rte_hash_add_key(h, (void *) &newkey);

		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);

		ipv4_l3fwd_out_if[ret] = (uint8_t) entry.if_out;
	}
	printf("Hash: Adding 0x%x keys\n", nr_flow);
}
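/*
 * Flow generation scheme for the "many flows" mode (same for IPv6
 * below): consecutive i values cycle through the NUMBER_PORT_USED (4)
 * base routes, while i/4 is split into bytes a, b, c written into the
 * destination address. E.g. i = 9 -> base route 1, i/4 = 2, so the IPv4
 * key gets ip_dst = 201.0.0.2.
 */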
static inline void
populate_ipv6_many_flow_into_table(const struct rte_hash *h,
		unsigned int nr_flow)
{
	unsigned i;

	mask1 = (rte_xmm_t){.u32 = {BIT_16_TO_23, ALL_32_BITS,
				ALL_32_BITS, ALL_32_BITS} };

	mask2 = (rte_xmm_t){.u32 = {ALL_32_BITS, ALL_32_BITS, 0, 0} };

	for (i = 0; i < nr_flow; i++) {
		struct ipv6_l3fwd_em_route entry;
		union ipv6_5tuple_host newkey;

		uint8_t a = (uint8_t)
			((i/NUMBER_PORT_USED)%BYTE_VALUE_MAX);
		uint8_t b = (uint8_t)
			(((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
		uint8_t c = (uint8_t)
			((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));

		/* Create the ipv6 exact match flow */
		memset(&entry, 0, sizeof(entry));
		switch (i & (NUMBER_PORT_USED - 1)) {
		case 0:
			entry = ipv6_l3fwd_em_route_array[0];
			break;
		case 1:
			entry = ipv6_l3fwd_em_route_array[1];
			break;
		case 2:
			entry = ipv6_l3fwd_em_route_array[2];
			break;
		case 3:
			entry = ipv6_l3fwd_em_route_array[3];
			break;
		}
		entry.key.ip_dst[13] = c;
		entry.key.ip_dst[14] = b;
		entry.key.ip_dst[15] = a;
		convert_ipv6_5tuple(&entry.key, &newkey);
		int32_t ret = rte_hash_add_key(h, (void *) &newkey);

		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);

		ipv6_l3fwd_out_if[ret] = (uint8_t) entry.if_out;
	}
	printf("Hash: Adding 0x%x keys\n", nr_flow);
}
/* Requirements for this mode:
 * 1. IP packets without extension headers;
 * 2. L4 payload should be either TCP or UDP.
 */
static int
em_check_ptype(int portid)
{
	int i, ret;
	int ptype_l3_ipv4_ext = 0;
	int ptype_l3_ipv6_ext = 0;
	int ptype_l4_tcp = 0;
	int ptype_l4_udp = 0;
	uint32_t ptype_mask = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK;

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
	if (ret <= 0)
		return 0;

	uint32_t ptypes[ret];

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
	for (i = 0; i < ret; ++i) {
		switch (ptypes[i]) {
		case RTE_PTYPE_L3_IPV4_EXT:
			ptype_l3_ipv4_ext = 1;
			break;
		case RTE_PTYPE_L3_IPV6_EXT:
			ptype_l3_ipv6_ext = 1;
			break;
		case RTE_PTYPE_L4_TCP:
			ptype_l4_tcp = 1;
			break;
		case RTE_PTYPE_L4_UDP:
			ptype_l4_udp = 1;
			break;
		}
	}

	if (ptype_l3_ipv4_ext == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV4_EXT\n", portid);
	if (ptype_l3_ipv6_ext == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV6_EXT\n", portid);
	if (!ptype_l3_ipv4_ext || !ptype_l3_ipv6_ext)
		return 0;

	if (ptype_l4_tcp == 0)
		printf("port %d cannot parse RTE_PTYPE_L4_TCP\n", portid);
	if (ptype_l4_udp == 0)
		printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
	if (ptype_l4_tcp && ptype_l4_udp)
		return 1;

	return 0;
}
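/*
 * If em_check_ptype() reports that the NIC cannot classify the required
 * packet types in hardware, the application can fall back to the
 * software parser below; in the l3fwd example this is typically
 * installed per RX queue with rte_eth_add_rx_callback().
 */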
static inline void
em_parse_ptype(struct rte_mbuf *m)
{
	struct ether_hdr *eth_hdr;
	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
	uint16_t ether_type;
	void *l3;
	int hdr_len;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
	ether_type = eth_hdr->ether_type;
	l3 = (uint8_t *)eth_hdr + sizeof(struct ether_hdr);
	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		ipv4_hdr = (struct ipv4_hdr *)l3;
		hdr_len = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
			  IPV4_IHL_MULTIPLIER;
		if (hdr_len == sizeof(struct ipv4_hdr)) {
			packet_type |= RTE_PTYPE_L3_IPV4;
			if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
				packet_type |= RTE_PTYPE_L4_TCP;
			else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
				packet_type |= RTE_PTYPE_L4_UDP;
		} else
			packet_type |= RTE_PTYPE_L3_IPV4_EXT;
	} else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
		ipv6_hdr = (struct ipv6_hdr *)l3;
		if (ipv6_hdr->proto == IPPROTO_TCP)
			packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		else if (ipv6_hdr->proto == IPPROTO_UDP)
			packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		else
			packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	}

	m->packet_type = packet_type;
}
static uint16_t
em_cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
		struct rte_mbuf *pkts[], uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused,
		void *user_param __rte_unused)
{
	unsigned i;

	for (i = 0; i < nb_pkts; ++i)
		em_parse_ptype(pkts[i]);

	return nb_pkts;
}
/* main processing loop */
int
em_main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, nb_rx;
	uint8_t portid, queueid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;

	prev_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD,
			" -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	while (!force_quit) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {

			for (i = 0; i < qconf->n_tx_port; ++i) {
				portid = qconf->tx_port_id[i];
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(qconf,
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; ++i) {
			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
				MAX_PKT_BURST);
			if (nb_rx == 0)
				continue;

#if defined(__SSE4_1__)
			l3fwd_em_send_packets(nb_rx, pkts_burst,
							portid, qconf);
#else
			l3fwd_em_no_opt_send_packets(nb_rx, pkts_burst,
							portid, qconf);
#endif /* __SSE_4_1__ */
		}
	}

	return 0;
}
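/*
 * The drain logic above bounds TX latency: partially filled TX buffers
 * are flushed once BURST_TX_DRAIN_US microseconds have passed, so under
 * light load packets never wait indefinitely for a burst to fill.
 */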
/*
 * Initialize exact match (hash) parameters.
 */
void
setup_hash(const int socketid)
{
	struct rte_hash_parameters ipv4_l3fwd_hash_params = {
		.name = NULL,
		.entries = L3FWD_HASH_ENTRIES,
		.key_len = sizeof(union ipv4_5tuple_host),
		.hash_func = ipv4_hash_crc,
		.hash_func_init_val = 0,
	};

	struct rte_hash_parameters ipv6_l3fwd_hash_params = {
		.name = NULL,
		.entries = L3FWD_HASH_ENTRIES,
		.key_len = sizeof(union ipv6_5tuple_host),
		.hash_func = ipv6_hash_crc,
		.hash_func_init_val = 0,
	};

	char s[64];

	/* create ipv4 hash */
	snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
	ipv4_l3fwd_hash_params.name = s;
	ipv4_l3fwd_hash_params.socket_id = socketid;
	ipv4_l3fwd_em_lookup_struct[socketid] =
		rte_hash_create(&ipv4_l3fwd_hash_params);
	if (ipv4_l3fwd_em_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd hash on socket %d\n",
			socketid);

	/* create ipv6 hash */
	snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
	ipv6_l3fwd_hash_params.name = s;
	ipv6_l3fwd_hash_params.socket_id = socketid;
	ipv6_l3fwd_em_lookup_struct[socketid] =
		rte_hash_create(&ipv6_l3fwd_hash_params);
	if (ipv6_l3fwd_em_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd hash on socket %d\n",
			socketid);

	if (hash_entry_number != HASH_ENTRY_NUMBER_DEFAULT) {
		/* For testing hash matching with a large number of flows we
		 * generate millions of IP 5-tuples with an incremented dst
		 * address to initialize the hash table. */
		if (ipv6 == 0) {
			/* populate the ipv4 hash */
			populate_ipv4_many_flow_into_table(
				ipv4_l3fwd_em_lookup_struct[socketid],
				hash_entry_number);
		} else {
			/* populate the ipv6 hash */
			populate_ipv6_many_flow_into_table(
				ipv6_l3fwd_em_lookup_struct[socketid],
				hash_entry_number);
		}
	} else {
		/*
		 * Use data in ipv4/ipv6 l3fwd lookup table
		 * directly to initialize the hash table.
		 */
		if (ipv6 == 0) {
			/* populate the ipv4 hash */
			populate_ipv4_few_flow_into_table(
				ipv4_l3fwd_em_lookup_struct[socketid]);
		} else {
			/* populate the ipv6 hash */
			populate_ipv6_few_flow_into_table(
				ipv6_l3fwd_em_lookup_struct[socketid]);
		}
	}
}
/* Return ipv4/ipv6 em fwd lookup struct. */
void *
em_get_ipv4_l3fwd_lookup_struct(const int socketid)
{
	return ipv4_l3fwd_em_lookup_struct[socketid];
}

void *
em_get_ipv6_l3fwd_lookup_struct(const int socketid)
{
	return ipv6_l3fwd_em_lookup_struct[socketid];
}
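/*
 * Rough usage sketch (hypothetical caller, per NUMA socket, following
 * the init path of the l3fwd example):
 *
 *	setup_hash(socketid);
 *	qconf->ipv4_lookup_struct =
 *		em_get_ipv4_l3fwd_lookup_struct(socketid);
 *	qconf->ipv6_lookup_struct =
 *		em_get_ipv6_l3fwd_lookup_struct(socketid);
 *
 * after which em_main_loop() is launched on each worker lcore, e.g. via
 * rte_eal_mp_remote_launch().
 */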