/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <sys/queue.h>

#include <string.h>
#include <errno.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_acl.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>

#include "ipsec.h"
#include "parser.h"
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1

#define MAX_JUMBO_PKT_LEN  9600

#define MEMPOOL_CACHE_SIZE 256

#define NB_MBUF	(32000)

#define CDEV_QUEUE_DESC 2048
#define CDEV_MAP_ENTRIES 1024
#define CDEV_MP_NB_OBJS 2048
#define CDEV_MP_CACHE_SZ 64
#define MAX_QUEUE_PAIRS 1

#define OPTION_CONFIG		"config"
#define OPTION_SINGLE_SA	"single-sa"

#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET	3

#define MAX_RX_QUEUE_PER_LCORE 16

#define MAX_LCORE_PARAMS 1024
#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << (port)))
/*
 * Configurable number of RX/TX ring descriptors
 */
#define IPSEC_SECGW_RX_DESC_DEFAULT 128
#define IPSEC_SECGW_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((a) & 0xff) << 56) | \
	((uint64_t)((b) & 0xff) << 48) | \
	((uint64_t)((c) & 0xff) << 40) | \
	((uint64_t)((d) & 0xff) << 32) | \
	((uint64_t)((e) & 0xff) << 24) | \
	((uint64_t)((f) & 0xff) << 16) | \
	((uint64_t)((g) & 0xff) << 8) | \
	((uint64_t)(h) & 0xff))
#else
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((h) & 0xff) << 56) | \
	((uint64_t)((g) & 0xff) << 48) | \
	((uint64_t)((f) & 0xff) << 40) | \
	((uint64_t)((e) & 0xff) << 32) | \
	((uint64_t)((d) & 0xff) << 24) | \
	((uint64_t)((c) & 0xff) << 16) | \
	((uint64_t)((b) & 0xff) << 8) | \
	((uint64_t)(a) & 0xff))
#endif

#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))

#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
		addr.addr_bytes[0], addr.addr_bytes[1], \
		addr.addr_bytes[2], addr.addr_bytes[3], \
		addr.addr_bytes[4], addr.addr_bytes[5], \
		0, 0)
/* port/source ethernet addr and destination ethernet addr */
struct ethaddr_info {
	uint64_t src, dst;
};

struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
};
/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint32_t unprotected_port_mask;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
static uint32_t single_sa;
static uint32_t single_sa_idx;
struct lcore_rx_queue {
	uint8_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;

struct lcore_params {
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;

static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params *lcore_params;
static uint16_t nb_lcore_params;
static struct rte_hash *cdev_map_in;
static struct rte_hash *cdev_map_out;

struct buffer {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
};

struct lcore_conf {
	uint16_t nb_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
	struct ipsec_ctx inbound;
	struct ipsec_ctx outbound;
	struct rt_ctx *rt4_ctx;
	struct rt_ctx *rt6_ctx;
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode	= ETH_MQ_RX_RSS,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
				ETH_RSS_TCP | ETH_RSS_SCTP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
static struct socket_ctx socket_ctx[NB_SOCKETS];

struct traffic_type {
	const uint8_t *data[MAX_PKT_BURST * 2];
	struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
	uint32_t res[MAX_PKT_BURST * 2];
	uint32_t num;
};

struct ipsec_traffic {
	struct traffic_type ipsec;
	struct traffic_type ip4;
	struct traffic_type ip6;
};
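/*
 * Classify one received packet: strip the Ethernet header and, based on the
 * EtherType and the next-protocol field of the L3 header, append the mbuf
 * to the ipsec (ESP), ip4 or ip6 list of the ipsec_traffic bundle.
 * Anything else is logged and dropped.
 */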
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
	uint8_t *nlp;
	struct ether_hdr *eth;

	eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
	if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
		nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
		if (*nlp == IPPROTO_ESP)
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		else {
			t->ip4.data[t->ip4.num] = nlp;
			t->ip4.pkts[(t->ip4.num)++] = pkt;
		}
	} else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
		nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
		if (*nlp == IPPROTO_ESP)
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		else {
			t->ip6.data[t->ip6.num] = nlp;
			t->ip6.pkts[(t->ip6.num)++] = pkt;
		}
	} else {
		/* Unknown/Unsupported type, drop the packet */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
		rte_pktmbuf_free(pkt);
	}
}
static inline void
prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
		uint16_t nb_pkts)
{
	int32_t i;

	t->ipsec.num = 0;
	t->ip4.num = 0;
	t->ip6.num = 0;

	for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
					void *));
		prepare_one_packet(pkts[i], t);
	}
	/* Process left packets */
	for (; i < nb_pkts; i++)
		prepare_one_packet(pkts[i], t);
}
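/*
 * Rebuild the L2 header of an outgoing packet: prepend an Ethernet header,
 * request checksum offload for IPv4, and fill in the per-port source and
 * destination MAC addresses from ethaddr_tbl.
 */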
static inline void
prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
{
	struct ip *ip;
	struct ether_hdr *ethhdr;

	ip = rte_pktmbuf_mtod(pkt, struct ip *);

	ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);

	if (ip->ip_v == IPVERSION) {
		pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
		pkt->l3_len = sizeof(struct ip);
		pkt->l2_len = ETHER_HDR_LEN;

		ip->ip_sum = 0;
		ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	} else {
		pkt->ol_flags |= PKT_TX_IPV6;
		pkt->l3_len = sizeof(struct ip6_hdr);
		pkt->l2_len = ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
	}

	memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
			sizeof(struct ether_addr));
	memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
			sizeof(struct ether_addr));
}
static inline void
prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
{
	int32_t i;
	const int32_t prefetch_offset = 2;

	for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
		rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
		prepare_tx_pkt(pkts[i], port);
	}
	/* Process left packets */
	for (; i < nb_pkts; i++)
		prepare_tx_pkt(pkts[i], port);
}
/* Send burst of packets on an output interface */
static inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
{
	struct rte_mbuf **m_table;
	int32_t ret;
	uint16_t queueid;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	prepare_tx_burst(m_table, n, port);

	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	if (unlikely(ret < n)) {
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}

	return 0;
}
/* Enqueue a single packet, and send burst if queue is filled */
static inline int32_t
send_single_packet(struct rte_mbuf *m, uint8_t port)
{
	uint32_t lcore_id;
	uint16_t len;
	struct lcore_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_conf[lcore_id];
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = m;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
	return 0;
}
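/*
 * Inbound SP (ACL) check. rte_acl_classify() leaves a verdict per packet in
 * ip->res: BYPASS keeps the packet as-is, DISCARD frees it, and a PROTECT
 * verdict carries an SA index. Packets with index < lim arrived as plain
 * traffic (only decrypted packets sit past that boundary), so for them
 * anything but BYPASS is a drop; decrypted packets must additionally match
 * the SA named by the policy (inbound_sa_check()). Survivors are compacted
 * in place and ip->num is updated.
 */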
static inline void
inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
		uint16_t lim)
{
	struct rte_mbuf *m;
	uint32_t i, j, res, sa_idx;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (res & BYPASS) {
			ip->pkts[j++] = m;
			continue;
		}
		if (res & DISCARD || i < lim) {
			rte_pktmbuf_free(m);
			continue;
		}
		/* Only check SPI match for processed IPSec packets */
		sa_idx = ip->res[i] & PROTECT_MASK;
		if (sa_idx == 0 || !inbound_sa_check(sa, m, sa_idx)) {
			rte_pktmbuf_free(m);
			continue;
		}
		ip->pkts[j++] = m;
	}
	ip->num = j;
}
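/*
 * Full inbound path: decrypt the ESP burst via ipsec_inbound(), sort the
 * resulting inner packets into the ip4/ip6 lists by IP version, then run
 * the IPv4 and IPv6 inbound policies. The pre-decryption counts n_ip4 and
 * n_ip6 are passed as 'lim' so plain traffic is distinguished from
 * post-decryption traffic.
 */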
static inline void
process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint16_t idx, nb_pkts_in, i, n_ip4, n_ip6;

	nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
			traffic->ipsec.num, MAX_PKT_BURST);

	n_ip4 = traffic->ip4.num;
	n_ip6 = traffic->ip6.num;

	/* SP/ACL Inbound check ipsec and ip4 */
	for (i = 0; i < nb_pkts_in; i++) {
		m = traffic->ipsec.pkts[i];
		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
		if (ip->ip_v == IPVERSION) {
			idx = traffic->ip4.num++;
			traffic->ip4.pkts[idx] = m;
			traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m,
					uint8_t *, offsetof(struct ip, ip_p));
		} else if (ip->ip_v == IP6_VERSION) {
			idx = traffic->ip6.num++;
			traffic->ip6.pkts[idx] = m;
			traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m,
					uint8_t *,
					offsetof(struct ip6_hdr, ip6_nxt));
		} else
			rte_pktmbuf_free(m);
	}

	inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
			n_ip4);

	inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
			n_ip6);
}
static inline void
outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
		struct traffic_type *ipsec)
{
	struct rte_mbuf *m;
	uint32_t i, j, sa_idx;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		sa_idx = ip->res[i] & PROTECT_MASK;
		if ((ip->res[i] == 0) || (ip->res[i] & DISCARD))
			rte_pktmbuf_free(m);
		else if (sa_idx != 0) {
			ipsec->res[ipsec->num] = sa_idx;
			ipsec->pkts[ipsec->num++] = m;
		} else
			ip->pkts[j++] = m;
	}
	ip->num = j;
}
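/*
 * Full outbound path: ESP arriving on a protected port is dropped, the
 * ip4/ip6 bursts are classified against the outbound policies, the PROTECT
 * subset is encrypted via ipsec_outbound(), and the resulting tunnel
 * packets are binned back into the ip4/ip6 lists for routing.
 */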
static inline void
process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint16_t idx, nb_pkts_out, i;

	/* Drop any IPsec traffic from protected ports */
	for (i = 0; i < traffic->ipsec.num; i++)
		rte_pktmbuf_free(traffic->ipsec.pkts[i]);

	traffic->ipsec.num = 0;

	outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);

	outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);

	nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
			traffic->ipsec.res, traffic->ipsec.num,
			MAX_PKT_BURST);

	for (i = 0; i < nb_pkts_out; i++) {
		m = traffic->ipsec.pkts[i];
		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
		if (ip->ip_v == IPVERSION) {
			idx = traffic->ip4.num++;
			traffic->ip4.pkts[idx] = m;
		} else {
			idx = traffic->ip6.num++;
			traffic->ip6.pkts[idx] = m;
		}
	}
}
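/*
 * Inbound path for --single-sa mode: plain traffic received on the
 * unprotected port is dropped, every ESP packet is decrypted and binned by
 * IP version; no SP lookup is performed.
 */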
static inline void
process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint32_t nb_pkts_in, i, idx;

	/* Drop any IPv4 traffic from unprotected ports */
	for (i = 0; i < traffic->ip4.num; i++)
		rte_pktmbuf_free(traffic->ip4.pkts[i]);

	traffic->ip4.num = 0;

	/* Drop any IPv6 traffic from unprotected ports */
	for (i = 0; i < traffic->ip6.num; i++)
		rte_pktmbuf_free(traffic->ip6.pkts[i]);

	traffic->ip6.num = 0;

	nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
			traffic->ipsec.num, MAX_PKT_BURST);

	for (i = 0; i < nb_pkts_in; i++) {
		m = traffic->ipsec.pkts[i];
		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
		if (ip->ip_v == IPVERSION) {
			idx = traffic->ip4.num++;
			traffic->ip4.pkts[idx] = m;
		} else {
			idx = traffic->ip6.num++;
			traffic->ip6.pkts[idx] = m;
		}
	}
}
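/*
 * Outbound path for --single-sa mode: every plain packet is tagged with the
 * configured single_sa_idx and pushed through ipsec_outbound() without
 * consulting the SP; all packets share the one SA tunnel.
 */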
static inline void
process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint32_t nb_pkts_out, i;
	struct ip *ip;

	/* Drop any IPsec traffic from protected ports */
	for (i = 0; i < traffic->ipsec.num; i++)
		rte_pktmbuf_free(traffic->ipsec.pkts[i]);

	traffic->ipsec.num = 0;

	for (i = 0; i < traffic->ip4.num; i++)
		traffic->ip4.res[i] = single_sa_idx;

	for (i = 0; i < traffic->ip6.num; i++)
		traffic->ip6.res[i] = single_sa_idx;

	nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ip4.pkts,
			traffic->ip4.res, traffic->ip4.num,
			MAX_PKT_BURST);

	/* They all use the same SA (ip4 or ip6 tunnel) */
	m = traffic->ipsec.pkts[i];
	ip = rte_pktmbuf_mtod(m, struct ip *);
	if (ip->ip_v == IPVERSION)
		traffic->ip4.num = nb_pkts_out;
	else
		traffic->ip6.num = nb_pkts_out;
}
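/*
 * LPM route lookup for an IPv4 burst. The output port is encoded in the
 * low byte of the returned next hop; packets without a matching route are
 * dropped.
 */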
static inline void
route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
	uint32_t hop[MAX_PKT_BURST * 2];
	uint32_t dst_ip[MAX_PKT_BURST * 2];
	uint16_t i, offset;

	if (nb_pkts == 0)
		return;

	for (i = 0; i < nb_pkts; i++) {
		offset = offsetof(struct ip, ip_dst);
		dst_ip[i] = *rte_pktmbuf_mtod_offset(pkts[i],
				uint32_t *, offset);
		dst_ip[i] = rte_be_to_cpu_32(dst_ip[i]);
	}

	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, nb_pkts);

	for (i = 0; i < nb_pkts; i++) {
		if ((hop[i] & RTE_LPM_LOOKUP_SUCCESS) == 0) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}
		send_single_packet(pkts[i], hop[i] & 0xff);
	}
}
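/* IPv6 counterpart of route4_pkts(), using the LPM6 table. */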
static inline void
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
	int16_t hop[MAX_PKT_BURST * 2];
	uint8_t dst_ip[MAX_PKT_BURST * 2][16];
	uint8_t *ip6_dst;
	uint16_t i, offset;

	if (nb_pkts == 0)
		return;

	for (i = 0; i < nb_pkts; i++) {
		offset = offsetof(struct ip6_hdr, ip6_dst);
		ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, offset);
		memcpy(&dst_ip[i][0], ip6_dst, 16);
	}

	rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip,
			hop, nb_pkts);

	for (i = 0; i < nb_pkts; i++) {
		if (hop[i] == -1) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}
		send_single_packet(pkts[i], hop[i] & 0xff);
	}
}
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
		uint8_t nb_pkts, uint8_t portid)
{
	struct ipsec_traffic traffic;

	prepare_traffic(pkts, &traffic, nb_pkts);

	if (unlikely(single_sa)) {
		if (UNPROTECTED_PORT(portid))
			process_pkts_inbound_nosp(&qconf->inbound, &traffic);
		else
			process_pkts_outbound_nosp(&qconf->outbound, &traffic);
	} else {
		if (UNPROTECTED_PORT(portid))
			process_pkts_inbound(&qconf->inbound, &traffic);
		else
			process_pkts_outbound(&qconf->outbound, &traffic);
	}

	route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
	route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
}
static inline void
drain_buffers(struct lcore_conf *qconf)
{
	struct buffer *buf;
	uint32_t portid;

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		buf = &qconf->tx_mbufs[portid];
		if (buf->len == 0)
			continue;
		send_burst(qconf, buf->len, portid);
		buf->len = 0;
	}
}
/* main processing loop */
static int32_t
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint32_t lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int32_t i, nb_rx;
	uint8_t portid, queueid;
	struct lcore_conf *qconf;
	int32_t socket_id;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	struct lcore_rx_queue *rxql;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);

	qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
	qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
	qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	qconf->inbound.cdev_map = cdev_map_in;
	qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	qconf->outbound.cdev_map = cdev_map_out;

	if (qconf->nb_rx_queue == 0) {
		RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->nb_rx_queue; i++) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		RTE_LOG(INFO, IPSEC,
			" -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	while (1) {
		cur_tsc = rte_rdtsc();

		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;

		if (unlikely(diff_tsc > drain_tsc)) {
			drain_buffers(qconf);
			prev_tsc = cur_tsc;
		}

		/* Read packet from RX queues */
		for (i = 0; i < qconf->nb_rx_queue; ++i) {
			portid = rxql[i].port_id;
			queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
					pkts, MAX_PKT_BURST);

			if (nb_rx > 0)
				process_pkts(qconf, pkts, nb_rx, portid);
		}
	}
}
static int32_t
check_params(void)
{
	uint8_t lcore, portid, nb_ports;
	uint16_t i;
	int32_t socket_id;

	if (lcore_params == NULL) {
		printf("Error: No port/queue/core mappings\n");
		return -1;
	}

	nb_ports = rte_eth_dev_count();

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("error: lcore %hhu is not enabled in "
				"lcore mask\n", lcore);
			return -1;
		}
		socket_id = rte_lcore_to_socket_id(lcore);
		if (socket_id != 0 && numa_on == 0) {
			printf("warning: lcore %hhu is on socket %d "
				"with numa off\n",
				lcore, socket_id);
		}
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (portid >= nb_ports) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}
static uint8_t
get_port_nb_rx_queues(const uint8_t port)
{
	int32_t queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port &&
				lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}
static int32_t
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
					nb_rx_queue + 1, lcore);
			return -1;
		}
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].nb_rx_queue++;
	}
	return 0;
}
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK -P -u PORTMASK"
		" --"OPTION_CONFIG" (port,queue,lcore)[,(port,queue,lcore)]"
		" --single-sa SAIDX -f CONFIG_FILE\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  -P : enable promiscuous mode\n"
		"  -u PORTMASK: hexadecimal bitmask of unprotected ports\n"
		"  --"OPTION_CONFIG": (port,queue,lcore): "
		"rx queues configuration\n"
		"  --single-sa SAIDX: use single SA index for outbound, "
		"bypassing the SP\n"
		"  -f CONFIG_FILE: Configuration file path\n",
		prgname);
}
static int32_t
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if ((pm == 0) && errno)
		return -1;

	return pm;
}
static int32_t
parse_decimal(const char *str)
{
	char *end = NULL;
	unsigned long num;

	num = strtoul(str, &end, 10);
	if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	return num;
}
static int32_t
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int32_t i;
	uint32_t size;

	nb_lcore_params = 0;

	while ((p = strchr(p0, '(')) != NULL) {
		++p;
		p0 = strchr(p, ')');
		if (p0 == NULL)
			return -1;

		size = p0 - p;
		if (size >= sizeof(s))
			return -1;

		snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
				_NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		lcore_params_array[nb_lcore_params].port_id =
			(uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id =
			(uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id =
			(uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
#define __STRNCMP(name, opt) (!strncmp(name, opt, sizeof(opt)))

static int32_t
parse_args_long_options(struct option *lgopts, int32_t option_index)
{
	int32_t ret = -1;
	const char *optname = lgopts[option_index].name;

	if (__STRNCMP(optname, OPTION_CONFIG)) {
		ret = parse_config(optarg);
		if (ret)
			printf("invalid config\n");
	}

	if (__STRNCMP(optname, OPTION_SINGLE_SA)) {
		ret = parse_decimal(optarg);
		if (ret != -1) {
			single_sa = 1;
			single_sa_idx = ret;
			printf("Configured with single SA index %u\n",
					single_sa_idx);
			ret = 0;
		}
	}

	return ret;
}
#undef __STRNCMP
static int32_t
parse_args(int32_t argc, char **argv)
{
	int32_t opt, ret;
	char **argvopt;
	int32_t option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{OPTION_CONFIG, 1, 0, 0},
		{OPTION_SINGLE_SA, 1, 0, 0},
		{NULL, 0, 0, 0}
	};
	int32_t f_present = 0;

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:Pu:f:",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 'P':
			printf("Promiscuous mode selected\n");
			promiscuous_on = 1;
			break;
		case 'u':
			unprotected_port_mask = parse_portmask(optarg);
			if (unprotected_port_mask == 0) {
				printf("invalid unprotected portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 'f':
			if (f_present == 1) {
				printf("\"-f\" option present more than "
					"once!\n");
				print_usage(prgname);
				return -1;
			}
			if (parse_cfg_file(optarg) < 0) {
				printf("parsing file \"%s\" failed\n",
					optarg);
				print_usage(prgname);
				return -1;
			}
			f_present = 1;
			break;
		case 0:
			if (parse_args_long_options(lgopts, option_index)) {
				print_usage(prgname);
				return -1;
			}
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (f_present == 0) {
		printf("Mandatory option \"-f\" not present\n");
		return -1;
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */

	return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(uint32_t)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex\n"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
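/*
 * Record one (lcore, cipher algo, auth algo) -> queue-pair binding in the
 * given hash map, allocating a new tbl[] slot in the lcore's ipsec_ctx the
 * first time this cryptodev is seen. Returns 1 when a new queue pair was
 * consumed, 0 otherwise.
 */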
static int32_t
add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
		uint16_t qp, struct lcore_params *params,
		struct ipsec_ctx *ipsec_ctx,
		const struct rte_cryptodev_capabilities *cipher,
		const struct rte_cryptodev_capabilities *auth)
{
	int32_t ret = 0;
	unsigned long i;
	struct cdev_key key = { 0 };

	key.lcore_id = params->lcore_id;
	if (cipher)
		key.cipher_algo = cipher->sym.cipher.algo;
	if (auth)
		key.auth_algo = auth->sym.auth.algo;

	ret = rte_hash_lookup(map, &key);
	if (ret != -ENOENT)
		return 0;

	for (i = 0; i < ipsec_ctx->nb_qps; i++)
		if (ipsec_ctx->tbl[i].id == cdev_id)
			break;

	if (i == ipsec_ctx->nb_qps) {
		if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
			printf("Maximum number of crypto devices assigned to "
				"a core, increase MAX_QP_PER_LCORE value\n");
			return 0;
		}
		ipsec_ctx->tbl[i].id = cdev_id;
		ipsec_ctx->tbl[i].qp = qp;
		ipsec_ctx->nb_qps++;
		printf("%s cdev mapping: lcore %u using cdev %u qp %u "
				"(cdev_id_qp %lu)\n", str, key.lcore_id,
				cdev_id, qp, i);
	}

	ret = rte_hash_add_key_data(map, &key, (void *)i);
	if (ret < 0) {
		printf("Failed to insert cdev mapping for (lcore %u, "
				"cdev %u, qp %u), errno %d\n",
				key.lcore_id, ipsec_ctx->tbl[i].id,
				ipsec_ctx->tbl[i].qp, ret);
		return 0;
	}

	return 1;
}
static int32_t
add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
		uint16_t qp, struct lcore_params *params)
{
	int32_t ret = 0;
	const struct rte_cryptodev_capabilities *i, *j;
	struct rte_hash *map;
	struct lcore_conf *qconf;
	struct ipsec_ctx *ipsec_ctx;
	const char *str;

	qconf = &lcore_conf[params->lcore_id];

	if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
		map = cdev_map_out;
		ipsec_ctx = &qconf->outbound;
		str = "Outbound";
	} else {
		map = cdev_map_in;
		ipsec_ctx = &qconf->inbound;
		str = "Inbound";
	}

	/* Required cryptodevs with operation chaining */
	if (!(dev_info->feature_flags &
				RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
		return ret;

	for (i = dev_info->capabilities;
			i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			continue;

		for (j = dev_info->capabilities;
				j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
			if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
				continue;

			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
				continue;

			ret |= add_mapping(map, str, cdev_id, qp, params,
					ipsec_ctx, i, j);
		}
	}

	return ret;
}
static int32_t
cryptodevs_init(void)
{
	struct rte_cryptodev_config dev_conf;
	struct rte_cryptodev_qp_conf qp_conf;
	uint16_t idx, max_nb_qps, qp, i;
	int16_t cdev_id;
	struct rte_hash_parameters params = { 0 };

	params.entries = CDEV_MAP_ENTRIES;
	params.key_len = sizeof(struct cdev_key);
	params.hash_func = rte_jhash;
	params.hash_func_init_val = 0;
	params.socket_id = rte_socket_id();

	params.name = "cdev_map_in";
	cdev_map_in = rte_hash_create(&params);
	if (cdev_map_in == NULL)
		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
				rte_errno);

	params.name = "cdev_map_out";
	cdev_map_out = rte_hash_create(&params);
	if (cdev_map_out == NULL)
		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
				rte_errno);

	printf("lcore/cryptodev/qp mappings:\n");

	idx = 0;
	/* Start from last cdev id to give HW priority */
	for (cdev_id = rte_cryptodev_count() - 1; cdev_id >= 0; cdev_id--) {
		struct rte_cryptodev_info cdev_info;

		rte_cryptodev_info_get(cdev_id, &cdev_info);

		if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
			max_nb_qps = cdev_info.max_nb_queue_pairs;
		else
			max_nb_qps = nb_lcore_params;

		qp = 0;
		i = 0;
		while (qp < max_nb_qps && i < nb_lcore_params) {
			if (add_cdev_mapping(&cdev_info, cdev_id, qp,
						&lcore_params[idx]))
				qp++;
			idx++;
			idx = idx % nb_lcore_params;
			i++;
		}

		if (qp == 0)
			continue;

		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
		dev_conf.nb_queue_pairs = qp;
		dev_conf.session_mp.nb_objs = CDEV_MP_NB_OBJS;
		dev_conf.session_mp.cache_size = CDEV_MP_CACHE_SZ;

		if (rte_cryptodev_configure(cdev_id, &dev_conf))
			rte_panic("Failed to initialize cryptodev %u\n",
					cdev_id);

		qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
		for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
			if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
					&qp_conf, dev_conf.socket_id))
				rte_panic("Failed to setup queue %u for "
						"cdev_id %u\n", 0, cdev_id);

		if (rte_cryptodev_start(cdev_id))
			rte_panic("Failed to start cryptodev %u\n",
					cdev_id);
	}

	printf("\n");
	return 0;
}
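/*
 * Configure one Ethernet port: record its MAC address in ethaddr_tbl, size
 * the RX queue count from the --config mapping and the TX queue count from
 * the number of lcores, then set up one TX queue per enabled lcore plus
 * the RX queues assigned to this port.
 */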
static void
port_init(uint8_t portid)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	uint16_t nb_tx_queue, nb_rx_queue;
	uint16_t tx_queueid, rx_queueid, queue, lcore_id;
	int32_t ret, socket_id;
	struct lcore_conf *qconf;
	struct ether_addr ethaddr;

	rte_eth_dev_info_get(portid, &dev_info);

	printf("Configuring device port %u:\n", portid);

	rte_eth_macaddr_get(portid, &ethaddr);
	ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ethaddr);
	print_ethaddr("Address: ", &ethaddr);
	printf("\n");

	nb_rx_queue = get_port_nb_rx_queues(portid);
	nb_tx_queue = nb_lcores;

	if (nb_rx_queue > dev_info.max_rx_queues)
		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
				"(max rx queue is %u)\n",
				nb_rx_queue, dev_info.max_rx_queues);

	if (nb_tx_queue > dev_info.max_tx_queues)
		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
				"(max tx queue is %u)\n",
				nb_tx_queue, dev_info.max_tx_queues);

	printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
			nb_rx_queue, nb_tx_queue);

	ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
			&port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot configure device: "
				"err=%d, port=%d\n", ret, portid);

	/* init one TX queue per lcore */
	tx_queueid = 0;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
		else
			socket_id = 0;

		printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);

		txconf = &dev_info.default_txconf;
		txconf->txq_flags = 0;

		ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
				socket_id, txconf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
					"err=%d, port=%d\n", ret, portid);

		qconf = &lcore_conf[lcore_id];
		qconf->tx_queue_id[portid] = tx_queueid;
		tx_queueid++;

		/* init RX queues */
		for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
			if (portid != qconf->rx_queue_list[queue].port_id)
				continue;

			rx_queueid = qconf->rx_queue_list[queue].queue_id;

			printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
					socket_id);

			ret = rte_eth_rx_queue_setup(portid, rx_queueid,
					nb_rxd, socket_id, NULL,
					socket_ctx[socket_id].mbuf_pool);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_rx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);
		}
	}
	printf("\n");
}
static void
pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
{
	char s[64];

	snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
	ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
			MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
			RTE_MBUF_DEFAULT_BUF_SIZE,
			socket_id);
	if (ctx->mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
				socket_id);
	else
		printf("Allocated mbuf pool on socket %d\n", socket_id);
}
int32_t
main(int32_t argc, char **argv)
{
	int32_t ret;
	uint32_t lcore_id, nb_ports;
	uint8_t portid, socket_id;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid parameters\n");

	if ((unprotected_port_mask & enabled_port_mask) !=
			unprotected_port_mask)
		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
				unprotected_port_mask);

	nb_ports = rte_eth_dev_count();

	if (check_params() < 0)
		rte_exit(EXIT_FAILURE, "check_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	nb_lcores = rte_lcore_count();

	/* Replicate each context per socket */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
		else
			socket_id = 0;

		if (socket_ctx[socket_id].mbuf_pool)
			continue;

		sa_init(&socket_ctx[socket_id], socket_id);

		sp4_init(&socket_ctx[socket_id], socket_id);

		sp6_init(&socket_ctx[socket_id], socket_id);

		rt_init(&socket_ctx[socket_id], socket_id);

		pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
	}

	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		port_init(portid);
	}

	cryptodevs_init();

	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
					"err=%d, port=%d\n", ret, portid);

		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on)
			rte_eth_promiscuous_enable(portid);
	}

	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}