/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_lpm.h>
#include <rte_string_fns.h>

#include "main.h"
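
/*
 * The app global, the APP_* sizing macros and the app_* helpers
 * (app_is_socket_used(), app_get_lcores_worker(), ...) are assumed to be
 * declared in the example's "main.h", as in the DPDK load_balancer sample.
 */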
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_RSS,
		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
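
/*
 * NB: the RSS hash mask requested above spreads incoming packets across a
 * port's RX queues; it is trimmed to what each device actually supports in
 * app_init_nics() below.
 */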
static void
app_assign_worker_ids(void)
{
	uint32_t lcore, worker_id;

	/* Assign ID for each worker */
	worker_id = 0;
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore++) {
		struct app_lcore_params_worker *lp_worker =
			&app.lcore_params[lcore].worker;

		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
			continue;
		}

		lp_worker->worker_id = worker_id;
		worker_id++;
	}
}
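
/*
 * Create one mbuf pool per CPU socket in use, so that every lcore draws its
 * packet buffers from socket-local memory, then point each enabled lcore at
 * the pool of its own socket.
 */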
static void
app_init_mbuf_pools(void)
{
	unsigned socket, lcore;

	/* Init the buffer pools */
	for (socket = 0; socket < APP_MAX_SOCKETS; socket++) {
		char name[32];

		if (app_is_socket_used(socket) == 0) {
			continue;
		}

		snprintf(name, sizeof(name), "mbuf_pool_%u", socket);
		printf("Creating the mbuf pool for socket %u ...\n", socket);
		app.pools[socket] = rte_pktmbuf_pool_create(
			name, APP_DEFAULT_MEMPOOL_BUFFERS,
			APP_DEFAULT_MEMPOOL_CACHE_SIZE,
			0, APP_DEFAULT_MBUF_DATA_SIZE, socket);
		if (app.pools[socket] == NULL) {
			rte_panic("Cannot create mbuf pool on socket %u\n", socket);
		}
	}

	/* Attach each enabled lcore to the pool of its socket */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore++) {
		if (app.lcore_params[lcore].type == e_APP_LCORE_DISABLED) {
			continue;
		}

		socket = rte_lcore_to_socket_id(lcore);
		app.lcore_params[lcore].pool = app.pools[socket];
	}
}
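
/*
 * Replicate the LPM (Longest Prefix Match) routing table on every socket in
 * use and load the configured rules into each copy; every worker lcore then
 * performs route lookups against its socket-local table.
 */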
static void
app_init_lpm_tables(void)
{
	unsigned socket, lcore;

	/* Init the LPM tables */
	for (socket = 0; socket < APP_MAX_SOCKETS; socket++) {
		char name[32];
		uint32_t rule;

		if (app_is_socket_used(socket) == 0) {
			continue;
		}

		struct rte_lpm_config lpm_config;

		lpm_config.max_rules = APP_MAX_LPM_RULES;
		lpm_config.number_tbl8s = 256;
		lpm_config.flags = 0;
		snprintf(name, sizeof(name), "lpm_table_%u", socket);
		printf("Creating the LPM table for socket %u ...\n", socket);
		app.lpm_tables[socket] = rte_lpm_create(
			name,
			socket,
			&lpm_config);
		if (app.lpm_tables[socket] == NULL) {
			rte_panic("Unable to create LPM table on socket %u\n", socket);
		}

		for (rule = 0; rule < app.n_lpm_rules; rule++) {
			int ret;

			ret = rte_lpm_add(app.lpm_tables[socket],
				app.lpm_rules[rule].ip,
				app.lpm_rules[rule].depth,
				app.lpm_rules[rule].if_out);

			if (ret < 0) {
				rte_panic("Unable to add entry %u (%x/%u => %u) to the LPM table on socket %u (%d)\n",
					(unsigned) rule,
					(unsigned) app.lpm_rules[rule].ip,
					(unsigned) app.lpm_rules[rule].depth,
					(unsigned) app.lpm_rules[rule].if_out,
					socket, ret);
			}
		}
	}

	for (lcore = 0; lcore < APP_MAX_LCORES; lcore++) {
		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
			continue;
		}

		socket = rte_lcore_to_socket_id(lcore);
		app.lcore_params[lcore].worker.lpm_table = app.lpm_tables[socket];
	}
}
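
/*
 * Software rings carry packets from the I/O RX lcores to the worker lcores.
 * Each (I/O RX lcore, worker lcore) pair gets its own ring, so
 * single-producer/single-consumer rings are sufficient.
 */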
static void
app_init_rings_rx(void)
{
	unsigned lcore;

	/* Initialize the rings for the RX side */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore++) {
		struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
		unsigned socket_io, lcore_worker;

		if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
		    (lp_io->rx.n_nic_queues == 0)) {
			continue;
		}

		socket_io = rte_lcore_to_socket_id(lcore);

		for (lcore_worker = 0; lcore_worker < APP_MAX_LCORES; lcore_worker++) {
			char name[32];
			struct app_lcore_params_worker *lp_worker =
				&app.lcore_params[lcore_worker].worker;
			struct rte_ring *ring = NULL;

			if (app.lcore_params[lcore_worker].type != e_APP_LCORE_WORKER) {
				continue;
			}

			printf("Creating ring to connect I/O lcore %u (socket %u) with worker lcore %u ...\n",
				lcore, socket_io, lcore_worker);
			snprintf(name, sizeof(name), "app_ring_rx_s%u_io%u_w%u",
				socket_io, lcore, lcore_worker);
			ring = rte_ring_create(
				name,
				app.ring_rx_size,
				socket_io,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
			if (ring == NULL) {
				rte_panic("Cannot create ring to connect I/O core %u with worker core %u\n",
					lcore, lcore_worker);
			}

			lp_io->rx.rings[lp_io->rx.n_rings] = ring;
			lp_io->rx.n_rings++;

			lp_worker->rings_in[lp_worker->n_rings_in] = ring;
			lp_worker->n_rings_in++;
		}
	}

	/* Sanity check: every I/O RX lcore feeds every worker and vice versa */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore++) {
		struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;

		if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
		    (lp_io->rx.n_nic_queues == 0)) {
			continue;
		}

		if (lp_io->rx.n_rings != app_get_lcores_worker()) {
			rte_panic("Algorithmic error (I/O RX rings)\n");
		}
	}

	for (lcore = 0; lcore < APP_MAX_LCORES; lcore++) {
		struct app_lcore_params_worker *lp_worker =
			&app.lcore_params[lcore].worker;

		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
			continue;
		}

		if (lp_worker->n_rings_in != app_get_lcores_io_rx()) {
			rte_panic("Algorithmic error (worker input rings)\n");
		}
	}
}
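
/*
 * On the TX side, each worker gets one output ring per enabled NIC port,
 * drained by the I/O lcore that owns that port's TX queue.
 */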
static void
app_init_rings_tx(void)
{
	unsigned lcore;

	/* Initialize the rings for the TX side */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore++) {
		struct app_lcore_params_worker *lp_worker =
			&app.lcore_params[lcore].worker;
		unsigned port;

		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
			continue;
		}

		for (port = 0; port < APP_MAX_NIC_PORTS; port++) {
			char name[32];
			struct app_lcore_params_io *lp_io = NULL;
			struct rte_ring *ring;
			uint32_t socket_io, lcore_io;

			if (app.nic_tx_port_mask[port] == 0) {
				continue;
			}

			if (app_get_lcore_for_nic_tx(port, &lcore_io) < 0) {
				rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n",
					port);
			}

			lp_io = &app.lcore_params[lcore_io].io;
			socket_io = rte_lcore_to_socket_id(lcore_io);

			printf("Creating ring to connect worker lcore %u with TX port %u (through I/O lcore %u) (socket %u) ...\n",
				lcore, port, (unsigned)lcore_io, (unsigned)socket_io);
			snprintf(name, sizeof(name), "app_ring_tx_s%u_w%u_p%u",
				socket_io, lcore, port);
			ring = rte_ring_create(
				name,
				app.ring_tx_size,
				socket_io,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
			if (ring == NULL) {
				rte_panic("Cannot create ring to connect worker core %u with TX port %u\n",
					lcore, port);
			}

			lp_worker->rings_out[port] = ring;
			lp_io->tx.rings[port][lp_worker->worker_id] = ring;
		}
	}

	/* Sanity check: every (TX port, worker) pair has a ring */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore++) {
		struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
		unsigned i;

		if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
		    (lp_io->tx.n_nic_ports == 0)) {
			continue;
		}

		for (i = 0; i < lp_io->tx.n_nic_ports; i++) {
			unsigned port, j;

			port = lp_io->tx.nic_ports[i];
			for (j = 0; j < app_get_lcores_worker(); j++) {
				if (lp_io->tx.rings[port][j] == NULL) {
					rte_panic("Algorithmic error (I/O TX rings)\n");
				}
			}
		}
	}
}
/* Check the link status of all ports in up to 9s and print the final status */
static void
check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */

	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	uint32_t n_rx_queues, n_tx_queues;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			n_rx_queues = app_get_nic_rx_queues_per_port(portid);
			n_tx_queues = app.nic_tx_port_mask[portid];
			if ((n_rx_queues == 0) && (n_tx_queues == 0))
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port%d Link Up - speed %uMbps - %s\n",
						portid, link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
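
/*
 * Configure every enabled NIC port: apply the port configuration (with
 * offloads trimmed to device capabilities), set up the RX queues and at most
 * one TX queue (queue 0), then start the port.
 */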
static void
app_init_nics(void)
{
	unsigned socket;
	uint32_t lcore;
	uint16_t port;
	uint8_t queue;
	int ret;
	uint32_t n_rx_queues, n_tx_queues;

	/* Init NIC ports and queues, then start the ports */
	for (port = 0; port < APP_MAX_NIC_PORTS; port++) {
		struct rte_mempool *pool;
		uint16_t nic_rx_ring_size;
		uint16_t nic_tx_ring_size;
		struct rte_eth_rxconf rxq_conf;
		struct rte_eth_txconf txq_conf;
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;

		n_rx_queues = app_get_nic_rx_queues_per_port(port);
		n_tx_queues = app.nic_tx_port_mask[port];

		if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
			continue;
		}

		/* Init port */
		printf("Initializing NIC port %u ...\n", port);
		rte_eth_dev_info_get(port, &dev_info);
		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				DEV_TX_OFFLOAD_MBUF_FAST_FREE;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			printf("Port %u modified RSS hash function based on hardware support, "
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				port,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		ret = rte_eth_dev_configure(
			port,
			(uint8_t) n_rx_queues,
			(uint8_t) n_tx_queues,
			&local_port_conf);
		if (ret < 0) {
			rte_panic("Cannot init NIC port %u (%d)\n", port, ret);
		}
		rte_eth_promiscuous_enable(port);

		nic_rx_ring_size = app.nic_rx_ring_size;
		nic_tx_ring_size = app.nic_tx_ring_size;
		ret = rte_eth_dev_adjust_nb_rx_tx_desc(
			port, &nic_rx_ring_size, &nic_tx_ring_size);
		if (ret < 0) {
			rte_panic("Cannot adjust number of descriptors for port %u (%d)\n",
				port, ret);
		}
		app.nic_rx_ring_size = nic_rx_ring_size;
		app.nic_tx_ring_size = nic_tx_ring_size;

		rxq_conf = dev_info.default_rxconf;
		rxq_conf.offloads = local_port_conf.rxmode.offloads;
		/* Init RX queues */
		for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue++) {
			if (app.nic_rx_queue_mask[port][queue] == 0) {
				continue;
			}

			app_get_lcore_for_nic_rx(port, queue, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			pool = app.lcore_params[lcore].pool;

			printf("Initializing NIC port %u RX queue %u ...\n",
				port, queue);
			ret = rte_eth_rx_queue_setup(
				port,
				queue,
				(uint16_t) app.nic_rx_ring_size,
				socket,
				&rxq_conf,
				pool);
			if (ret < 0) {
				rte_panic("Cannot init RX queue %u for port %u (%d)\n",
					queue, port, ret);
			}
		}

		txq_conf = dev_info.default_txconf;
		txq_conf.offloads = local_port_conf.txmode.offloads;
		/* Init TX queue */
		if (app.nic_tx_port_mask[port] == 1) {
			app_get_lcore_for_nic_tx(port, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			printf("Initializing NIC port %u TX queue 0 ...\n",
				port);
			ret = rte_eth_tx_queue_setup(
				port,
				0,
				(uint16_t) app.nic_tx_ring_size,
				socket,
				&txq_conf);
			if (ret < 0) {
				rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
					port, ret);
			}
		}

		/* Start port */
		ret = rte_eth_dev_start(port);
		if (ret < 0) {
			rte_panic("Cannot start port %d (%d)\n", port, ret);
		}
	}

	check_all_ports_link_status(APP_MAX_NIC_PORTS, (~0x0));
}
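
/*
 * Top-level initialization entry point; in the load_balancer example it is
 * called from main() after rte_eal_init() and argument parsing.
 */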
void
app_init(void)
{
	app_assign_worker_ids();
	app_init_mbuf_pools();
	app_init_lpm_tables();
	app_init_rings_rx();
	app_init_rings_tx();
	app_init_nics();

	printf("Initialization completed.\n");
}
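
/*
 * Usage sketch (hypothetical caller, modeled on the example's main.c;
 * app_lcore_main_loop is assumed to be the per-lcore entry point):
 *
 *	int
 *	main(int argc, char **argv)
 *	{
 *		int ret = rte_eal_init(argc, argv);
 *		if (ret < 0)
 *			rte_panic("Cannot init EAL\n");
 *
 *		app_init();
 *		rte_eal_mp_remote_launch(app_lcore_main_loop, NULL, CALL_MASTER);
 *		rte_eal_mp_wait_lcore();
 *		return 0;
 *	}
 */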