1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
10 #include <sys/types.h>
11 #include <sys/queue.h>
12 #include <netinet/in.h>
19 #include <rte_common.h>
21 #include <rte_malloc.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
25 #include <rte_launch.h>
26 #include <rte_atomic.h>
27 #include <rte_cycles.h>
28 #include <rte_prefetch.h>
29 #include <rte_lcore.h>
30 #include <rte_per_lcore.h>
31 #include <rte_branch_prediction.h>
32 #include <rte_interrupts.h>
33 #include <rte_random.h>
34 #include <rte_debug.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev.h>
37 #include <rte_mempool.h>
40 #define RTE_LOGTYPE_LSI RTE_LOGTYPE_USER1
44 #define MAX_PKT_BURST 32
45 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
48 * Configurable number of RX/TX ring descriptors
50 #define RTE_TEST_RX_DESC_DEFAULT 1024
51 #define RTE_TEST_TX_DESC_DEFAULT 1024
52 static uint16_t nb_rxd
= RTE_TEST_RX_DESC_DEFAULT
;
53 static uint16_t nb_txd
= RTE_TEST_TX_DESC_DEFAULT
;
55 /* ethernet addresses of ports */
56 static struct rte_ether_addr lsi_ports_eth_addr
[RTE_MAX_ETHPORTS
];
58 /* mask of enabled ports */
59 static uint32_t lsi_enabled_port_mask
= 0;
61 static unsigned int lsi_rx_queue_per_lcore
= 1;
63 /* destination port for L2 forwarding */
64 static unsigned lsi_dst_ports
[RTE_MAX_ETHPORTS
] = {0};
66 #define MAX_PKT_BURST 32
68 #define MAX_RX_QUEUE_PER_LCORE 16
69 #define MAX_TX_QUEUE_PER_PORT 16
70 struct lcore_queue_conf
{
72 unsigned rx_port_list
[MAX_RX_QUEUE_PER_LCORE
];
74 } __rte_cache_aligned
;
75 struct lcore_queue_conf lcore_queue_conf
[RTE_MAX_LCORE
];
77 struct rte_eth_dev_tx_buffer
*tx_buffer
[RTE_MAX_ETHPORTS
];
79 static struct rte_eth_conf port_conf
= {
84 .mq_mode
= ETH_MQ_TX_NONE
,
87 .lsc
= 1, /**< lsc interrupt feature enabled */
91 struct rte_mempool
* lsi_pktmbuf_pool
= NULL
;
93 /* Per-port statistics struct */
94 struct lsi_port_statistics
{
98 } __rte_cache_aligned
;
99 struct lsi_port_statistics port_statistics
[RTE_MAX_ETHPORTS
];
101 /* A tsc-based timer responsible for triggering statistics printout */
102 #define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
103 #define MAX_TIMER_PERIOD 86400 /* 1 day max */
104 static int64_t timer_period
= 10 * TIMER_MILLISECOND
* 1000; /* default period is 10 seconds */
106 /* Print out statistics on packets dropped */
110 struct rte_eth_link link
;
111 uint64_t total_packets_dropped
, total_packets_tx
, total_packets_rx
;
114 total_packets_dropped
= 0;
115 total_packets_tx
= 0;
116 total_packets_rx
= 0;
118 const char clr
[] = { 27, '[', '2', 'J', '\0' };
119 const char topLeft
[] = { 27, '[', '1', ';', '1', 'H','\0' };
122 /* Clear screen and move to top left */
123 printf("%s%s", clr
, topLeft
);
125 printf("\nPort statistics ====================================");
127 for (portid
= 0; portid
< RTE_MAX_ETHPORTS
; portid
++) {
128 /* skip ports that are not enabled */
129 if ((lsi_enabled_port_mask
& (1 << portid
)) == 0)
132 memset(&link
, 0, sizeof(link
));
133 link_get_err
= rte_eth_link_get_nowait(portid
, &link
);
134 printf("\nStatistics for port %u ------------------------------"
135 "\nLink status: %25s"
137 "\nLink duplex: %25s"
138 "\nPackets sent: %24"PRIu64
139 "\nPackets received: %20"PRIu64
140 "\nPackets dropped: %21"PRIu64
,
142 link_get_err
< 0 ? "Link get failed" :
143 (link
.link_status
? "Link up" : "Link down"),
144 link_get_err
< 0 ? 0 :
145 (unsigned int)link
.link_speed
,
146 link_get_err
< 0 ? "Link get failed" :
147 (link
.link_duplex
== ETH_LINK_FULL_DUPLEX
? \
148 "full-duplex" : "half-duplex"),
149 port_statistics
[portid
].tx
,
150 port_statistics
[portid
].rx
,
151 port_statistics
[portid
].dropped
);
153 total_packets_dropped
+= port_statistics
[portid
].dropped
;
154 total_packets_tx
+= port_statistics
[portid
].tx
;
155 total_packets_rx
+= port_statistics
[portid
].rx
;
157 printf("\nAggregate statistics ==============================="
158 "\nTotal packets sent: %18"PRIu64
159 "\nTotal packets received: %14"PRIu64
160 "\nTotal packets dropped: %15"PRIu64
,
163 total_packets_dropped
);
164 printf("\n====================================================\n");
168 lsi_simple_forward(struct rte_mbuf
*m
, unsigned portid
)
170 struct rte_ether_hdr
*eth
;
172 unsigned dst_port
= lsi_dst_ports
[portid
];
174 struct rte_eth_dev_tx_buffer
*buffer
;
176 eth
= rte_pktmbuf_mtod(m
, struct rte_ether_hdr
*);
178 /* 02:00:00:00:00:xx */
179 tmp
= ð
->d_addr
.addr_bytes
[0];
180 *((uint64_t *)tmp
) = 0x000000000002 + ((uint64_t)dst_port
<< 40);
183 rte_ether_addr_copy(&lsi_ports_eth_addr
[dst_port
], ð
->s_addr
);
185 buffer
= tx_buffer
[dst_port
];
186 sent
= rte_eth_tx_buffer(dst_port
, 0, buffer
, m
);
188 port_statistics
[dst_port
].tx
+= sent
;
191 /* main processing loop */
195 struct rte_mbuf
*pkts_burst
[MAX_PKT_BURST
];
199 uint64_t prev_tsc
, diff_tsc
, cur_tsc
, timer_tsc
;
200 unsigned i
, j
, portid
, nb_rx
;
201 struct lcore_queue_conf
*qconf
;
202 const uint64_t drain_tsc
= (rte_get_tsc_hz() + US_PER_S
- 1) / US_PER_S
*
204 struct rte_eth_dev_tx_buffer
*buffer
;
209 lcore_id
= rte_lcore_id();
210 qconf
= &lcore_queue_conf
[lcore_id
];
212 if (qconf
->n_rx_port
== 0) {
213 RTE_LOG(INFO
, LSI
, "lcore %u has nothing to do\n", lcore_id
);
217 RTE_LOG(INFO
, LSI
, "entering main loop on lcore %u\n", lcore_id
);
219 for (i
= 0; i
< qconf
->n_rx_port
; i
++) {
221 portid
= qconf
->rx_port_list
[i
];
222 RTE_LOG(INFO
, LSI
, " -- lcoreid=%u portid=%u\n", lcore_id
,
228 cur_tsc
= rte_rdtsc();
231 * TX burst queue drain
233 diff_tsc
= cur_tsc
- prev_tsc
;
234 if (unlikely(diff_tsc
> drain_tsc
)) {
236 for (i
= 0; i
< qconf
->n_rx_port
; i
++) {
238 portid
= lsi_dst_ports
[qconf
->rx_port_list
[i
]];
239 buffer
= tx_buffer
[portid
];
241 sent
= rte_eth_tx_buffer_flush(portid
, 0, buffer
);
243 port_statistics
[portid
].tx
+= sent
;
247 /* if timer is enabled */
248 if (timer_period
> 0) {
250 /* advance the timer */
251 timer_tsc
+= diff_tsc
;
253 /* if timer has reached its timeout */
254 if (unlikely(timer_tsc
>= (uint64_t) timer_period
)) {
256 /* do this only on master core */
257 if (lcore_id
== rte_get_master_lcore()) {
259 /* reset the timer */
269 * Read packet from RX queues
271 for (i
= 0; i
< qconf
->n_rx_port
; i
++) {
273 portid
= qconf
->rx_port_list
[i
];
274 nb_rx
= rte_eth_rx_burst((uint8_t) portid
, 0,
275 pkts_burst
, MAX_PKT_BURST
);
277 port_statistics
[portid
].rx
+= nb_rx
;
279 for (j
= 0; j
< nb_rx
; j
++) {
281 rte_prefetch0(rte_pktmbuf_mtod(m
, void *));
282 lsi_simple_forward(m
, portid
);
289 lsi_launch_one_lcore(__rte_unused
void *dummy
)
/* Print command-line usage for this application to stdout. */
static void
lsi_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
		" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		" -q NQ: number of queue (=ports) per lcore (default is 1)\n"
		" -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
			prgname);
}
/* Parse a hexadecimal port mask string.
 * Returns the mask (> 0) on success, -1 on empty/garbage input or a
 * zero mask (no ports enabled is treated as an error by the caller).
 */
static int
lsi_parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}
324 lsi_parse_nqueue(const char *q_arg
)
329 /* parse hexadecimal string */
330 n
= strtoul(q_arg
, &end
, 10);
331 if ((q_arg
[0] == '\0') || (end
== NULL
) || (*end
!= '\0'))
335 if (n
>= MAX_RX_QUEUE_PER_LCORE
)
342 lsi_parse_timer_period(const char *q_arg
)
347 /* parse number string */
348 n
= strtol(q_arg
, &end
, 10);
349 if ((q_arg
[0] == '\0') || (end
== NULL
) || (*end
!= '\0'))
351 if (n
>= MAX_TIMER_PERIOD
)
357 /* Parse the argument given in the command line of the application */
359 lsi_parse_args(int argc
, char **argv
)
364 char *prgname
= argv
[0];
365 static struct option lgopts
[] = {
371 while ((opt
= getopt_long(argc
, argvopt
, "p:q:T:",
372 lgopts
, &option_index
)) != EOF
) {
377 lsi_enabled_port_mask
= lsi_parse_portmask(optarg
);
378 if (lsi_enabled_port_mask
== 0) {
379 printf("invalid portmask\n");
387 lsi_rx_queue_per_lcore
= lsi_parse_nqueue(optarg
);
388 if (lsi_rx_queue_per_lcore
== 0) {
389 printf("invalid queue number\n");
397 timer_period
= lsi_parse_timer_period(optarg
) * 1000 * TIMER_MILLISECOND
;
398 if (timer_period
< 0) {
399 printf("invalid timer period\n");
417 argv
[optind
-1] = prgname
;
420 optind
= 1; /* reset getopt lib */
425 * It will be called as the callback for specified port after a LSI interrupt
426 * has been fully handled. This callback needs to be implemented carefully as
427 * it will be called in the interrupt host thread which is different from the
428 * application main thread.
435 * Pointer to(address of) the parameters.
441 lsi_event_callback(uint16_t port_id
, enum rte_eth_event_type type
, void *param
,
444 struct rte_eth_link link
;
448 RTE_SET_USED(ret_param
);
450 printf("\n\nIn registered callback...\n");
451 printf("Event type: %s\n", type
== RTE_ETH_EVENT_INTR_LSC
? "LSC interrupt" : "unknown event");
452 ret
= rte_eth_link_get_nowait(port_id
, &link
);
454 printf("Failed link get on port %d: %s\n",
455 port_id
, rte_strerror(-ret
));
458 if (link
.link_status
) {
459 printf("Port %d Link Up - speed %u Mbps - %s\n\n",
460 port_id
, (unsigned)link
.link_speed
,
461 (link
.link_duplex
== ETH_LINK_FULL_DUPLEX
) ?
462 ("full-duplex") : ("half-duplex"));
464 printf("Port %d Link Down\n\n", port_id
);
469 /* Check the link status of all ports in up to 9s, and print them finally */
471 check_all_ports_link_status(uint16_t port_num
, uint32_t port_mask
)
473 #define CHECK_INTERVAL 100 /* 100ms */
474 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
475 uint8_t count
, all_ports_up
, print_flag
= 0;
477 struct rte_eth_link link
;
480 printf("\nChecking link status");
482 for (count
= 0; count
<= MAX_CHECK_TIME
; count
++) {
484 for (portid
= 0; portid
< port_num
; portid
++) {
485 if ((port_mask
& (1 << portid
)) == 0)
487 memset(&link
, 0, sizeof(link
));
488 ret
= rte_eth_link_get_nowait(portid
, &link
);
492 printf("Port %u link get failed: %s\n",
493 portid
, rte_strerror(-ret
));
496 /* print link status if flag set */
497 if (print_flag
== 1) {
498 if (link
.link_status
)
500 "Port%d Link Up. Speed %u Mbps - %s\n",
501 portid
, link
.link_speed
,
502 (link
.link_duplex
== ETH_LINK_FULL_DUPLEX
) ?
503 ("full-duplex") : ("half-duplex"));
505 printf("Port %d Link Down\n", portid
);
508 /* clear all_ports_up flag if any link down */
509 if (link
.link_status
== ETH_LINK_DOWN
) {
514 /* after finally printing all link status, get out */
518 if (all_ports_up
== 0) {
521 rte_delay_ms(CHECK_INTERVAL
);
524 /* set the print_flag if all ports up or timeout */
525 if (all_ports_up
== 1 || count
== (MAX_CHECK_TIME
- 1)) {
533 main(int argc
, char **argv
)
535 struct lcore_queue_conf
*qconf
;
538 uint16_t portid
, portid_last
= 0;
539 unsigned lcore_id
, rx_lcore_id
;
540 unsigned nb_ports_in_mask
= 0;
543 ret
= rte_eal_init(argc
, argv
);
545 rte_exit(EXIT_FAILURE
, "rte_eal_init failed");
549 /* parse application arguments (after the EAL ones) */
550 ret
= lsi_parse_args(argc
, argv
);
552 rte_exit(EXIT_FAILURE
, "Invalid arguments");
554 /* create the mbuf pool */
556 rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF
, 32, 0,
557 RTE_MBUF_DEFAULT_BUF_SIZE
, rte_socket_id());
558 if (lsi_pktmbuf_pool
== NULL
)
559 rte_panic("Cannot init mbuf pool\n");
561 nb_ports
= rte_eth_dev_count_avail();
563 rte_panic("No Ethernet port - bye\n");
566 * Each logical core is assigned a dedicated TX queue on each port.
568 for (portid
= 0; portid
< nb_ports
; portid
++) {
569 /* skip ports that are not enabled */
570 if ((lsi_enabled_port_mask
& (1 << portid
)) == 0)
573 /* save the destination port id */
574 if (nb_ports_in_mask
% 2) {
575 lsi_dst_ports
[portid
] = portid_last
;
576 lsi_dst_ports
[portid_last
] = portid
;
579 portid_last
= portid
;
583 if (nb_ports_in_mask
< 2 || nb_ports_in_mask
% 2)
584 rte_exit(EXIT_FAILURE
, "Current enabled port number is %u, "
585 "but it should be even and at least 2\n",
589 qconf
= &lcore_queue_conf
[rx_lcore_id
];
591 /* Initialize the port/queue configuration of each logical core */
592 for (portid
= 0; portid
< nb_ports
; portid
++) {
593 /* skip ports that are not enabled */
594 if ((lsi_enabled_port_mask
& (1 << portid
)) == 0)
597 /* get the lcore_id for this port */
598 while (rte_lcore_is_enabled(rx_lcore_id
) == 0 ||
599 lcore_queue_conf
[rx_lcore_id
].n_rx_port
==
600 lsi_rx_queue_per_lcore
) {
603 if (rx_lcore_id
>= RTE_MAX_LCORE
)
604 rte_exit(EXIT_FAILURE
, "Not enough cores\n");
606 if (qconf
!= &lcore_queue_conf
[rx_lcore_id
])
607 /* Assigned a new logical core in the loop above. */
608 qconf
= &lcore_queue_conf
[rx_lcore_id
];
610 qconf
->rx_port_list
[qconf
->n_rx_port
] = portid
;
612 printf("Lcore %u: RX port %u\n",rx_lcore_id
, (unsigned) portid
);
615 /* Initialise each port */
616 for (portid
= 0; portid
< nb_ports
; portid
++) {
617 struct rte_eth_rxconf rxq_conf
;
618 struct rte_eth_txconf txq_conf
;
619 struct rte_eth_conf local_port_conf
= port_conf
;
620 struct rte_eth_dev_info dev_info
;
622 /* skip ports that are not enabled */
623 if ((lsi_enabled_port_mask
& (1 << portid
)) == 0) {
624 printf("Skipping disabled port %u\n", (unsigned) portid
);
628 printf("Initializing port %u... ", (unsigned) portid
);
631 ret
= rte_eth_dev_info_get(portid
, &dev_info
);
633 rte_exit(EXIT_FAILURE
,
634 "Error during getting device (port %u) info: %s\n",
635 portid
, strerror(-ret
));
637 if (dev_info
.tx_offload_capa
& DEV_TX_OFFLOAD_MBUF_FAST_FREE
)
638 local_port_conf
.txmode
.offloads
|=
639 DEV_TX_OFFLOAD_MBUF_FAST_FREE
;
640 ret
= rte_eth_dev_configure(portid
, 1, 1, &local_port_conf
);
642 rte_exit(EXIT_FAILURE
, "Cannot configure device: err=%d, port=%u\n",
643 ret
, (unsigned) portid
);
645 ret
= rte_eth_dev_adjust_nb_rx_tx_desc(portid
, &nb_rxd
,
648 rte_exit(EXIT_FAILURE
,
649 "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d, port=%u\n",
650 ret
, (unsigned) portid
);
652 /* register lsi interrupt callback, need to be after
653 * rte_eth_dev_configure(). if (intr_conf.lsc == 0), no
654 * lsc interrupt will be present, and below callback to
655 * be registered will never be called.
657 rte_eth_dev_callback_register(portid
,
658 RTE_ETH_EVENT_INTR_LSC
, lsi_event_callback
, NULL
);
660 ret
= rte_eth_macaddr_get(portid
,
661 &lsi_ports_eth_addr
[portid
]);
663 rte_exit(EXIT_FAILURE
,
664 "rte_eth_macaddr_get: err=%d, port=%u\n",
665 ret
, (unsigned int)portid
);
667 /* init one RX queue */
669 rxq_conf
= dev_info
.default_rxconf
;
670 rxq_conf
.offloads
= local_port_conf
.rxmode
.offloads
;
671 ret
= rte_eth_rx_queue_setup(portid
, 0, nb_rxd
,
672 rte_eth_dev_socket_id(portid
),
676 rte_exit(EXIT_FAILURE
, "rte_eth_rx_queue_setup: err=%d, port=%u\n",
677 ret
, (unsigned) portid
);
679 /* init one TX queue logical core on each port */
681 txq_conf
= dev_info
.default_txconf
;
682 txq_conf
.offloads
= local_port_conf
.txmode
.offloads
;
683 ret
= rte_eth_tx_queue_setup(portid
, 0, nb_txd
,
684 rte_eth_dev_socket_id(portid
),
687 rte_exit(EXIT_FAILURE
, "rte_eth_tx_queue_setup: err=%d,port=%u\n",
688 ret
, (unsigned) portid
);
690 /* Initialize TX buffers */
691 tx_buffer
[portid
] = rte_zmalloc_socket("tx_buffer",
692 RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST
), 0,
693 rte_eth_dev_socket_id(portid
));
694 if (tx_buffer
[portid
] == NULL
)
695 rte_exit(EXIT_FAILURE
, "Cannot allocate buffer for tx on port %u\n",
698 rte_eth_tx_buffer_init(tx_buffer
[portid
], MAX_PKT_BURST
);
700 ret
= rte_eth_tx_buffer_set_err_callback(tx_buffer
[portid
],
701 rte_eth_tx_buffer_count_callback
,
702 &port_statistics
[portid
].dropped
);
704 rte_exit(EXIT_FAILURE
, "Cannot set error callback for "
705 "tx buffer on port %u\n", (unsigned) portid
);
708 ret
= rte_eth_dev_start(portid
);
710 rte_exit(EXIT_FAILURE
, "rte_eth_dev_start: err=%d, port=%u\n",
711 ret
, (unsigned) portid
);
714 ret
= rte_eth_promiscuous_enable(portid
);
716 rte_exit(EXIT_FAILURE
,
717 "rte_eth_promiscuous_enable: err=%s, port=%u\n",
718 rte_strerror(-ret
), portid
);
720 printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
722 lsi_ports_eth_addr
[portid
].addr_bytes
[0],
723 lsi_ports_eth_addr
[portid
].addr_bytes
[1],
724 lsi_ports_eth_addr
[portid
].addr_bytes
[2],
725 lsi_ports_eth_addr
[portid
].addr_bytes
[3],
726 lsi_ports_eth_addr
[portid
].addr_bytes
[4],
727 lsi_ports_eth_addr
[portid
].addr_bytes
[5]);
729 /* initialize port stats */
730 memset(&port_statistics
, 0, sizeof(port_statistics
));
733 check_all_ports_link_status(nb_ports
, lsi_enabled_port_mask
);
735 /* launch per-lcore init on every lcore */
736 rte_eal_mp_remote_launch(lsi_launch_one_lcore
, NULL
, CALL_MASTER
);
737 RTE_LCORE_FOREACH_SLAVE(lcore_id
) {
738 if (rte_eal_wait_lcore(lcore_id
) < 0)