1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
10 #include <sys/queue.h>
15 #include <netinet/in.h>
17 #include <linux/if_tun.h>
19 #include <sys/ioctl.h>
23 #include <rte_common.h>
25 #include <rte_memory.h>
26 #include <rte_memcpy.h>
28 #include <rte_per_lcore.h>
29 #include <rte_launch.h>
30 #include <rte_atomic.h>
31 #include <rte_lcore.h>
32 #include <rte_branch_prediction.h>
33 #include <rte_interrupts.h>
34 #include <rte_bus_pci.h>
35 #include <rte_debug.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_mempool.h>
40 #include <rte_string_fns.h>
41 #include <rte_cycles.h>
42 #include <rte_malloc.h>
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1

/* Max size of a single packet */
#define MAX_PACKET_SZ           2048

/* Size of the data buffer in each mbuf */
#define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)

/* Number of mbufs in mempool that is created */
#define NB_MBUF                 (8192 * 16)

/* How many packets to attempt to read from NIC in one go */
#define PKT_BURST_SZ            32

/* How many objects (mbufs) to keep in per-lcore mempool cache */
#define MEMPOOL_CACHE_SZ        PKT_BURST_SZ

/* Number of RX ring descriptors */
#define NB_RXD                  1024

/* Number of TX ring descriptors */
#define NB_TXD                  1024

/* Total octets in ethernet header */
#define KNI_ENET_HEADER_SIZE    14

/* Total octets in the FCS */
#define KNI_ENET_FCS_SIZE       4

#define KNI_US_PER_SECOND       1000000
#define KNI_SECOND_PER_DAY      86400

#define KNI_MAX_KTHREAD 32
80 * Structure of port parameters
82 struct kni_port_params
{
83 uint16_t port_id
;/* Port ID */
84 unsigned lcore_rx
; /* lcore ID for RX */
85 unsigned lcore_tx
; /* lcore ID for TX */
86 uint32_t nb_lcore_k
; /* Number of lcores for KNI multi kernel threads */
87 uint32_t nb_kni
; /* Number of KNI devices to be created */
88 unsigned lcore_k
[KNI_MAX_KTHREAD
]; /* lcore ID list for kthreads */
89 struct rte_kni
*kni
[KNI_MAX_KTHREAD
]; /* KNI context pointers */
90 } __rte_cache_aligned
;
92 static struct kni_port_params
*kni_port_params_array
[RTE_MAX_ETHPORTS
];
95 /* Options for configuring ethernet port */
96 static struct rte_eth_conf port_conf
= {
98 .mq_mode
= ETH_MQ_TX_NONE
,
102 /* Mempool for mbufs */
103 static struct rte_mempool
* pktmbuf_pool
= NULL
;
105 /* Mask of enabled ports */
106 static uint32_t ports_mask
= 0;
107 /* Ports set in promiscuous mode off by default. */
108 static int promiscuous_on
= 0;
109 /* Monitor link status continually. off by default. */
110 static int monitor_links
;
112 /* Structure type for recording kni interface specific stats */
113 struct kni_interface_stats
{
114 /* number of pkts received from NIC, and sent to KNI */
117 /* number of pkts received from NIC, but failed to send to KNI */
120 /* number of pkts received from KNI, and sent to NIC */
123 /* number of pkts received from KNI, but failed to send to NIC */
127 /* kni device statistics array */
128 static struct kni_interface_stats kni_stats
[RTE_MAX_ETHPORTS
];
130 static int kni_change_mtu(uint16_t port_id
, unsigned int new_mtu
);
131 static int kni_config_network_interface(uint16_t port_id
, uint8_t if_up
);
132 static int kni_config_mac_address(uint16_t port_id
, uint8_t mac_addr
[]);
134 static rte_atomic32_t kni_stop
= RTE_ATOMIC32_INIT(0);
135 static rte_atomic32_t kni_pause
= RTE_ATOMIC32_INIT(0);
137 /* Print out statistics on packets handled */
143 printf("\n**KNI example application statistics**\n"
144 "====== ============== ============ ============ ============ ============\n"
145 " Port Lcore(RX/TX) rx_packets rx_dropped tx_packets tx_dropped\n"
146 "------ -------------- ------------ ------------ ------------ ------------\n");
147 for (i
= 0; i
< RTE_MAX_ETHPORTS
; i
++) {
148 if (!kni_port_params_array
[i
])
151 printf("%7d %10u/%2u %13"PRIu64
" %13"PRIu64
" %13"PRIu64
" "
153 kni_port_params_array
[i
]->lcore_rx
,
154 kni_port_params_array
[i
]->lcore_tx
,
155 kni_stats
[i
].rx_packets
,
156 kni_stats
[i
].rx_dropped
,
157 kni_stats
[i
].tx_packets
,
158 kni_stats
[i
].tx_dropped
);
160 printf("====== ============== ============ ============ ============ ============\n");
163 /* Custom handling of signals to handle stats and kni processing */
165 signal_handler(int signum
)
167 /* When we receive a USR1 signal, print stats */
168 if (signum
== SIGUSR1
) {
172 /* When we receive a USR2 signal, reset stats */
173 if (signum
== SIGUSR2
) {
174 memset(&kni_stats
, 0, sizeof(kni_stats
));
175 printf("\n** Statistics have been reset **\n");
180 * When we receive a RTMIN or SIGINT or SIGTERM signal,
181 * stop kni processing
183 if (signum
== SIGRTMIN
|| signum
== SIGINT
|| signum
== SIGTERM
) {
184 printf("\nSIGRTMIN/SIGINT/SIGTERM received. "
185 "KNI processing stopping.\n");
186 rte_atomic32_inc(&kni_stop
);
192 kni_burst_free_mbufs(struct rte_mbuf
**pkts
, unsigned num
)
199 for (i
= 0; i
< num
; i
++) {
200 rte_pktmbuf_free(pkts
[i
]);
206 * Interface to burst rx and enqueue mbufs into rx_q
209 kni_ingress(struct kni_port_params
*p
)
215 struct rte_mbuf
*pkts_burst
[PKT_BURST_SZ
];
221 port_id
= p
->port_id
;
222 for (i
= 0; i
< nb_kni
; i
++) {
223 /* Burst rx from eth */
224 nb_rx
= rte_eth_rx_burst(port_id
, 0, pkts_burst
, PKT_BURST_SZ
);
225 if (unlikely(nb_rx
> PKT_BURST_SZ
)) {
226 RTE_LOG(ERR
, APP
, "Error receiving from eth\n");
229 /* Burst tx to kni */
230 num
= rte_kni_tx_burst(p
->kni
[i
], pkts_burst
, nb_rx
);
232 kni_stats
[port_id
].rx_packets
+= num
;
234 rte_kni_handle_request(p
->kni
[i
]);
235 if (unlikely(num
< nb_rx
)) {
236 /* Free mbufs not tx to kni interface */
237 kni_burst_free_mbufs(&pkts_burst
[num
], nb_rx
- num
);
238 kni_stats
[port_id
].rx_dropped
+= nb_rx
- num
;
244 * Interface to dequeue mbufs from tx_q and burst tx
247 kni_egress(struct kni_port_params
*p
)
253 struct rte_mbuf
*pkts_burst
[PKT_BURST_SZ
];
259 port_id
= p
->port_id
;
260 for (i
= 0; i
< nb_kni
; i
++) {
261 /* Burst rx from kni */
262 num
= rte_kni_rx_burst(p
->kni
[i
], pkts_burst
, PKT_BURST_SZ
);
263 if (unlikely(num
> PKT_BURST_SZ
)) {
264 RTE_LOG(ERR
, APP
, "Error receiving from KNI\n");
267 /* Burst tx to eth */
268 nb_tx
= rte_eth_tx_burst(port_id
, 0, pkts_burst
, (uint16_t)num
);
270 kni_stats
[port_id
].tx_packets
+= nb_tx
;
271 if (unlikely(nb_tx
< num
)) {
272 /* Free mbufs not tx to NIC */
273 kni_burst_free_mbufs(&pkts_burst
[nb_tx
], num
- nb_tx
);
274 kni_stats
[port_id
].tx_dropped
+= num
- nb_tx
;
280 main_loop(__rte_unused
void *arg
)
285 const unsigned lcore_id
= rte_lcore_id();
292 enum lcore_rxtx flag
= LCORE_NONE
;
294 RTE_ETH_FOREACH_DEV(i
) {
295 if (!kni_port_params_array
[i
])
297 if (kni_port_params_array
[i
]->lcore_rx
== (uint8_t)lcore_id
) {
300 } else if (kni_port_params_array
[i
]->lcore_tx
==
307 if (flag
== LCORE_RX
) {
308 RTE_LOG(INFO
, APP
, "Lcore %u is reading from port %d\n",
309 kni_port_params_array
[i
]->lcore_rx
,
310 kni_port_params_array
[i
]->port_id
);
312 f_stop
= rte_atomic32_read(&kni_stop
);
313 f_pause
= rte_atomic32_read(&kni_pause
);
318 kni_ingress(kni_port_params_array
[i
]);
320 } else if (flag
== LCORE_TX
) {
321 RTE_LOG(INFO
, APP
, "Lcore %u is writing to port %d\n",
322 kni_port_params_array
[i
]->lcore_tx
,
323 kni_port_params_array
[i
]->port_id
);
325 f_stop
= rte_atomic32_read(&kni_stop
);
326 f_pause
= rte_atomic32_read(&kni_pause
);
331 kni_egress(kni_port_params_array
[i
]);
334 RTE_LOG(INFO
, APP
, "Lcore %u has nothing to do\n", lcore_id
);
339 /* Display usage instructions */
341 print_usage(const char *prgname
)
343 RTE_LOG(INFO
, APP
, "\nUsage: %s [EAL options] -- -p PORTMASK -P -m "
344 "[--config (port,lcore_rx,lcore_tx,lcore_kthread...)"
345 "[,(port,lcore_rx,lcore_tx,lcore_kthread...)]]\n"
346 " -p PORTMASK: hex bitmask of ports to use\n"
347 " -P : enable promiscuous mode\n"
348 " -m : enable monitoring of port carrier state\n"
349 " --config (port,lcore_rx,lcore_tx,lcore_kthread...): "
350 "port and lcore configurations\n",
/* Convert string to unsigned number. 0 is returned if error occurs */
static uint32_t
parse_unsigned(const char *portmask)
{
	char *end = NULL;
	unsigned long num;

	/* Base 16: the portmask is given in hex on the command line. */
	num = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return (uint32_t)num;
}
372 struct kni_port_params
**p
= kni_port_params_array
;
374 for (i
= 0; i
< RTE_MAX_ETHPORTS
; i
++) {
377 RTE_LOG(DEBUG
, APP
, "Port ID: %d\n", p
[i
]->port_id
);
378 RTE_LOG(DEBUG
, APP
, "Rx lcore ID: %u, Tx lcore ID: %u\n",
379 p
[i
]->lcore_rx
, p
[i
]->lcore_tx
);
380 for (j
= 0; j
< p
[i
]->nb_lcore_k
; j
++)
381 RTE_LOG(DEBUG
, APP
, "Kernel thread lcore ID: %u\n",
387 parse_config(const char *arg
)
389 const char *p
, *p0
= arg
;
396 _NUM_FLD
= KNI_MAX_KTHREAD
+ 3,
399 char *str_fld
[_NUM_FLD
];
400 unsigned long int_fld
[_NUM_FLD
];
401 uint16_t port_id
, nb_kni_port_params
= 0;
403 memset(&kni_port_params_array
, 0, sizeof(kni_port_params_array
));
404 while (((p
= strchr(p0
, '(')) != NULL
) &&
405 nb_kni_port_params
< RTE_MAX_ETHPORTS
) {
407 if ((p0
= strchr(p
, ')')) == NULL
)
410 if (size
>= sizeof(s
)) {
411 printf("Invalid config parameters\n");
414 snprintf(s
, sizeof(s
), "%.*s", size
, p
);
415 nb_token
= rte_strsplit(s
, sizeof(s
), str_fld
, _NUM_FLD
, ',');
416 if (nb_token
<= FLD_LCORE_TX
) {
417 printf("Invalid config parameters\n");
420 for (i
= 0; i
< nb_token
; i
++) {
422 int_fld
[i
] = strtoul(str_fld
[i
], &end
, 0);
423 if (errno
!= 0 || end
== str_fld
[i
]) {
424 printf("Invalid config parameters\n");
430 port_id
= int_fld
[i
++];
431 if (port_id
>= RTE_MAX_ETHPORTS
) {
432 printf("Port ID %d could not exceed the maximum %d\n",
433 port_id
, RTE_MAX_ETHPORTS
);
436 if (kni_port_params_array
[port_id
]) {
437 printf("Port %d has been configured\n", port_id
);
440 kni_port_params_array
[port_id
] =
441 rte_zmalloc("KNI_port_params",
442 sizeof(struct kni_port_params
), RTE_CACHE_LINE_SIZE
);
443 kni_port_params_array
[port_id
]->port_id
= port_id
;
444 kni_port_params_array
[port_id
]->lcore_rx
=
445 (uint8_t)int_fld
[i
++];
446 kni_port_params_array
[port_id
]->lcore_tx
=
447 (uint8_t)int_fld
[i
++];
448 if (kni_port_params_array
[port_id
]->lcore_rx
>= RTE_MAX_LCORE
||
449 kni_port_params_array
[port_id
]->lcore_tx
>= RTE_MAX_LCORE
) {
450 printf("lcore_rx %u or lcore_tx %u ID could not "
451 "exceed the maximum %u\n",
452 kni_port_params_array
[port_id
]->lcore_rx
,
453 kni_port_params_array
[port_id
]->lcore_tx
,
454 (unsigned)RTE_MAX_LCORE
);
457 for (j
= 0; i
< nb_token
&& j
< KNI_MAX_KTHREAD
; i
++, j
++)
458 kni_port_params_array
[port_id
]->lcore_k
[j
] =
460 kni_port_params_array
[port_id
]->nb_lcore_k
= j
;
467 for (i
= 0; i
< RTE_MAX_ETHPORTS
; i
++) {
468 if (kni_port_params_array
[i
]) {
469 rte_free(kni_port_params_array
[i
]);
470 kni_port_params_array
[i
] = NULL
;
478 validate_parameters(uint32_t portmask
)
483 printf("No port configured in port mask\n");
487 for (i
= 0; i
< RTE_MAX_ETHPORTS
; i
++) {
488 if (((portmask
& (1 << i
)) && !kni_port_params_array
[i
]) ||
489 (!(portmask
& (1 << i
)) && kni_port_params_array
[i
]))
490 rte_exit(EXIT_FAILURE
, "portmask is not consistent "
491 "to port ids specified in --config\n");
493 if (kni_port_params_array
[i
] && !rte_lcore_is_enabled(\
494 (unsigned)(kni_port_params_array
[i
]->lcore_rx
)))
495 rte_exit(EXIT_FAILURE
, "lcore id %u for "
496 "port %d receiving not enabled\n",
497 kni_port_params_array
[i
]->lcore_rx
,
498 kni_port_params_array
[i
]->port_id
);
500 if (kni_port_params_array
[i
] && !rte_lcore_is_enabled(\
501 (unsigned)(kni_port_params_array
[i
]->lcore_tx
)))
502 rte_exit(EXIT_FAILURE
, "lcore id %u for "
503 "port %d transmitting not enabled\n",
504 kni_port_params_array
[i
]->lcore_tx
,
505 kni_port_params_array
[i
]->port_id
);
512 #define CMDLINE_OPT_CONFIG "config"
514 /* Parse the arguments given in the command line of the application */
516 parse_args(int argc
, char **argv
)
518 int opt
, longindex
, ret
= 0;
519 const char *prgname
= argv
[0];
520 static struct option longopts
[] = {
521 {CMDLINE_OPT_CONFIG
, required_argument
, NULL
, 0},
525 /* Disable printing messages within getopt() */
528 /* Parse command line */
529 while ((opt
= getopt_long(argc
, argv
, "p:Pm", longopts
,
530 &longindex
)) != EOF
) {
533 ports_mask
= parse_unsigned(optarg
);
542 if (!strncmp(longopts
[longindex
].name
,
544 sizeof(CMDLINE_OPT_CONFIG
))) {
545 ret
= parse_config(optarg
);
547 printf("Invalid config\n");
548 print_usage(prgname
);
554 print_usage(prgname
);
555 rte_exit(EXIT_FAILURE
, "Invalid option specified\n");
559 /* Check that options were parsed ok */
560 if (validate_parameters(ports_mask
) < 0) {
561 print_usage(prgname
);
562 rte_exit(EXIT_FAILURE
, "Invalid parameters\n");
568 /* Initialize KNI subsystem */
572 unsigned int num_of_kni_ports
= 0, i
;
573 struct kni_port_params
**params
= kni_port_params_array
;
575 /* Calculate the maximum number of KNI interfaces that will be used */
576 for (i
= 0; i
< RTE_MAX_ETHPORTS
; i
++) {
577 if (kni_port_params_array
[i
]) {
578 num_of_kni_ports
+= (params
[i
]->nb_lcore_k
?
579 params
[i
]->nb_lcore_k
: 1);
583 /* Invoke rte KNI init to preallocate the ports */
584 rte_kni_init(num_of_kni_ports
);
587 /* Initialise a single port on an Ethernet device */
589 init_port(uint16_t port
)
592 uint16_t nb_rxd
= NB_RXD
;
593 uint16_t nb_txd
= NB_TXD
;
594 struct rte_eth_dev_info dev_info
;
595 struct rte_eth_rxconf rxq_conf
;
596 struct rte_eth_txconf txq_conf
;
597 struct rte_eth_conf local_port_conf
= port_conf
;
599 /* Initialise device and RX/TX queues */
600 RTE_LOG(INFO
, APP
, "Initialising port %u ...\n", (unsigned)port
);
603 ret
= rte_eth_dev_info_get(port
, &dev_info
);
605 rte_exit(EXIT_FAILURE
,
606 "Error during getting device (port %u) info: %s\n",
607 port
, strerror(-ret
));
609 if (dev_info
.tx_offload_capa
& DEV_TX_OFFLOAD_MBUF_FAST_FREE
)
610 local_port_conf
.txmode
.offloads
|=
611 DEV_TX_OFFLOAD_MBUF_FAST_FREE
;
612 ret
= rte_eth_dev_configure(port
, 1, 1, &local_port_conf
);
614 rte_exit(EXIT_FAILURE
, "Could not configure port%u (%d)\n",
615 (unsigned)port
, ret
);
617 ret
= rte_eth_dev_adjust_nb_rx_tx_desc(port
, &nb_rxd
, &nb_txd
);
619 rte_exit(EXIT_FAILURE
, "Could not adjust number of descriptors "
620 "for port%u (%d)\n", (unsigned)port
, ret
);
622 rxq_conf
= dev_info
.default_rxconf
;
623 rxq_conf
.offloads
= local_port_conf
.rxmode
.offloads
;
624 ret
= rte_eth_rx_queue_setup(port
, 0, nb_rxd
,
625 rte_eth_dev_socket_id(port
), &rxq_conf
, pktmbuf_pool
);
627 rte_exit(EXIT_FAILURE
, "Could not setup up RX queue for "
628 "port%u (%d)\n", (unsigned)port
, ret
);
630 txq_conf
= dev_info
.default_txconf
;
631 txq_conf
.offloads
= local_port_conf
.txmode
.offloads
;
632 ret
= rte_eth_tx_queue_setup(port
, 0, nb_txd
,
633 rte_eth_dev_socket_id(port
), &txq_conf
);
635 rte_exit(EXIT_FAILURE
, "Could not setup up TX queue for "
636 "port%u (%d)\n", (unsigned)port
, ret
);
638 ret
= rte_eth_dev_start(port
);
640 rte_exit(EXIT_FAILURE
, "Could not start port%u (%d)\n",
641 (unsigned)port
, ret
);
643 if (promiscuous_on
) {
644 ret
= rte_eth_promiscuous_enable(port
);
646 rte_exit(EXIT_FAILURE
,
647 "Could not enable promiscuous mode for port%u: %s\n",
648 port
, rte_strerror(-ret
));
652 /* Check the link status of all ports in up to 9s, and print them finally */
654 check_all_ports_link_status(uint32_t port_mask
)
656 #define CHECK_INTERVAL 100 /* 100ms */
657 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
659 uint8_t count
, all_ports_up
, print_flag
= 0;
660 struct rte_eth_link link
;
663 printf("\nChecking link status\n");
665 for (count
= 0; count
<= MAX_CHECK_TIME
; count
++) {
667 RTE_ETH_FOREACH_DEV(portid
) {
668 if ((port_mask
& (1 << portid
)) == 0)
670 memset(&link
, 0, sizeof(link
));
671 ret
= rte_eth_link_get_nowait(portid
, &link
);
675 printf("Port %u link get failed: %s\n",
676 portid
, rte_strerror(-ret
));
679 /* print link status if flag set */
680 if (print_flag
== 1) {
681 if (link
.link_status
)
683 "Port%d Link Up - speed %uMbps - %s\n",
684 portid
, link
.link_speed
,
685 (link
.link_duplex
== ETH_LINK_FULL_DUPLEX
) ?
686 ("full-duplex") : ("half-duplex"));
688 printf("Port %d Link Down\n", portid
);
691 /* clear all_ports_up flag if any link down */
692 if (link
.link_status
== ETH_LINK_DOWN
) {
697 /* after finally printing all link status, get out */
701 if (all_ports_up
== 0) {
704 rte_delay_ms(CHECK_INTERVAL
);
707 /* set the print_flag if all ports up or timeout */
708 if (all_ports_up
== 1 || count
== (MAX_CHECK_TIME
- 1)) {
716 log_link_state(struct rte_kni
*kni
, int prev
, struct rte_eth_link
*link
)
718 if (kni
== NULL
|| link
== NULL
)
721 if (prev
== ETH_LINK_DOWN
&& link
->link_status
== ETH_LINK_UP
) {
722 RTE_LOG(INFO
, APP
, "%s NIC Link is Up %d Mbps %s %s.\n",
723 rte_kni_get_name(kni
),
725 link
->link_autoneg
? "(AutoNeg)" : "(Fixed)",
726 link
->link_duplex
? "Full Duplex" : "Half Duplex");
727 } else if (prev
== ETH_LINK_UP
&& link
->link_status
== ETH_LINK_DOWN
) {
728 RTE_LOG(INFO
, APP
, "%s NIC Link is Down.\n",
729 rte_kni_get_name(kni
));
734 * Monitor the link status of all ports and update the
735 * corresponding KNI interface(s)
738 monitor_all_ports_link_status(void *arg
)
741 struct rte_eth_link link
;
743 struct kni_port_params
**p
= kni_port_params_array
;
748 while (monitor_links
) {
750 RTE_ETH_FOREACH_DEV(portid
) {
751 if ((ports_mask
& (1 << portid
)) == 0)
753 memset(&link
, 0, sizeof(link
));
754 ret
= rte_eth_link_get_nowait(portid
, &link
);
757 "Get link failed (port %u): %s\n",
758 portid
, rte_strerror(-ret
));
761 for (i
= 0; i
< p
[portid
]->nb_kni
; i
++) {
762 prev
= rte_kni_update_link(p
[portid
]->kni
[i
],
764 log_link_state(p
[portid
]->kni
[i
], prev
, &link
);
772 kni_change_mtu_(uint16_t port_id
, unsigned int new_mtu
)
775 uint16_t nb_rxd
= NB_RXD
;
776 uint16_t nb_txd
= NB_TXD
;
777 struct rte_eth_conf conf
;
778 struct rte_eth_dev_info dev_info
;
779 struct rte_eth_rxconf rxq_conf
;
780 struct rte_eth_txconf txq_conf
;
782 if (!rte_eth_dev_is_valid_port(port_id
)) {
783 RTE_LOG(ERR
, APP
, "Invalid port id %d\n", port_id
);
787 RTE_LOG(INFO
, APP
, "Change MTU of port %d to %u\n", port_id
, new_mtu
);
789 /* Stop specific port */
790 rte_eth_dev_stop(port_id
);
792 memcpy(&conf
, &port_conf
, sizeof(conf
));
794 if (new_mtu
> RTE_ETHER_MAX_LEN
)
795 conf
.rxmode
.offloads
|= DEV_RX_OFFLOAD_JUMBO_FRAME
;
797 conf
.rxmode
.offloads
&= ~DEV_RX_OFFLOAD_JUMBO_FRAME
;
799 /* mtu + length of header + length of FCS = max pkt length */
800 conf
.rxmode
.max_rx_pkt_len
= new_mtu
+ KNI_ENET_HEADER_SIZE
+
802 ret
= rte_eth_dev_configure(port_id
, 1, 1, &conf
);
804 RTE_LOG(ERR
, APP
, "Fail to reconfigure port %d\n", port_id
);
808 ret
= rte_eth_dev_adjust_nb_rx_tx_desc(port_id
, &nb_rxd
, &nb_txd
);
810 rte_exit(EXIT_FAILURE
, "Could not adjust number of descriptors "
811 "for port%u (%d)\n", (unsigned int)port_id
,
814 ret
= rte_eth_dev_info_get(port_id
, &dev_info
);
817 "Error during getting device (port %u) info: %s\n",
818 port_id
, strerror(-ret
));
823 rxq_conf
= dev_info
.default_rxconf
;
824 rxq_conf
.offloads
= conf
.rxmode
.offloads
;
825 ret
= rte_eth_rx_queue_setup(port_id
, 0, nb_rxd
,
826 rte_eth_dev_socket_id(port_id
), &rxq_conf
, pktmbuf_pool
);
828 RTE_LOG(ERR
, APP
, "Fail to setup Rx queue of port %d\n",
833 txq_conf
= dev_info
.default_txconf
;
834 txq_conf
.offloads
= conf
.txmode
.offloads
;
835 ret
= rte_eth_tx_queue_setup(port_id
, 0, nb_txd
,
836 rte_eth_dev_socket_id(port_id
), &txq_conf
);
838 RTE_LOG(ERR
, APP
, "Fail to setup Tx queue of port %d\n",
843 /* Restart specific port */
844 ret
= rte_eth_dev_start(port_id
);
846 RTE_LOG(ERR
, APP
, "Fail to restart port %d\n", port_id
);
853 /* Callback for request of changing MTU */
855 kni_change_mtu(uint16_t port_id
, unsigned int new_mtu
)
859 rte_atomic32_inc(&kni_pause
);
860 ret
= kni_change_mtu_(port_id
, new_mtu
);
861 rte_atomic32_dec(&kni_pause
);
866 /* Callback for request of configuring network interface up/down */
868 kni_config_network_interface(uint16_t port_id
, uint8_t if_up
)
872 if (!rte_eth_dev_is_valid_port(port_id
)) {
873 RTE_LOG(ERR
, APP
, "Invalid port id %d\n", port_id
);
877 RTE_LOG(INFO
, APP
, "Configure network interface of %d %s\n",
878 port_id
, if_up
? "up" : "down");
880 rte_atomic32_inc(&kni_pause
);
882 if (if_up
!= 0) { /* Configure network interface up */
883 rte_eth_dev_stop(port_id
);
884 ret
= rte_eth_dev_start(port_id
);
885 } else /* Configure network interface down */
886 rte_eth_dev_stop(port_id
);
888 rte_atomic32_dec(&kni_pause
);
891 RTE_LOG(ERR
, APP
, "Failed to start port %d\n", port_id
);
897 print_ethaddr(const char *name
, struct rte_ether_addr
*mac_addr
)
899 char buf
[RTE_ETHER_ADDR_FMT_SIZE
];
900 rte_ether_format_addr(buf
, RTE_ETHER_ADDR_FMT_SIZE
, mac_addr
);
901 RTE_LOG(INFO
, APP
, "\t%s%s\n", name
, buf
);
904 /* Callback for request of configuring mac address */
906 kni_config_mac_address(uint16_t port_id
, uint8_t mac_addr
[])
910 if (!rte_eth_dev_is_valid_port(port_id
)) {
911 RTE_LOG(ERR
, APP
, "Invalid port id %d\n", port_id
);
915 RTE_LOG(INFO
, APP
, "Configure mac address of %d\n", port_id
);
916 print_ethaddr("Address:", (struct rte_ether_addr
*)mac_addr
);
918 ret
= rte_eth_dev_default_mac_addr_set(port_id
,
919 (struct rte_ether_addr
*)mac_addr
);
921 RTE_LOG(ERR
, APP
, "Failed to config mac_addr for port %d\n",
928 kni_alloc(uint16_t port_id
)
932 struct rte_kni_conf conf
;
933 struct kni_port_params
**params
= kni_port_params_array
;
936 if (port_id
>= RTE_MAX_ETHPORTS
|| !params
[port_id
])
939 params
[port_id
]->nb_kni
= params
[port_id
]->nb_lcore_k
?
940 params
[port_id
]->nb_lcore_k
: 1;
942 for (i
= 0; i
< params
[port_id
]->nb_kni
; i
++) {
943 /* Clear conf at first */
944 memset(&conf
, 0, sizeof(conf
));
945 if (params
[port_id
]->nb_lcore_k
) {
946 snprintf(conf
.name
, RTE_KNI_NAMESIZE
,
947 "vEth%u_%u", port_id
, i
);
948 conf
.core_id
= params
[port_id
]->lcore_k
[i
];
951 snprintf(conf
.name
, RTE_KNI_NAMESIZE
,
953 conf
.group_id
= port_id
;
954 conf
.mbuf_size
= MAX_PACKET_SZ
;
956 * The first KNI device associated to a port
957 * is the master, for multiple kernel thread
961 struct rte_kni_ops ops
;
962 struct rte_eth_dev_info dev_info
;
964 ret
= rte_eth_dev_info_get(port_id
, &dev_info
);
966 rte_exit(EXIT_FAILURE
,
967 "Error during getting device (port %u) info: %s\n",
968 port_id
, strerror(-ret
));
970 /* Get the interface default mac address */
971 ret
= rte_eth_macaddr_get(port_id
,
972 (struct rte_ether_addr
*)&conf
.mac_addr
);
974 rte_exit(EXIT_FAILURE
,
975 "Failed to get MAC address (port %u): %s\n",
976 port_id
, rte_strerror(-ret
));
978 rte_eth_dev_get_mtu(port_id
, &conf
.mtu
);
980 conf
.min_mtu
= dev_info
.min_mtu
;
981 conf
.max_mtu
= dev_info
.max_mtu
;
983 memset(&ops
, 0, sizeof(ops
));
984 ops
.port_id
= port_id
;
985 ops
.change_mtu
= kni_change_mtu
;
986 ops
.config_network_if
= kni_config_network_interface
;
987 ops
.config_mac_address
= kni_config_mac_address
;
989 kni
= rte_kni_alloc(pktmbuf_pool
, &conf
, &ops
);
991 kni
= rte_kni_alloc(pktmbuf_pool
, &conf
, NULL
);
994 rte_exit(EXIT_FAILURE
, "Fail to create kni for "
995 "port: %d\n", port_id
);
996 params
[port_id
]->kni
[i
] = kni
;
1003 kni_free_kni(uint16_t port_id
)
1006 struct kni_port_params
**p
= kni_port_params_array
;
1008 if (port_id
>= RTE_MAX_ETHPORTS
|| !p
[port_id
])
1011 for (i
= 0; i
< p
[port_id
]->nb_kni
; i
++) {
1012 if (rte_kni_release(p
[port_id
]->kni
[i
]))
1013 printf("Fail to release kni\n");
1014 p
[port_id
]->kni
[i
] = NULL
;
1016 rte_eth_dev_stop(port_id
);
1021 /* Initialise ports/queues etc. and start main loop on each core */
1023 main(int argc
, char** argv
)
1026 uint16_t nb_sys_ports
, port
;
1029 pthread_t kni_link_tid
;
1032 /* Associate signal_hanlder function with USR signals */
1033 signal(SIGUSR1
, signal_handler
);
1034 signal(SIGUSR2
, signal_handler
);
1035 signal(SIGRTMIN
, signal_handler
);
1036 signal(SIGINT
, signal_handler
);
1037 signal(SIGTERM
, signal_handler
);
1039 /* Initialise EAL */
1040 ret
= rte_eal_init(argc
, argv
);
1042 rte_exit(EXIT_FAILURE
, "Could not initialise EAL (%d)\n", ret
);
1046 /* Parse application arguments (after the EAL ones) */
1047 ret
= parse_args(argc
, argv
);
1049 rte_exit(EXIT_FAILURE
, "Could not parse input parameters\n");
1051 /* Create the mbuf pool */
1052 pktmbuf_pool
= rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF
,
1053 MEMPOOL_CACHE_SZ
, 0, MBUF_DATA_SZ
, rte_socket_id());
1054 if (pktmbuf_pool
== NULL
) {
1055 rte_exit(EXIT_FAILURE
, "Could not initialise mbuf pool\n");
1059 /* Get number of ports found in scan */
1060 nb_sys_ports
= rte_eth_dev_count_avail();
1061 if (nb_sys_ports
== 0)
1062 rte_exit(EXIT_FAILURE
, "No supported Ethernet device found\n");
1064 /* Check if the configured port ID is valid */
1065 for (i
= 0; i
< RTE_MAX_ETHPORTS
; i
++)
1066 if (kni_port_params_array
[i
] && !rte_eth_dev_is_valid_port(i
))
1067 rte_exit(EXIT_FAILURE
, "Configured invalid "
1070 /* Initialize KNI subsystem */
1073 /* Initialise each port */
1074 RTE_ETH_FOREACH_DEV(port
) {
1075 /* Skip ports that are not enabled */
1076 if (!(ports_mask
& (1 << port
)))
1080 if (port
>= RTE_MAX_ETHPORTS
)
1081 rte_exit(EXIT_FAILURE
, "Can not use more than "
1082 "%d ports for kni\n", RTE_MAX_ETHPORTS
);
1086 check_all_ports_link_status(ports_mask
);
1089 RTE_LOG(INFO
, APP
, "========================\n");
1090 RTE_LOG(INFO
, APP
, "KNI Running\n");
1091 RTE_LOG(INFO
, APP
, "kill -SIGUSR1 %d\n", pid
);
1092 RTE_LOG(INFO
, APP
, " Show KNI Statistics.\n");
1093 RTE_LOG(INFO
, APP
, "kill -SIGUSR2 %d\n", pid
);
1094 RTE_LOG(INFO
, APP
, " Zero KNI Statistics.\n");
1095 RTE_LOG(INFO
, APP
, "========================\n");
1098 ret
= rte_ctrl_thread_create(&kni_link_tid
,
1099 "KNI link status check", NULL
,
1100 monitor_all_ports_link_status
, NULL
);
1102 rte_exit(EXIT_FAILURE
,
1103 "Could not create link status thread!\n");
1105 /* Launch per-lcore function on every lcore */
1106 rte_eal_mp_remote_launch(main_loop
, NULL
, CALL_MASTER
);
1107 RTE_LCORE_FOREACH_SLAVE(i
) {
1108 if (rte_eal_wait_lcore(i
) < 0)
1112 pthread_join(kni_link_tid
, &retval
);
1114 /* Release resources */
1115 RTE_ETH_FOREACH_DEV(port
) {
1116 if (!(ports_mask
& (1 << port
)))
1120 for (i
= 0; i
< RTE_MAX_ETHPORTS
; i
++)
1121 if (kni_port_params_array
[i
]) {
1122 rte_free(kni_port_params_array
[i
]);
1123 kni_port_params_array
[i
] = NULL
;