/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <sys/eventfd.h>
#include <sys/param.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_pause.h>

#include "vxlan_setup.h"
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

/*
 * Calculate the number of buffers needed per port
 */
#define NUM_MBUFS_PER_PORT ((MAX_QUEUES * RTE_TEST_RX_DESC_DEFAULT) + \
				(nb_switching_cores * MAX_PKT_BURST) + \
				(nb_switching_cores * RTE_TEST_TX_DESC_DEFAULT) + \
				(nb_switching_cores * MBUF_CACHE_SIZE))
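
/*
 * Sizing sketch (illustrative numbers, not taken from this file): with
 * MAX_QUEUES = 128 and one switching core, the pool would hold
 * 128 * 1024 + 32 + 512 + 128 = 131744 mbufs per port, enough to
 * populate every RX descriptor, one in-flight burst, the TX ring, and
 * the per-core mempool cache at the same time.
 */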
#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE

#define MAX_PKT_BURST 32	/* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

/* Defines how long we wait between retries on RX */
#define BURST_RX_WAIT_US 15

#define BURST_RX_RETRIES 4	/* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE 0x2600
/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX 1
#define DEVICE_SAFE_REMOVE 2

/* Config_core_flag status definitions. */
#define REQUEST_DEV_REMOVAL 1
#define ACK_DEV_REMOVAL 0

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512
/* Get first 4 bytes in mbuf headroom. */
#define MBUF_HEADROOM_UINT32(mbuf) (*(uint32_t *)((uint8_t *)(mbuf) \
		+ sizeof(struct rte_mbuf)))
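
/*
 * The headroom begins immediately after the rte_mbuf header, so the
 * cast above reads the first four headroom bytes. A usage sketch
 * (hypothetical mbuf pointer m):
 *
 *	uint32_t tag = MBUF_HEADROOM_UINT32(m);
 */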
#define INVALID_PORT_ID 0xFFFF

/* Maximum character device basename size. */
#define MAX_BASENAME_SZ 20

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64

/* Used to compare MAC addresses. */
#define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL
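
/*
 * MAC_ADDR_CMP keeps the low 48 bits of a uint64_t, so two Ethernet
 * addresses can be compared in a single integer operation. A sketch,
 * assuming a and b are uint64_t values holding a MAC address in their
 * low six bytes:
 *
 *	int equal = ((a ^ b) & MAC_ADDR_CMP) == 0;
 */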
#define CMD_LINE_OPT_NB_DEVICES "nb-devices"
#define CMD_LINE_OPT_UDP_PORT "udp-port"
#define CMD_LINE_OPT_TX_CHECKSUM "tx-checksum"
#define CMD_LINE_OPT_TSO_SEGSZ "tso-segsz"
#define CMD_LINE_OPT_FILTER_TYPE "filter-type"
#define CMD_LINE_OPT_ENCAP "encap"
#define CMD_LINE_OPT_DECAP "decap"
#define CMD_LINE_OPT_RX_RETRY "rx-retry"
#define CMD_LINE_OPT_RX_RETRY_DELAY "rx-retry-delay"
#define CMD_LINE_OPT_RX_RETRY_NUM "rx-retry-num"
#define CMD_LINE_OPT_STATS "stats"
#define CMD_LINE_OPT_DEV_BASENAME "dev-basename"
/* mask of enabled ports */
static uint32_t enabled_port_mask;

/* Number of switching cores enabled */
static uint32_t nb_switching_cores;

/* number of devices/queues to support */
uint16_t nb_devices = 2;

/* max ring descriptors; ixgbe, i40e and e1000 all support up to 4096. */
#define MAX_RING_DESC 4096
struct vpool {
	struct rte_mempool *pool;
	struct rte_ring *ring;
} vpool_array[MAX_QUEUES+MAX_QUEUES];
/* UDP tunneling port */
uint16_t udp_port = 4789;

/* enable/disable inner TX checksum */
uint8_t tx_checksum = 0;

/* TCP segment size */
uint16_t tso_segsz = 0;

/* enable/disable decapsulation */
uint8_t rx_decap = 1;

/* enable/disable encapsulation */
uint8_t tx_encap = 1;

/* RX filter type for tunneling packet */
uint8_t filter_idx = 1;
/* overlay packet operation */
struct ol_switch_ops overlay_options = {
	.port_configure = vxlan_port_init,
	.tunnel_setup = vxlan_link,
	.tunnel_destroy = vxlan_unlink,
	.tx_handle = vxlan_tx_pkts,
	.rx_handle = vxlan_rx_pkts,
	.param_handle = NULL,
};
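
/*
 * The ops table keeps the switching datapath tunnel-agnostic: workers
 * only ever call overlay_options.rx_handle()/tx_handle() and friends,
 * so a different encapsulation could be plugged in by supplying another
 * ol_switch_ops instance instead of the VXLAN handlers above.
 */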
/* Enable stats. */
uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;
/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
/* Character device basename. Can be set by user. */
static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";

static unsigned lcore_ids[RTE_MAX_LCORE];
uint16_t ports[RTE_MAX_ETHPORTS];

static unsigned nb_ports; /**< The number of ports specified in command line */

/* ethernet addresses of ports */
struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
/* heads for the main used and free linked lists for the data path. */
static struct virtio_net_data_ll *ll_root_used;
static struct virtio_net_data_ll *ll_root_free;

/*
 * Array of data core structures containing information on
 * individual core linked lists.
 */
static struct lcore_info lcore_info[RTE_MAX_LCORE];
/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

struct device_statistics dev_statistics[MAX_DEVICES];
/*
 * Set character device basename.
 */
static int
us_vhost_parse_basename(const char *q_arg)
{
	/* The basename must fit within the buffer. */
	if (strlen(q_arg) >= MAX_BASENAME_SZ)
		return -1;

	strlcpy((char *)&dev_basename, q_arg, MAX_BASENAME_SZ);

	return 0;
}
/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}
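
/*
 * Example (illustrative): parse_portmask("1") returns 0x1, enabling
 * port 0 only; parse_portmask("f") returns 0xf, enabling ports 0-3.
 * Malformed input such as "4x" yields 0, which the caller rejects.
 */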
/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;
}
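
/*
 * Example (illustrative): parse_num_opt("4", 8) returns 4, while
 * parse_num_opt("9", 8) and parse_num_opt("4x", 8) both return -1.
 */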
/*
 * Display usage.
 */
static void
tep_termination_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"		--udp-port: UDP destination port for VXLAN packet\n"
	"		--nb-devices [1-64]: the number of virtio devices\n"
	"		--tx-checksum [0|1]: inner Tx checksum offload\n"
	"		--tso-segsz [0-N]: TCP segment size\n"
	"		--decap [0|1]: tunneling packet decapsulation\n"
	"		--encap [0|1]: tunneling packet encapsulation\n"
	"		--filter-type [1-3]: filter type for tunneling packet\n"
	"			1: Inner MAC and tenant ID\n"
	"			2: Inner MAC and VLAN, and tenant ID\n"
	"			3: Outer MAC, inner MAC and tenant ID\n"
	"	-p PORTMASK: Set mask for ports to be used by application\n"
	"	--rx-retry [0|1]: disable/enable(default) retries on RX."
	"		Enable retry if the destination queue is full\n"
	"	--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX."
	"		Only effective if retries on RX are enabled\n"
	"	--rx-retry-num [0-N]: the number of retries on RX."
	"		Only effective if retries on RX are enabled\n"
	"	--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"	--dev-basename: The basename to be used for the character device.\n",
	prgname);
}
/*
 * Parse the arguments given in the command line of the application.
 */
static int
tep_termination_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{CMD_LINE_OPT_NB_DEVICES, required_argument, NULL, 0},
		{CMD_LINE_OPT_UDP_PORT, required_argument, NULL, 0},
		{CMD_LINE_OPT_TX_CHECKSUM, required_argument, NULL, 0},
		{CMD_LINE_OPT_TSO_SEGSZ, required_argument, NULL, 0},
		{CMD_LINE_OPT_DECAP, required_argument, NULL, 0},
		{CMD_LINE_OPT_ENCAP, required_argument, NULL, 0},
		{CMD_LINE_OPT_FILTER_TYPE, required_argument, NULL, 0},
		{CMD_LINE_OPT_RX_RETRY, required_argument, NULL, 0},
		{CMD_LINE_OPT_RX_RETRY_DELAY, required_argument, NULL, 0},
		{CMD_LINE_OPT_RX_RETRY_NUM, required_argument, NULL, 0},
		{CMD_LINE_OPT_STATS, required_argument, NULL, 0},
		{CMD_LINE_OPT_DEV_BASENAME, required_argument, NULL, 0},
		{NULL, 0, 0, 0},
	};
	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:",
			long_option, &option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid portmask\n");
				tep_termination_usage(prgname);
				return -1;
			}
			break;

		case 0:
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_NB_DEVICES,
				sizeof(CMD_LINE_OPT_NB_DEVICES))) {
				ret = parse_num_opt(optarg, MAX_DEVICES);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for nb-devices [0-%d]\n",
						MAX_DEVICES);
					tep_termination_usage(prgname);
					return -1;
				} else
					nb_devices = ret;
			}
			/* Enable/disable retries on RX. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_RX_RETRY,
				sizeof(CMD_LINE_OPT_RX_RETRY))) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for rx-retry [0|1]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					enable_retry = ret;
			}
			/* Specify the TCP segment size. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_TSO_SEGSZ,
				sizeof(CMD_LINE_OPT_TSO_SEGSZ))) {
				ret = parse_num_opt(optarg, INT16_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for TCP segment size [0-N]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					tso_segsz = ret;
			}
			/* Specify the UDP tunneling port. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_UDP_PORT,
				sizeof(CMD_LINE_OPT_UDP_PORT))) {
				ret = parse_num_opt(optarg, INT16_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for UDP port [0-N]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					udp_port = ret;
			}
			/* Specify the retry delay time (in microseconds) on RX. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_RX_RETRY_DELAY,
				sizeof(CMD_LINE_OPT_RX_RETRY_DELAY))) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for rx-retry-delay [0-N]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					burst_rx_delay_time = ret;
			}
			/* Specify the number of retries on RX. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_RX_RETRY_NUM,
				sizeof(CMD_LINE_OPT_RX_RETRY_NUM))) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for rx-retry-num [0-N]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					burst_rx_retry_num = ret;
			}
			/* Enable/disable inner TX checksum offload. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_TX_CHECKSUM,
				sizeof(CMD_LINE_OPT_TX_CHECKSUM))) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for tx-checksum [0|1]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					tx_checksum = ret;
			}
			/* Specify the filter type for tunneling packets. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_FILTER_TYPE,
				sizeof(CMD_LINE_OPT_FILTER_TYPE))) {
				ret = parse_num_opt(optarg, 3);
				if ((ret == -1) || (ret == 0)) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for filter type [1-3]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					filter_idx = ret - 1;
			}
			/* Enable/disable decapsulation on RX. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_DECAP,
				sizeof(CMD_LINE_OPT_DECAP))) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for decap [0|1]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					rx_decap = ret;
			}
			/* Enable/disable encapsulation on TX. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_ENCAP,
				sizeof(CMD_LINE_OPT_ENCAP))) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for encap [0|1]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					tx_encap = ret;
			}
			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_STATS,
				sizeof(CMD_LINE_OPT_STATS))) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for stats [0..N]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					enable_stats = ret;
			}
			/* Set character device basename. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_DEV_BASENAME,
				sizeof(CMD_LINE_OPT_DEV_BASENAME))) {
				if (us_vhost_parse_basename(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for character "
						"device basename (Max %d characters)\n",
						MAX_BASENAME_SZ);
					tep_termination_usage(prgname);
					return -1;
				}
			}

			break;

		/* Invalid option - print options. */
		default:
			tep_termination_usage(prgname);
			return -1;
		}
	}
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[nb_ports++] = (uint8_t)i;
	}

	if ((nb_ports == 0) || (nb_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", nb_ports,
			MAX_SUP_PORTS);
		return -1;
	}

	return 0;
}
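
/*
 * Illustrative invocation (core list and option values are examples
 * only, not defaults mandated by this file):
 *
 *	./tep_termination -l 0-3 -n 4 -- -p 0x1 --udp-port 4789 \
 *		--nb-devices 2 --filter-type 1 --stats 1
 */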
/*
 * Update the global var NB_PORTS and array PORTS
 * according to system ports number and return valid ports number
 */
static unsigned
check_ports_num(unsigned max_nb_ports)
{
	unsigned valid_nb_ports = nb_ports;
	unsigned portid;

	if (nb_ports > max_nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) "
			"exceeds total system port number(%u)\n",
			nb_ports, max_nb_ports);
		nb_ports = max_nb_ports;
	}

	for (portid = 0; portid < nb_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			RTE_LOG(INFO, VHOST_PORT,
				"\nSpecified port ID(%u) is not valid\n",
				ports[portid]);
			ports[portid] = INVALID_PORT_ID;
			valid_nb_ports--;
		}
	}
	return valid_nb_ports;
}
/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */
static __rte_always_inline void
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct mbuf_table *tx_q;
	struct rte_mbuf **m_table;
	unsigned len, ret = 0;
	const uint16_t lcore_id = rte_lcore_id();

	RTE_LOG_DP(DEBUG, VHOST_DATA, "(%d) TX: MAC address is external\n",
		vdev->vid);

	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];
	len = tx_q->len;

	tx_q->m_table[len] = m;
	len++;

	if (enable_stats) {
		dev_statistics[vdev->vid].tx_total++;
		dev_statistics[vdev->vid].tx++;
	}

	if (unlikely(len == MAX_PKT_BURST)) {
		m_table = (struct rte_mbuf **)tx_q->m_table;
		ret = overlay_options.tx_handle(ports[0],
			(uint16_t)tx_q->txq_id, m_table,
			(uint16_t)tx_q->len);

		/* Free any buffers not handled by TX and update
		 * the port stats.
		 */
		if (unlikely(ret < len)) {
			do {
				rte_pktmbuf_free(m_table[ret]);
			} while (++ret < len);
		}

		len = 0;
	}

	tx_q->len = len;
}
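
/*
 * Packets accumulate in the per-lcore table and are flushed either
 * above, when a full MAX_PKT_BURST batch is ready, or from the worker
 * loop after BURST_TX_DRAIN_US has elapsed; batching amortizes the
 * per-burst cost of the PMD TX path.
 */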
/*
 * This function is called by each data core. It handles all
 * RX/TX registered with the core. For TX the specific lcore
 * linked list is used. For RX, MAC addresses are compared
 * with all devices in the main linked list.
 */
static int
switch_worker(void *arg)
{
	struct rte_mempool *mbuf_pool = arg;
	struct vhost_dev *vdev = NULL;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct virtio_net_data_ll *dev_ll;
	struct mbuf_table *tx_q;
	volatile struct lcore_ll_info *lcore_ll;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
		/ US_PER_S * BURST_TX_DRAIN_US;
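
	/*
	 * drain_tsc converts BURST_TX_DRAIN_US to TSC cycles, rounding
	 * the per-microsecond cycle count up. Illustrative arithmetic,
	 * assuming a 2 GHz TSC: (2e9 / 1e6) * 100 = 200000 cycles,
	 * i.e. roughly 100 us between forced drains.
	 */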
	uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
	unsigned i, ret = 0;
	const uint16_t lcore_id = rte_lcore_id();
	const uint16_t num_cores = (uint16_t)rte_lcore_count();
	uint16_t rx_count = 0;
	uint16_t tx_count;
	uint32_t retry = 0;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
	lcore_ll = lcore_info[lcore_id].lcore_ll;
	prev_tsc = 0;

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < num_cores; i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}
	while (1) {
		cur_tsc = rte_rdtsc();

		/* TX burst queue drain */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			if (tx_q->len) {
				RTE_LOG_DP(DEBUG, VHOST_DATA, "TX queue drained after "
					"timeout with burst size %u\n",
					tx_q->len);
				ret = overlay_options.tx_handle(ports[0],
					(uint16_t)tx_q->txq_id,
					(struct rte_mbuf **)tx_q->m_table,
					(uint16_t)tx_q->len);
				if (unlikely(ret < tx_q->len)) {
					do {
						rte_pktmbuf_free(tx_q->m_table[ret]);
					} while (++ret < tx_q->len);
				}

				tx_q->len = 0;
			}

			prev_tsc = cur_tsc;
		}
		rte_prefetch0(lcore_ll->ll_root_used);

		/*
		 * Inform the configuration core that we have exited
		 * the linked list and that no devices are
		 * in use if requested.
		 */
		if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;

		dev_ll = lcore_ll->ll_root_used;
		while (dev_ll != NULL) {
			vdev = dev_ll->vdev;

			if (unlikely(vdev->remove)) {
				dev_ll = dev_ll->next;
				overlay_options.tunnel_destroy(vdev);
				vdev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}
			if (likely(vdev->ready == DEVICE_RX)) {
				/* Handle guest RX */
				rx_count = rte_eth_rx_burst(ports[0],
					vdev->rx_q, pkts_burst, MAX_PKT_BURST);
				if (rx_count) {
					/*
					 * If retry is enabled and the queue is
					 * full, then we wait and retry to
					 * avoid packet loss. Here MAX_PKT_BURST
					 * must be less than the virtio queue size.
					 */
					if (enable_retry && unlikely(rx_count >
						rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))) {
						for (retry = 0; retry < burst_rx_retry_num;
							retry++) {
							rte_delay_us(burst_rx_delay_time);
							if (rx_count <= rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))
								break;
						}
					}

					ret_count = overlay_options.rx_handle(vdev->vid, pkts_burst, rx_count);
					if (enable_stats) {
						rte_atomic64_add(
							&dev_statistics[vdev->vid].rx_total_atomic,
							rx_count);
						rte_atomic64_add(
							&dev_statistics[vdev->vid].rx_atomic, ret_count);
					}
					while (likely(rx_count)) {
						rx_count--;
						rte_pktmbuf_free(pkts_burst[rx_count]);
					}
				}
			}
			if (likely(!vdev->remove)) {
				/* Handle guest TX */
				tx_count = rte_vhost_dequeue_burst(vdev->vid,
					VIRTIO_TXQ, mbuf_pool,
					pkts_burst, MAX_PKT_BURST);
				/* If this is the first received packet we need to learn the MAC */
				if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
					if (vdev->remove ||
						(overlay_options.tunnel_setup(vdev, pkts_burst[0]) == -1)) {
						while (tx_count)
							rte_pktmbuf_free(pkts_burst[--tx_count]);
					}
				}
				while (tx_count)
					virtio_tx_route(vdev, pkts_burst[--tx_count]);
			}

			/* move to the next device in the list */
			dev_ll = dev_ll->next;
		}
	}

	return 0;
}
/*
 * Add an entry to a used linked list. A free entry must first be found
 * in the free linked list using get_data_ll_free_entry();
 */
static void
add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
	struct virtio_net_data_ll *ll_dev)
{
	struct virtio_net_data_ll *ll = *ll_root_addr;

	/* Set next as NULL and use a compiler barrier to avoid reordering. */
	ll_dev->next = NULL;
	rte_compiler_barrier();

	/* If ll == NULL then this is the first device. */
	if (ll) {
		/* Increment to the tail of the linked list. */
		while (ll->next != NULL)
			ll = ll->next;

		ll->next = ll_dev;
	} else
		*ll_root_addr = ll_dev;
}
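
/*
 * The barrier above orders the write of ll_dev->next before the entry
 * becomes reachable from the list, so a data core walking the list
 * concurrently never follows an uninitialized next pointer.
 */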
/*
 * Remove an entry from a used linked list. The entry must then be added to
 * the free linked list using put_data_ll_free_entry().
 */
static void
rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
	struct virtio_net_data_ll *ll_dev,
	struct virtio_net_data_ll *ll_dev_last)
{
	struct virtio_net_data_ll *ll = *ll_root_addr;

	if (unlikely((ll == NULL) || (ll_dev == NULL)))
		return;

	if (ll_dev == ll)
		*ll_root_addr = ll_dev->next;
	else if (likely(ll_dev_last != NULL))
		ll_dev_last->next = ll_dev->next;
	else
		RTE_LOG(ERR, VHOST_CONFIG,
			"Remove entry from ll failed.\n");
}
/*
 * Find and return an entry from the free linked list.
 */
static struct virtio_net_data_ll *
get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
{
	struct virtio_net_data_ll *ll_free = *ll_root_addr;
	struct virtio_net_data_ll *ll_dev;

	if (ll_free == NULL)
		return NULL;

	ll_dev = ll_free;
	*ll_root_addr = ll_free->next;

	return ll_dev;
}
/*
 * Place an entry back on to the free linked list.
 */
static void
put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr,
	struct virtio_net_data_ll *ll_dev)
{
	struct virtio_net_data_ll *ll_free = *ll_root_addr;

	if (ll_dev == NULL)
		return;

	ll_dev->next = ll_free;
	*ll_root_addr = ll_dev;
}
/*
 * Creates a linked list of a given size.
 */
static struct virtio_net_data_ll *
alloc_data_ll(uint32_t size)
{
	struct virtio_net_data_ll *ll_new;
	uint32_t i;

	/* Malloc and then chain the linked list. */
	ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
	if (ll_new == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for ll_new.\n");
		return NULL;
	}

	for (i = 0; i < size - 1; i++) {
		ll_new[i].vdev = NULL;
		ll_new[i].next = &ll_new[i+1];
	}
	ll_new[i].next = NULL;

	return ll_new;
}
/*
 * Create the main linked list along with each individual core's
 * linked list. A used and a free list are created to manage entries.
 */
static int
init_data_ll(void)
{
	int lcore;

	RTE_LCORE_FOREACH_SLAVE(lcore) {
		lcore_info[lcore].lcore_ll =
			malloc(sizeof(struct lcore_ll_info));
		if (lcore_info[lcore].lcore_ll == NULL) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"Failed to allocate memory for lcore_ll.\n");
			return -1;
		}

		lcore_info[lcore].lcore_ll->device_num = 0;
		lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
		lcore_info[lcore].lcore_ll->ll_root_used = NULL;
		if (nb_devices % nb_switching_cores)
			lcore_info[lcore].lcore_ll->ll_root_free =
				alloc_data_ll((nb_devices / nb_switching_cores)
						+ 1);
		else
			lcore_info[lcore].lcore_ll->ll_root_free =
				alloc_data_ll(nb_devices / nb_switching_cores);
	}

	/* Allocate devices up to a maximum of MAX_DEVICES. */
	ll_root_free = alloc_data_ll(MIN((nb_devices), MAX_DEVICES));

	return 0;
}
/*
 * Remove a device from the specific data core linked list and
 * from the main linked list. Synchronization occurs through the use
 * of the lcore dev_removal_flag.
 */
static void
destroy_device(int vid)
{
	struct virtio_net_data_ll *ll_lcore_dev_cur;
	struct virtio_net_data_ll *ll_main_dev_cur;
	struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
	struct virtio_net_data_ll *ll_main_dev_last = NULL;
	struct vhost_dev *vdev = NULL;
	int lcore;

	ll_main_dev_cur = ll_root_used;
	while (ll_main_dev_cur != NULL) {
		if (ll_main_dev_cur->vdev->vid == vid) {
			vdev = ll_main_dev_cur->vdev;
			break;
		}
		ll_main_dev_cur = ll_main_dev_cur->next;
	}
	if (vdev == NULL)
		return;

	/* set the remove flag. */
	vdev->remove = 1;
	while (vdev->ready != DEVICE_SAFE_REMOVE)
		rte_pause();

	/* Search for entry to be removed from lcore ll */
	ll_lcore_dev_cur = lcore_info[vdev->coreid].lcore_ll->ll_root_used;
	while (ll_lcore_dev_cur != NULL) {
		if (ll_lcore_dev_cur->vdev == vdev) {
			break;
		} else {
			ll_lcore_dev_last = ll_lcore_dev_cur;
			ll_lcore_dev_cur = ll_lcore_dev_cur->next;
		}
	}

	if (ll_lcore_dev_cur == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) Failed to find the dev to be destroyed.\n", vid);
		return;
	}

	/* Search for entry to be removed from main ll */
	ll_main_dev_cur = ll_root_used;
	ll_main_dev_last = NULL;
	while (ll_main_dev_cur != NULL) {
		if (ll_main_dev_cur->vdev == vdev) {
			break;
		} else {
			ll_main_dev_last = ll_main_dev_cur;
			ll_main_dev_cur = ll_main_dev_cur->next;
		}
	}

	/* Remove entries from the lcore and main ll. */
	rm_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used,
		ll_lcore_dev_cur, ll_lcore_dev_last);
	rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);

	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		lcore_info[lcore].lcore_ll->dev_removal_flag =
			REQUEST_DEV_REMOVAL;
	}

	/*
	 * Once each core has set the dev_removal_flag to
	 * ACK_DEV_REMOVAL we can be sure that they can no longer access
	 * the device removed from the linked lists and that the devices
	 * are no longer in use.
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].lcore_ll->dev_removal_flag
			!= ACK_DEV_REMOVAL)
			rte_pause();
	}

	/* Add the entries back to the lcore and main free ll. */
	put_data_ll_free_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_free,
		ll_lcore_dev_cur);
	put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);

	/* Decrement number of devices on the lcore. */
	lcore_info[vdev->coreid].lcore_ll->device_num--;

	RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been removed "
		"from data core\n", vid);

	rte_free(vdev);
}
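
/*
 * Removal is a two-phase handshake: the config core asks with
 * REQUEST_DEV_REMOVAL, and each data core answers ACK_DEV_REMOVAL at a
 * point in its loop where it holds no pointer into the lists, so the
 * entries returned to the free lists can be recycled safely.
 */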
/*
 * A new device is added to a data core. First the device is added
 * to the main linked list and then allocated to a specific data core.
 */
static int
new_device(int vid)
{
	struct virtio_net_data_ll *ll_dev;
	int lcore, core_add = 0;
	uint32_t device_num_min = nb_devices;
	struct vhost_dev *vdev;

	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
	if (vdev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) Couldn't allocate memory for vhost dev\n", vid);
		return -1;
	}
	vdev->vid = vid;

	/* Add device to main ll */
	ll_dev = get_data_ll_free_entry(&ll_root_free);
	if (ll_dev == NULL) {
		RTE_LOG(INFO, VHOST_DATA, "(%d) No free entry found in"
			" linked list. Device limit of %d devices per core"
			" has been reached\n", vid, nb_devices);
		if (vdev->regions_hpa)
			rte_free(vdev->regions_hpa);
		rte_free(vdev);
		return -1;
	}
	ll_dev->vdev = vdev;
	add_data_ll_entry(&ll_root_used, ll_dev);

	/* reset ready flag */
	vdev->ready = DEVICE_MAC_LEARNING;

	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
			device_num_min = lcore_info[lcore].lcore_ll->device_num;
			core_add = lcore;
		}
	}
	/* Add device to lcore ll */
	ll_dev = get_data_ll_free_entry(&lcore_info[core_add].lcore_ll->ll_root_free);
	if (ll_dev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) Failed to add device to data core\n",
			vid);
		vdev->ready = DEVICE_SAFE_REMOVE;
		destroy_device(vid);
		rte_free(vdev->regions_hpa);
		rte_free(vdev);
		return -1;
	}
	ll_dev->vdev = vdev;
	vdev->coreid = core_add;

	add_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used,
		ll_dev);

	/* Initialize device stats */
	memset(&dev_statistics[vid], 0,
		sizeof(struct device_statistics));

	/* Disable notifications. */
	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
	lcore_info[vdev->coreid].lcore_ll->device_num++;

	RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been added to data core %d\n",
		vid, vdev->coreid);

	return 0;
}
/*
 * These callbacks allow devices to be added to the data core when
 * configuration has fully completed.
 */
static const struct vhost_device_ops virtio_net_device_ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,
};
/*
 * This is a thread that wakes up after a period to print stats if the
 * user has enabled them.
 */
static void *
print_stats(__rte_unused void *arg)
{
	struct virtio_net_data_ll *dev_ll;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total, rx_ip_csum, rx_l4_csum;
	int vid;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
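
	/*
	 * clr and top_left are the ANSI escape sequences ESC[2J (clear
	 * screen) and ESC[1;1H (cursor to row 1, column 1); 27 is the
	 * ESC byte.
	 */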
	while (1) {
		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s", clr, top_left);

		printf("\nDevice statistics ================================");

		dev_ll = ll_root_used;
		while (dev_ll != NULL) {
			vid = dev_ll->vdev->vid;
			tx_total = dev_statistics[vid].tx_total;
			tx = dev_statistics[vid].tx;
			tx_dropped = tx_total - tx;

			rx_total = rte_atomic64_read(
				&dev_statistics[vid].rx_total_atomic);
			rx = rte_atomic64_read(
				&dev_statistics[vid].rx_atomic);
			rx_dropped = rx_total - rx;
			rx_ip_csum = rte_atomic64_read(
				&dev_statistics[vid].rx_bad_ip_csum);
			rx_l4_csum = rte_atomic64_read(
				&dev_statistics[vid].rx_bad_l4_csum);

			printf("\nStatistics for device %d ----------"
				"\nTX total:       %"PRIu64""
				"\nTX dropped:     %"PRIu64""
				"\nTX successful:  %"PRIu64""
				"\nRX total:       %"PRIu64""
				"\nRX bad IP csum: %"PRIu64""
				"\nRX bad L4 csum: %"PRIu64""
				"\nRX dropped:     %"PRIu64""
				"\nRX successful:  %"PRIu64"",
				vid,
				tx_total,
				tx_dropped,
				tx,
				rx_total,
				rx_ip_csum,
				rx_l4_csum,
				rx_dropped,
				rx);

			dev_ll = dev_ll->next;
		}
		printf("\n================================================\n");
	}

	return NULL;
}
/*
 * Main function, does initialisation and calls the per-lcore functions.
 */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool = NULL;
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_nb_ports;
	int ret;
	uint16_t portid;
	uint16_t queue_id;
	static pthread_t tid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = tep_termination_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;

	/* set the number of switching cores available */
	nb_switching_cores = rte_lcore_count() - 1;

	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count_avail();

	/*
	 * Update the global var NB_PORTS and global array PORTS
	 * and get value of var VALID_NB_PORTS according to system ports number
	 */
	valid_nb_ports = check_ports_num(nb_ports);

	if ((valid_nb_ports == 0) || (valid_nb_ports > MAX_SUP_PORTS)) {
		rte_exit(EXIT_FAILURE, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", nb_ports,
			MAX_SUP_PORTS);
	}

	/* Create the mbuf pool. */
	mbuf_pool = rte_pktmbuf_pool_create(
			"MBUF_POOL",
			NUM_MBUFS_PER_PORT * valid_nb_ports,
			MBUF_CACHE_SIZE,
			0,
			MBUF_DATA_SIZE,
			rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	for (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++)
		vpool_array[queue_id].pool = mbuf_pool;

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);
			continue;
		}
		if (overlay_options.port_configure(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot initialize network ports\n");
	}

	/* Initialise all linked lists. */
	if (init_data_ll() == -1)
		rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");

	/* Initialize device stats */
	memset(&dev_statistics, 0, sizeof(dev_statistics));

	/* Enable stats if the user option is set. */
	if (enable_stats) {
		ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
					print_stats, NULL);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot create print-stats thread\n");
	}

	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(switch_worker,
			mbuf_pool, lcore_id);
	}

	ret = rte_vhost_driver_register((char *)&dev_basename, 0);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "failed to register vhost driver.\n");

	rte_vhost_driver_disable_features(dev_basename,
		1ULL << VIRTIO_NET_F_MRG_RXBUF);

	ret = rte_vhost_driver_callback_register(dev_basename,
		&virtio_net_device_ops);
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			"failed to register vhost driver callbacks.\n");

	if (rte_vhost_driver_start(dev_basename) < 0) {
		rte_exit(EXIT_FAILURE,
			"failed to start vhost driver.\n");
	}

	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_wait_lcore(lcore_id);

	return 0;
}