/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>

#include <sys/eventfd.h>
#include <sys/param.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_pause.h>

#include "vxlan_setup.h"
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

/*
 * Calculate the number of buffers needed per port
 */
#define NUM_MBUFS_PER_PORT ((MAX_QUEUES * RTE_TEST_RX_DESC_DEFAULT) +\
				(nb_switching_cores * MAX_PKT_BURST) +\
				(nb_switching_cores * \
				RTE_TEST_TX_DESC_DEFAULT) +\
				(nb_switching_cores * MBUF_CACHE_SIZE))

#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE

#define MAX_PKT_BURST 32	/* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

/* Defines how long we wait between retries on RX */
#define BURST_RX_WAIT_US 15

#define BURST_RX_RETRIES 4	/* Number of retries on RX. */
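/* Maximum frame size used for jumbo frames (0x2600 = 9728 bytes). */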
#define JUMBO_FRAME_MAX_SIZE 0x2600

/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX 1
#define DEVICE_SAFE_REMOVE 2

/* Config_core_flag status definitions. */
#define REQUEST_DEV_REMOVAL 1
#define ACK_DEV_REMOVAL 0

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512
/* Get first 4 bytes in mbuf headroom. */
#define MBUF_HEADROOM_UINT32(mbuf) (*(uint32_t *)((uint8_t *)(mbuf) \
		+ sizeof(struct rte_mbuf)))

#define INVALID_PORT_ID 0xFFFF

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum character device basename size. */
#define MAX_BASENAME_SZ 20

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64

/* Used to compare MAC addresses. */
#define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL

#define CMD_LINE_OPT_NB_DEVICES "nb-devices"
#define CMD_LINE_OPT_UDP_PORT "udp-port"
#define CMD_LINE_OPT_TX_CHECKSUM "tx-checksum"
#define CMD_LINE_OPT_TSO_SEGSZ "tso-segsz"
#define CMD_LINE_OPT_FILTER_TYPE "filter-type"
#define CMD_LINE_OPT_ENCAP "encap"
#define CMD_LINE_OPT_DECAP "decap"
#define CMD_LINE_OPT_RX_RETRY "rx-retry"
#define CMD_LINE_OPT_RX_RETRY_DELAY "rx-retry-delay"
#define CMD_LINE_OPT_RX_RETRY_NUM "rx-retry-num"
#define CMD_LINE_OPT_STATS "stats"
#define CMD_LINE_OPT_DEV_BASENAME "dev-basename"
/* mask of enabled ports */
static uint32_t enabled_port_mask;

/* Number of switching cores enabled */
static uint32_t nb_switching_cores;

/* number of devices/queues to support */
uint16_t nb_devices = 2;

/* max ring descriptor, ixgbe, i40e, e1000 all are 4096. */
#define MAX_RING_DESC 4096
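/* Mempool/ring pair used by the data path; vpool_array is indexed by queue ID. */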
struct vpool {
	struct rte_mempool *pool;
	struct rte_ring *ring;
} vpool_array[MAX_QUEUES + MAX_QUEUES];
/* UDP tunneling port */
uint16_t udp_port = 4789;

/* enable/disable inner TX checksum */
uint8_t tx_checksum = 0;

/* TCP segment size */
uint16_t tso_segsz = 0;

/* enable/disable decapsulation */
uint8_t rx_decap = 1;

/* enable/disable encapsulation */
uint8_t tx_encap = 1;

/* RX filter type for tunneling packet */
uint8_t filter_idx = 1;
/* overlay packet operation */
struct ol_switch_ops overlay_options = {
	.port_configure = vxlan_port_init,
	.tunnel_setup = vxlan_link,
	.tunnel_destroy = vxlan_unlink,
	.tx_handle = vxlan_tx_pkts,
	.rx_handle = vxlan_rx_pkts,
	.param_handle = NULL,
};
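/* When non-zero, statistics are printed every enable_stats seconds (set via --stats). */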
uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;
/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
/* Character device basename. Can be set by user. */
static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
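/* IDs of the lcores enabled on the command line; filled in main(). */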
static unsigned lcore_ids[RTE_MAX_LCORE];
uint16_t ports[RTE_MAX_ETHPORTS];

static unsigned nb_ports; /**< The number of ports specified in command line */

/* ethernet addresses of ports */
struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* heads for the main used and free linked lists for the data path. */
static struct virtio_net_data_ll *ll_root_used;
static struct virtio_net_data_ll *ll_root_free;

/*
 * Array of data core structures containing information on
 * individual core linked lists.
 */
static struct lcore_info lcore_info[RTE_MAX_LCORE];
/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
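/* Per-device statistics, indexed by vhost device ID (vid). */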
struct device_statistics dev_statistics[MAX_DEVICES];
/*
 * Set character device basename.
 */
us_vhost_parse_basename(const char *q_arg)
	/* parse number string */
	if (strlen(q_arg) >= MAX_BASENAME_SZ)
		return -1;
	else
		snprintf((char *)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg);
/*
 * Parse the portmask provided at run time.
 */
parse_portmask(const char *portmask)
	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;
/*
 * Parse num options at run time.
 */
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (num > max_valid_value)
		return -1;
tep_termination_usage(const char *prgname)
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"	--udp-port: UDP destination port for VXLAN packet\n"
	"	--nb-devices [1-64]: The number of virtIO devices\n"
	"	--tx-checksum [0|1]: inner Tx checksum offload\n"
	"	--tso-segsz [0-N]: TCP segment size\n"
	"	--decap [0|1]: tunneling packet decapsulation\n"
	"	--encap [0|1]: tunneling packet encapsulation\n"
	"	--filter-type [1-3]: filter type for tunneling packet\n"
	"		1: Inner MAC and tenant ID\n"
	"		2: Inner MAC and VLAN, and tenant ID\n"
	"		3: Outer MAC, Inner MAC and tenant ID\n"
	"	-p PORTMASK: Set mask for ports to be used by application\n"
	"	--rx-retry [0|1]: disable/enable(default) retries on RX."
	"		Enable retry if destination queue is full\n"
	"	--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX."
	"		This only takes effect if retries on RX are enabled\n"
	"	--rx-retry-num [0-N]: the number of retries on RX."
	"		This only takes effect if retries on RX are enabled\n"
	"	--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"	--dev-basename: The basename to be used for the character device.\n",
	prgname);
/*
 * Parse the arguments given in the command line of the application.
 */
tep_termination_parse_args(int argc, char **argv)
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{CMD_LINE_OPT_NB_DEVICES, required_argument, NULL, 0},
		{CMD_LINE_OPT_UDP_PORT, required_argument, NULL, 0},
		{CMD_LINE_OPT_TX_CHECKSUM, required_argument, NULL, 0},
		{CMD_LINE_OPT_TSO_SEGSZ, required_argument, NULL, 0},
		{CMD_LINE_OPT_DECAP, required_argument, NULL, 0},
		{CMD_LINE_OPT_ENCAP, required_argument, NULL, 0},
		{CMD_LINE_OPT_FILTER_TYPE, required_argument, NULL, 0},
		{CMD_LINE_OPT_RX_RETRY, required_argument, NULL, 0},
		{CMD_LINE_OPT_RX_RETRY_DELAY, required_argument, NULL, 0},
		{CMD_LINE_OPT_RX_RETRY_NUM, required_argument, NULL, 0},
		{CMD_LINE_OPT_STATS, required_argument, NULL, 0},
		{CMD_LINE_OPT_DEV_BASENAME, required_argument, NULL, 0},
		{NULL, 0, 0, 0},
	};
	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:",
			long_option, &option_index)) != EOF) {
		enabled_port_mask = parse_portmask(optarg);
		if (enabled_port_mask == 0) {
			RTE_LOG(INFO, VHOST_CONFIG,
				"Invalid portmask\n");
			tep_termination_usage(prgname);
			return -1;
		}
		if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_NB_DEVICES,
				sizeof(CMD_LINE_OPT_NB_DEVICES))) {
			ret = parse_num_opt(optarg, MAX_DEVICES);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for nb-devices [0-%d]\n",
					MAX_DEVICES);
				tep_termination_usage(prgname);
				return -1;
			}
		/* Enable/disable retries on RX. */
		if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_RX_RETRY,
				sizeof(CMD_LINE_OPT_RX_RETRY))) {
			ret = parse_num_opt(optarg, 1);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for rx-retry [0|1]\n");
				tep_termination_usage(prgname);
				return -1;
			}
		if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_TSO_SEGSZ,
				sizeof(CMD_LINE_OPT_TSO_SEGSZ))) {
			ret = parse_num_opt(optarg, INT16_MAX);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for TCP segment size [0-N]\n");
				tep_termination_usage(prgname);
				return -1;
			}
		if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_UDP_PORT,
				sizeof(CMD_LINE_OPT_UDP_PORT))) {
			ret = parse_num_opt(optarg, INT16_MAX);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for UDP port [0-N]\n");
				tep_termination_usage(prgname);
				return -1;
			}
		/* Specify the retries delay time (in microseconds) on RX. */
		if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_RX_RETRY_DELAY,
				sizeof(CMD_LINE_OPT_RX_RETRY_DELAY))) {
			ret = parse_num_opt(optarg, INT32_MAX);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for rx-retry-delay [0-N]\n");
				tep_termination_usage(prgname);
				return -1;
			}
			burst_rx_delay_time = ret;
		/* Specify the retries number on RX. */
		if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_RX_RETRY_NUM,
				sizeof(CMD_LINE_OPT_RX_RETRY_NUM))) {
			ret = parse_num_opt(optarg, INT32_MAX);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for rx-retry-num [0-N]\n");
				tep_termination_usage(prgname);
				return -1;
			}
			burst_rx_retry_num = ret;
		if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_TX_CHECKSUM,
				sizeof(CMD_LINE_OPT_TX_CHECKSUM))) {
			ret = parse_num_opt(optarg, 1);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for tx-checksum [0|1]\n");
				tep_termination_usage(prgname);
				return -1;
			}
		if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_FILTER_TYPE,
				sizeof(CMD_LINE_OPT_FILTER_TYPE))) {
			ret = parse_num_opt(optarg, 3);
			if ((ret == -1) || (ret == 0)) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for filter type [1-3]\n");
				tep_termination_usage(prgname);
				return -1;
			}
			filter_idx = ret - 1;
		/* Enable/disable decapsulation on RX. */
		if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_DECAP,
				sizeof(CMD_LINE_OPT_DECAP))) {
			ret = parse_num_opt(optarg, 1);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for decap [0|1]\n");
				tep_termination_usage(prgname);
				return -1;
			}
		/* Enable/disable encapsulation on TX. */
		if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_ENCAP,
				sizeof(CMD_LINE_OPT_ENCAP))) {
			ret = parse_num_opt(optarg, 1);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for encap [0|1]\n");
				tep_termination_usage(prgname);
				return -1;
			}
		/* Enable/disable stats. */
		if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_STATS,
				sizeof(CMD_LINE_OPT_STATS))) {
			ret = parse_num_opt(optarg, INT32_MAX);
			if (ret == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for stats [0..N]\n");
				tep_termination_usage(prgname);
				return -1;
			}
		/* Set character device basename. */
		if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_DEV_BASENAME,
				sizeof(CMD_LINE_OPT_DEV_BASENAME))) {
			if (us_vhost_parse_basename(optarg) == -1) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for character "
					"device basename (Max %d characters)\n",
					MAX_BASENAME_SZ);
				tep_termination_usage(prgname);
				return -1;
			}
		/* Invalid option - print options. */
		tep_termination_usage(prgname);
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[nb_ports++] = (uint8_t)i;
	}

	if ((nb_ports == 0) || (nb_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", nb_ports,
			MAX_SUP_PORTS);
		return -1;
	}
/*
 * Update the global var NB_PORTS and array PORTS
 * according to system ports number and return valid ports number
 */
check_ports_num(unsigned max_nb_ports)
	unsigned valid_nb_ports = nb_ports;

	if (nb_ports > max_nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) "
			"exceeds total system port number(%u)\n",
			nb_ports, max_nb_ports);
		nb_ports = max_nb_ports;
	}

	for (portid = 0; portid < nb_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			RTE_LOG(INFO, VHOST_PORT,
				"\nSpecified port ID(%u) is not valid\n",
				ports[portid]);
			ports[portid] = INVALID_PORT_ID;
			valid_nb_ports--;
		}
	}

	return valid_nb_ports;
/*
 * This function routes the TX packet to the correct interface. This may be a local device
 * or the physical port.
 */
static __rte_always_inline void
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
	struct mbuf_table *tx_q;
	struct rte_mbuf **m_table;
	unsigned len, ret = 0;
	const uint16_t lcore_id = rte_lcore_id();

	RTE_LOG_DP(DEBUG, VHOST_DATA, "(%d) TX: MAC address is external\n",
		vdev->vid);
	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];
	len = tx_q->len;

	tx_q->m_table[len] = m;
	len++;
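	/* Update TX statistics for this device. */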
	dev_statistics[vdev->vid].tx_total++;
	dev_statistics[vdev->vid].tx++;
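	/* Once a full burst has accumulated, flush the queue to the physical port. */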
	if (unlikely(len == MAX_PKT_BURST)) {
		m_table = (struct rte_mbuf **)tx_q->m_table;
		ret = overlay_options.tx_handle(ports[0],
				(uint16_t)tx_q->txq_id, m_table,
				(uint16_t)tx_q->len);

		/* Free any buffers not handled by TX and update
		 * the used index.
		 */
		if (unlikely(ret < len)) {
			do {
				rte_pktmbuf_free(m_table[ret]);
			} while (++ret < len);
		}
/*
 * This function is called by each data core. It handles all
 * RX/TX registered with the core. For TX the specific lcore
 * linked list is used. For RX, MAC addresses are compared
 * with all devices in the main linked list.
 */
switch_worker(__rte_unused void *arg)
	struct rte_mempool *mbuf_pool = arg;
	struct vhost_dev *vdev = NULL;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct virtio_net_data_ll *dev_ll;
	struct mbuf_table *tx_q;
	volatile struct lcore_ll_info *lcore_ll;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
					/ US_PER_S * BURST_TX_DRAIN_US;
	uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
	const uint16_t lcore_id = rte_lcore_id();
	const uint16_t num_cores = (uint16_t)rte_lcore_count();
	uint16_t rx_count = 0;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
	lcore_ll = lcore_info[lcore_id].lcore_ll;

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < num_cores; i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}
		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			RTE_LOG_DP(DEBUG, VHOST_DATA, "TX queue drained after "
				"timeout with burst size %u\n",
				tx_q->len);
			ret = overlay_options.tx_handle(ports[0],
				(uint16_t)tx_q->txq_id,
				(struct rte_mbuf **)tx_q->m_table,
				(uint16_t)tx_q->len);
			if (unlikely(ret < tx_q->len)) {
				do {
					rte_pktmbuf_free(tx_q->m_table[ret]);
				} while (++ret < tx_q->len);
			}
		rte_prefetch0(lcore_ll->ll_root_used);

		/*
		 * Inform the configuration core that we have exited
		 * the linked list and that no devices are
		 * in use if requested.
		 */
		if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
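		/* Walk the list of vhost devices assigned to this core. */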
		dev_ll = lcore_ll->ll_root_used;

		while (dev_ll != NULL) {
			vdev = dev_ll->vdev;

			if (unlikely(vdev->remove)) {
				dev_ll = dev_ll->next;
				overlay_options.tunnel_destroy(vdev);
				vdev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}
			if (likely(vdev->ready == DEVICE_RX)) {
				/* Handle guest RX */
				rx_count = rte_eth_rx_burst(ports[0],
					vdev->rx_q, pkts_burst, MAX_PKT_BURST);
				/*
				 * If retry is enabled and the queue is full,
				 * then we wait and retry to avoid packet loss.
				 * Here MAX_PKT_BURST must be less than the
				 * virtio queue size.
				 */
				if (enable_retry && unlikely(rx_count >
					rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))) {
					for (retry = 0; retry < burst_rx_retry_num;
						retry++) {
						rte_delay_us(burst_rx_delay_time);
						if (rx_count <= rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))
							break;
					}
				}

				ret_count = overlay_options.rx_handle(vdev->vid, pkts_burst, rx_count);

				rte_atomic64_add(
					&dev_statistics[vdev->vid].rx_total_atomic,
					rx_count);
				rte_atomic64_add(
					&dev_statistics[vdev->vid].rx_atomic,
					ret_count);

				while (likely(rx_count)) {
					rx_count--;
					rte_pktmbuf_free(pkts_burst[rx_count]);
				}
			if (likely(!vdev->remove)) {
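				/* Dequeue packets the guest has transmitted and route them out. */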
				tx_count = rte_vhost_dequeue_burst(vdev->vid,
						VIRTIO_TXQ, mbuf_pool,
						pkts_burst, MAX_PKT_BURST);
				/* If this is the first received packet we need to learn the MAC */
				if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
					if (vdev->remove ||
						(overlay_options.tunnel_setup(vdev, pkts_burst[0]) == -1)) {
						while (tx_count)
							rte_pktmbuf_free(pkts_burst[--tx_count]);
					}
				}
				while (tx_count)
					virtio_tx_route(vdev, pkts_burst[--tx_count]);
			/* move to the next device in the list */
			dev_ll = dev_ll->next;
/*
 * Add an entry to a used linked list. A free entry must first be found
 * in the free linked list using get_data_ll_free_entry();
 */
add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
	struct virtio_net_data_ll *ll_dev)
	struct virtio_net_data_ll *ll = *ll_root_addr;

	/* Set next as NULL and use a compiler barrier to avoid reordering. */
	ll_dev->next = NULL;
	rte_compiler_barrier();

	/* If ll == NULL then this is the first device. */
	/* Increment to the tail of the linked list. */
	while (ll->next != NULL)
		ll = ll->next;

	*ll_root_addr = ll_dev;
/*
 * Remove an entry from a used linked list. The entry must then be added to
 * the free linked list using put_data_ll_free_entry().
 */
rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
	struct virtio_net_data_ll *ll_dev,
	struct virtio_net_data_ll *ll_dev_last)
	struct virtio_net_data_ll *ll = *ll_root_addr;

	if (unlikely((ll == NULL) || (ll_dev == NULL)))
		return;

	*ll_root_addr = ll_dev->next;

	if (likely(ll_dev_last != NULL))
		ll_dev_last->next = ll_dev->next;
	else
		RTE_LOG(ERR, VHOST_CONFIG,
			"Remove entry from ll failed.\n");
/*
 * Find and return an entry from the free linked list.
 */
static struct virtio_net_data_ll *
get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
	struct virtio_net_data_ll *ll_free = *ll_root_addr;
	struct virtio_net_data_ll *ll_dev;

	if (ll_free == NULL)
		return NULL;

	ll_dev = ll_free;
	*ll_root_addr = ll_free->next;

	return ll_dev;
/*
 * Place an entry back on to the free linked list.
 */
put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr,
	struct virtio_net_data_ll *ll_dev)
	struct virtio_net_data_ll *ll_free = *ll_root_addr;

	ll_dev->next = ll_free;
	*ll_root_addr = ll_dev;
/*
 * Creates a linked list of a given size.
 */
static struct virtio_net_data_ll *
alloc_data_ll(uint32_t size)
	struct virtio_net_data_ll *ll_new;

	/* Malloc and then chain the linked list. */
	ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
	if (ll_new == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for ll_new.\n");
		return NULL;
	}

	for (i = 0; i < size - 1; i++) {
		ll_new[i].vdev = NULL;
		ll_new[i].next = &ll_new[i+1];
	}
	ll_new[i].next = NULL;

	return ll_new;
/*
 * Create the main linked list along with each individual core's
 * linked list. A used and a free list are created to manage entries.
 */
init_data_ll(void)
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		lcore_info[lcore].lcore_ll =
			malloc(sizeof(struct lcore_ll_info));
		if (lcore_info[lcore].lcore_ll == NULL) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"Failed to allocate memory for lcore_ll.\n");
			return -1;
		}

		lcore_info[lcore].lcore_ll->device_num = 0;
		lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
		lcore_info[lcore].lcore_ll->ll_root_used = NULL;
		if (nb_devices % nb_switching_cores)
			lcore_info[lcore].lcore_ll->ll_root_free =
				alloc_data_ll((nb_devices / nb_switching_cores)
						+ 1);
		else
			lcore_info[lcore].lcore_ll->ll_root_free =
				alloc_data_ll(nb_devices / nb_switching_cores);
	}

	/* Allocate devices up to a maximum of MAX_DEVICES. */
	ll_root_free = alloc_data_ll(MIN((nb_devices), MAX_DEVICES));
/*
 * Remove a device from the specific data core linked list and
 * from the main linked list. Synchronization occurs through the use
 * of the lcore dev_removal_flag.
 */
destroy_device(int vid)
	struct virtio_net_data_ll *ll_lcore_dev_cur;
	struct virtio_net_data_ll *ll_main_dev_cur;
	struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
	struct virtio_net_data_ll *ll_main_dev_last = NULL;
	struct vhost_dev *vdev = NULL;

	ll_main_dev_cur = ll_root_used;
	while (ll_main_dev_cur != NULL) {
		if (ll_main_dev_cur->vdev->vid == vid) {
			vdev = ll_main_dev_cur->vdev;
	/* set the remove flag. */
	vdev->remove = 1;
	while (vdev->ready != DEVICE_SAFE_REMOVE)
		rte_pause();
	/* Search for entry to be removed from lcore ll */
	ll_lcore_dev_cur = lcore_info[vdev->coreid].lcore_ll->ll_root_used;
	while (ll_lcore_dev_cur != NULL) {
		if (ll_lcore_dev_cur->vdev == vdev) {
			break;
		} else {
			ll_lcore_dev_last = ll_lcore_dev_cur;
			ll_lcore_dev_cur = ll_lcore_dev_cur->next;
		}
	}

	if (ll_lcore_dev_cur == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) Failed to find the dev to be destroyed.\n", vid);
		return;
	}
	/* Search for entry to be removed from main ll */
	ll_main_dev_cur = ll_root_used;
	ll_main_dev_last = NULL;
	while (ll_main_dev_cur != NULL) {
		if (ll_main_dev_cur->vdev == vdev) {
			break;
		} else {
			ll_main_dev_last = ll_main_dev_cur;
			ll_main_dev_cur = ll_main_dev_cur->next;
		}
	}
	/* Remove entries from the lcore and main ll. */
	rm_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used,
			ll_lcore_dev_cur, ll_lcore_dev_last);
	rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);

	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		lcore_info[lcore].lcore_ll->dev_removal_flag =
			REQUEST_DEV_REMOVAL;
	}

	/*
	 * Once each core has set the dev_removal_flag to
	 * ACK_DEV_REMOVAL we can be sure that they can no longer access
	 * the device removed from the linked lists and that the devices
	 * are no longer in use.
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].lcore_ll->dev_removal_flag
				!= ACK_DEV_REMOVAL)
			rte_pause();
	}
	/* Add the entries back to the lcore and main free ll. */
	put_data_ll_free_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_free,
			ll_lcore_dev_cur);
	put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);

	/* Decrement number of device on the lcore. */
	lcore_info[vdev->coreid].lcore_ll->device_num--;

	RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been removed "
		"from data core\n", vid);
/*
 * A new device is added to a data core. First the device is added
 * to the main linked list and then allocated to a specific data core.
 */
new_device(int vid)
	struct virtio_net_data_ll *ll_dev;
	int lcore, core_add = 0;
	uint32_t device_num_min = nb_devices;
	struct vhost_dev *vdev;

	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
	if (vdev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) Couldn't allocate memory for vhost dev\n", vid);
		return -1;
	}
	/* Add device to main ll */
	ll_dev = get_data_ll_free_entry(&ll_root_free);
	if (ll_dev == NULL) {
		RTE_LOG(INFO, VHOST_DATA, "(%d) No free entry found in"
			" linked list. Device limit of %d devices per core"
			" has been reached\n", vid, nb_devices);
		if (vdev->regions_hpa)
			rte_free(vdev->regions_hpa);
		rte_free(vdev);
		return -1;
	}
	ll_dev->vdev = vdev;
	add_data_ll_entry(&ll_root_used, ll_dev);

	/* reset ready flag */
	vdev->ready = DEVICE_MAC_LEARNING;
	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
			device_num_min = lcore_info[lcore].lcore_ll->device_num;
			core_add = lcore;
		}
	}
	/* Add device to lcore ll */
	ll_dev = get_data_ll_free_entry(&lcore_info[core_add].lcore_ll->ll_root_free);
	if (ll_dev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) Failed to add device to data core\n",
			vid);
		vdev->ready = DEVICE_SAFE_REMOVE;
		destroy_device(vid);
		rte_free(vdev->regions_hpa);
		rte_free(vdev);
		return -1;
	}
	ll_dev->vdev = vdev;
	vdev->coreid = core_add;
	add_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used,
			ll_dev);

	/* Initialize device stats */
	memset(&dev_statistics[vid], 0,
		sizeof(struct device_statistics));

	/* Disable notifications. */
	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
	lcore_info[vdev->coreid].lcore_ll->device_num++;

	RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been added to data core %d\n",
		vid, vdev->coreid);
/*
 * These callbacks allow devices to be added to the data core when configuration
 * has been fully completed.
 */
static const struct vhost_device_ops virtio_net_device_ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,
};
/*
 * This is a thread that will wake up after a period to print stats if the user
 * has enabled them.
 */
print_stats(__rte_unused void *arg)
	struct virtio_net_data_ll *dev_ll;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total, rx_ip_csum, rx_l4_csum;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s", clr, top_left);

		printf("\nDevice statistics ================================");
		dev_ll = ll_root_used;
		while (dev_ll != NULL) {
			vid = dev_ll->vdev->vid;
			tx_total = dev_statistics[vid].tx_total;
			tx = dev_statistics[vid].tx;
			tx_dropped = tx_total - tx;

			rx_total = rte_atomic64_read(
				&dev_statistics[vid].rx_total_atomic);
			rx = rte_atomic64_read(
				&dev_statistics[vid].rx_atomic);
			rx_dropped = rx_total - rx;
			rx_ip_csum = rte_atomic64_read(
				&dev_statistics[vid].rx_bad_ip_csum);
			rx_l4_csum = rte_atomic64_read(
				&dev_statistics[vid].rx_bad_l4_csum);

			printf("\nStatistics for device %d ----------"
				"\nTX total: %"PRIu64""
				"\nTX dropped: %"PRIu64""
				"\nTX successful: %"PRIu64""
				"\nRX total: %"PRIu64""
				"\nRX bad IP csum: %"PRIu64""
				"\nRX bad L4 csum: %"PRIu64""
				"\nRX dropped: %"PRIu64""
				"\nRX successful: %"PRIu64"",
				vid, tx_total, tx_dropped, tx,
				rx_total, rx_ip_csum, rx_l4_csum,
				rx_dropped, rx);

			dev_ll = dev_ll->next;
		}
		printf("\n================================================\n");
/*
 * Main function, does initialisation and calls the per-lcore functions.
 */
main(int argc, char *argv[])
	struct rte_mempool *mbuf_pool = NULL;
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_nb_ports;
	static pthread_t tid;

	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

	/* parse app arguments */
	ret = tep_termination_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;
	/* set the number of switching cores available */
	nb_switching_cores = rte_lcore_count()-1;

	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count_avail();

	/*
	 * Update the global var NB_PORTS and global array PORTS
	 * and get value of var VALID_NB_PORTS according to system ports number
	 */
	valid_nb_ports = check_ports_num(nb_ports);

	if ((valid_nb_ports == 0) || (valid_nb_ports > MAX_SUP_PORTS)) {
		rte_exit(EXIT_FAILURE, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", nb_ports,
			MAX_SUP_PORTS);
	}
	/* Create the mbuf pool. */
	mbuf_pool = rte_pktmbuf_pool_create(
			"MBUF_POOL",
			NUM_MBUFS_PER_PORT * valid_nb_ports,
			MBUF_CACHE_SIZE, 0, MBUF_DATA_SIZE,
			rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	for (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++)
		vpool_array[queue_id].pool = mbuf_pool;
	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);
			continue;
		}
		if (overlay_options.port_configure(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot initialize network ports\n");
	}
	/* Initialise all linked lists. */
	if (init_data_ll() == -1)
		rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");

	/* Initialize device stats */
	memset(&dev_statistics, 0, sizeof(dev_statistics));
	/* Enable stats if the user option is set. */
	ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
			print_stats, NULL);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot create print-stats thread\n");
	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(switch_worker,
			mbuf_pool, lcore_id);
	}
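	/* Register the vhost driver on the configured character device basename. */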
	ret = rte_vhost_driver_register((char *)&dev_basename, 0);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "failed to register vhost driver.\n");
	rte_vhost_driver_disable_features(dev_basename,
		1ULL << VIRTIO_NET_F_MRG_RXBUF);

	ret = rte_vhost_driver_callback_register(dev_basename,
		&virtio_net_device_ops);
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			"failed to register vhost driver callbacks.\n");
	if (rte_vhost_driver_start(dev_basename) < 0) {
		rte_exit(EXIT_FAILURE,
			"failed to start vhost driver.\n");
	}

	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_wait_lcore(lcore_id);