/*
 * Copyright (c) 2014, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "openvswitch/vlog.h"

#include "rte_config.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * We need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define MTU_TO_MAX_LEN(mtu)  ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE_MTU(mtu)   (MTU_TO_MAX_LEN(mtu)        \
                              + sizeof(struct dp_packet) \
                              + RTE_PKTMBUF_HEADROOM)
#define MBUF_SIZE_DRIVER     (2048                       \
                              + sizeof (struct rte_mbuf) \
                              + RTE_PKTMBUF_HEADROOM)
#define MBUF_SIZE(mtu)       MAX(MBUF_SIZE_MTU(mtu), MBUF_SIZE_DRIVER)
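
/* For illustration only (this arithmetic is not used directly by the code):
 * with the standard Ethernet MTU of 1500 bytes, MTU_TO_MAX_LEN(1500)
 * = 1500 + 14 (Ethernet header) + 4 (CRC) = 1518 bytes, so each mbuf must
 * hold 1518 bytes of frame data plus sizeof(struct dp_packet) of metadata
 * plus RTE_PKTMBUF_HEADROOM, or the 2048-byte driver minimum, whichever is
 * larger. */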
/* Max and min number of packets in the mempool.  OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF. */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);
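
/* For illustration: halving from MAX_NB_MBUF (4096 * 64 = 262144) tries
 * 262144, 131072, 65536, 32768 and finally 16384 (= MIN_NB_MBUF); the
 * BUILD_ASSERT_DECLs above and below guarantee that every value in this
 * sequence is a whole multiple of the halving factor and of MP_CACHE_SZ. */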
/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096). */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096). */
static char *cuse_dev_name = NULL;    /* Character device cuse_dev_name. */
static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets. */
/*
 * Maximum amount of time in microseconds to try and enqueue to vhost.
 */
#define VHOST_ENQ_RETRY_USECS 100
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
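
/* For illustration: DRAIN_TSC is a TSC-cycle budget, not a time unit.  On a
 * hypothetical 2 GHz timer, 200000 cycles correspond to roughly 100 us, so a
 * partially filled tx queue is drained after about that long without a
 * flush. */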
static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);
/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools.  Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them. */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time */
                                   /* pkts are queued. */
    int count;
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'txq_needs_locking'). */
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
/* DPDK has no way to remove DPDK ring Ethernet devices,
 * so we have to keep them around once they've been created. */
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id; /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST = 1,
};

struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects 'stats'. */
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* The user might request more txqs than the NIC has.  We remap those
     * ('up.n_txq') on these ('real_n_txq').
     * If the numbers match, 'txq_needs_locking' is false, otherwise it is
     * true and we will take a spinlock on transmission. */
    int real_n_txq;
    bool txq_needs_locking;

    /* Spinlock for vhost transmission.  Other DPDK devices use spinlocks in
     * dpdk_tx_queue. */
    rte_spinlock_t vhost_tx_lock;

    /* virtio-net structure for vhost device. */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* Identifier used to distinguish vhost devices from each other. */
    char vhost_id[PATH_MAX];

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static bool thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* XXX: use DPDK malloc for the entire OVS; in fact, huge pages should be
 * used for all other segments: data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    return ptr;
}
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex). */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
}
static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));

    memset(m, 0, mp->elt_size);

    /* start of buffer is just after mbuf structure */
    m->buf_addr = (char *)m + sizeof(struct dp_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dp_packet);
    m->buf_len = (uint16_t)buf_len;

    /* keep some headroom between start of buffer and data */
    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);

    /* init some constant fields */
    m->pool = mp;
    m->nb_segs = 1;
    m->port = 0xff;
}
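
/* A sketch of the resulting mempool element layout, assuming the usual case
 * where struct dp_packet embeds the rte_mbuf as its first member:
 *
 *   +---------------------------+-----------------+--------------------+
 *   | struct dp_packet          | headroom        | frame data         |
 *   | (rte_mbuf is its first    | (data_off       | (up to buf_len     |
 *   |  member), at 'm'          |  bytes)         |  bytes total)      |
 *   +---------------------------+-----------------+--------------------+
 *                               ^ buf_addr        ^ buf_addr + data_off
 */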
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, NULL,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    } else {
        VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);
    }

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned)dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                         ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available.  When this happens we can retry the configuration
     * and request fewer queues. */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
        if (diag) {
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with less tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with less rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->real_n_txq = n_txq;

        return 0;
    }

    return diag;
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);
    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

    return &netdev->up;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    unsigned i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        if (!netdev->txq_needs_locking) {
            /* Each index is considered as a cpu core id, since there should
             * be one tx queue for each cpu core.  If the corresponding core
             * is not on the same numa node as 'netdev', flags the
             * 'flush_tx'. */
            netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
        } else {
            /* Queues are shared among CPUs. Always flush */
            netdev->tx_q[i].flush_tx = true;
        }
        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
    }
}
static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_spinlock_init(&netdev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->type = type;
    netdev->flags = 0;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;
    netdev->real_n_txq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);
        if (err) {
            goto unlock;
        }
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, NULL, 0); /* string must be null terminated */
    return 0;
}
static int
vhost_construct_helper(struct netdev *netdev_) OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    rte_spinlock_init(&netdev->vhost_tx_lock);
    return netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
}
static int
netdev_dpdk_vhost_cuse_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    strncpy(netdev->vhost_id, netdev->up.name, sizeof(netdev->vhost_id));
    err = vhost_construct_helper(netdev_);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_vhost_user_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket. */
    snprintf(netdev->vhost_id, sizeof(netdev->vhost_id), "%s/%s",
             vhost_sock_dir, netdev_->name);
    err = rte_vhost_driver_register(netdev->vhost_id);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 netdev->vhost_id);
    } else {
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  netdev->vhost_id, netdev_->name);
        err = vhost_construct_helper(netdev_);
    }
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Can't remove a port while a guest is attached to it. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Can not remove port, vhost device still attached");
        return;
    }

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev_->n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}
/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, do not try restoring its old configuration
 * and just return the error. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;
    int old_rxq, old_txq;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    old_txq = netdev->up.n_txq;
    old_rxq = netdev->up.n_rxq;
    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    err = dpdk_eth_dev_init(netdev);
    netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);
    if (err) {
        /* If there has been an error, it means that the requested queues
         * have not been created.  Restore the old numbers. */
        netdev->up.n_txq = old_txq;
        netdev->up.n_rxq = old_rxq;
    }

    netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->real_n_txq = 1;
    netdev->up.n_rxq = 1;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += txq->count - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}
static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}
static bool
is_vhost_running(struct virtio_net *dev)
{
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}
static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count)
{
    int i;
    struct dp_packet *packet;

    stats->rx_packets += count;
    for (i = 0; i < count; i++) {
        packet = packets[i];

        if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_length_errors++;
            continue;
        }

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += dp_packet_size(packet);
    }
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = VIRTIO_TXQ;
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **)packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    rte_spinlock_lock(&vhost_dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&vhost_dev->stats, packets, nb_rx);
    rte_spinlock_unlock(&vhost_dev->stats_lock);

    *c = (int) nb_rx;
    return 0;
}
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core.  Do not flush other
     * queues.
     * Do not flush tx queue which is shared among CPUs
     * since it is always flushed */
    if (rxq_->queue_id == rte_lcore_id() &&
        OVS_LIKELY(!dev->txq_needs_locking)) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}
static inline void
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
                                     int attempted,
                                     int dropped)
{
    int i;
    int sent = attempted - dropped;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
                         int cnt, bool may_steal)
{
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    uint64_t start = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        rte_spinlock_lock(&vhost_dev->stats_lock);
        vhost_dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&vhost_dev->stats_lock);
        goto out;
    }

    /* There is a single vHost TX queue, so we need to lock it for TX. */
    rte_spinlock_lock(&vhost_dev->vhost_tx_lock);

    do {
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(virtio_dev, VIRTIO_RXQ,
                                          cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent.*/
            cnt -= tx_pkts;
            /* Prepare for possible next iteration.*/
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
            unsigned int expired = 0;

            if (!start) {
                start = rte_get_timer_cycles();
            }

            /*
             * Unable to enqueue packets to vhost interface.
             * Check available entries before retrying.
             */
            while (!rte_vring_available_entries(virtio_dev, VIRTIO_RXQ)) {
                if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
                    expired = 1;
                    break;
                }
            }
            if (expired) {
                /* break out of main loop. */
                break;
            }
        }
    } while (cnt);
    rte_spinlock_unlock(&vhost_dev->vhost_tx_lock);

    rte_spinlock_lock(&vhost_dev->stats_lock);
    netdev_dpdk_vhost_update_tx_counters(&vhost_dev->stats, pkts, total_pkts,
                                         cnt);
    rte_spinlock_unlock(&vhost_dev->stats_lock);

out:
    if (may_steal) {
        int i;

        for (i = 0; i < total_pkts; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}
1160 dpdk_do_tx_copy(struct netdev
*netdev
, int qid
, struct dp_packet
**pkts
,
1162 OVS_NO_THREAD_SAFETY_ANALYSIS
1164 #if !defined(__CHECKER__) && !defined(_WIN32)
1165 const size_t PKT_ARRAY_SIZE
= cnt
;
1167 /* Sparse or MSVC doesn't like variable length array. */
1168 enum { PKT_ARRAY_SIZE
= NETDEV_MAX_BURST
};
1170 struct netdev_dpdk
*dev
= netdev_dpdk_cast(netdev
);
1171 struct rte_mbuf
*mbufs
[PKT_ARRAY_SIZE
];
1176 /* If we are on a non pmd thread we have to use the mempool mutex, because
1177 * every non pmd thread shares the same mempool cache */
1179 if (!thread_is_pmd()) {
1180 ovs_mutex_lock(&nonpmd_mempool_mutex
);
1183 for (i
= 0; i
< cnt
; i
++) {
1184 int size
= dp_packet_size(pkts
[i
]);
1186 if (OVS_UNLIKELY(size
> dev
->max_packet_len
)) {
1187 VLOG_WARN_RL(&rl
, "Too big size %d max_packet_len %d",
1188 (int)size
, dev
->max_packet_len
);
1194 mbufs
[newcnt
] = rte_pktmbuf_alloc(dev
->dpdk_mp
->mp
);
1196 if (!mbufs
[newcnt
]) {
1201 /* We have to do a copy for now */
1202 memcpy(rte_pktmbuf_mtod(mbufs
[newcnt
], void *), dp_packet_data(pkts
[i
]), size
);
1204 rte_pktmbuf_data_len(mbufs
[newcnt
]) = size
;
1205 rte_pktmbuf_pkt_len(mbufs
[newcnt
]) = size
;
1210 if (OVS_UNLIKELY(dropped
)) {
1211 rte_spinlock_lock(&dev
->stats_lock
);
1212 dev
->stats
.tx_dropped
+= dropped
;
1213 rte_spinlock_unlock(&dev
->stats_lock
);
1216 if (dev
->type
== DPDK_DEV_VHOST
) {
1217 __netdev_dpdk_vhost_send(netdev
, (struct dp_packet
**) mbufs
, newcnt
, true);
1219 dpdk_queue_pkts(dev
, qid
, mbufs
, newcnt
);
1220 dpdk_queue_flush(dev
, qid
);
1223 if (!thread_is_pmd()) {
1224 ovs_mutex_unlock(&nonpmd_mempool_mutex
);
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid OVS_UNUSED,
                       struct dp_packet **pkts, int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, pkts, cnt, may_steal);
    }
    return 0;
}
*dev
, int qid
,
1249 struct dp_packet
**pkts
, int cnt
, bool may_steal
)
1253 if (OVS_UNLIKELY(dev
->txq_needs_locking
)) {
1254 qid
= qid
% dev
->real_n_txq
;
1255 rte_spinlock_lock(&dev
->tx_q
[qid
].tx_lock
);
1258 if (OVS_UNLIKELY(!may_steal
||
1259 pkts
[0]->source
!= DPBUF_DPDK
)) {
1260 struct netdev
*netdev
= &dev
->up
;
1262 dpdk_do_tx_copy(netdev
, qid
, pkts
, cnt
);
1265 for (i
= 0; i
< cnt
; i
++) {
1266 dp_packet_delete(pkts
[i
]);
1270 int next_tx_idx
= 0;
1273 for (i
= 0; i
< cnt
; i
++) {
1274 int size
= dp_packet_size(pkts
[i
]);
1276 if (OVS_UNLIKELY(size
> dev
->max_packet_len
)) {
1277 if (next_tx_idx
!= i
) {
1278 dpdk_queue_pkts(dev
, qid
,
1279 (struct rte_mbuf
**)&pkts
[next_tx_idx
],
1283 VLOG_WARN_RL(&rl
, "Too big size %d max_packet_len %d",
1284 (int)size
, dev
->max_packet_len
);
1286 dp_packet_delete(pkts
[i
]);
1288 next_tx_idx
= i
+ 1;
1291 if (next_tx_idx
!= cnt
) {
1292 dpdk_queue_pkts(dev
, qid
,
1293 (struct rte_mbuf
**)&pkts
[next_tx_idx
],
1297 if (OVS_UNLIKELY(dropped
)) {
1298 rte_spinlock_lock(&dev
->stats_lock
);
1299 dev
->stats
.tx_dropped
+= dropped
;
1300 rte_spinlock_unlock(&dev
->stats_lock
);
1304 if (OVS_UNLIKELY(dev
->txq_needs_locking
)) {
1305 rte_spinlock_unlock(&dev
->tx_q
[qid
].tx_lock
);
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    mp = dpdk_mp_get(dev->socket_id, dev->mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_dropped += UINT64_MAX;

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;
    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but count them here as dropped instead */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped = rte_stats.rx_nombuf + rte_stats.imissed;
    stats->collisions = UINT64_MAX;

    stats->rx_length_errors = rte_stats.ibadlen;
    stats->rx_over_errors = UINT64_MAX;
    stats->rx_crc_errors = rte_stats.ibadcrc;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_missed_errors = rte_stats.imissed;

    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
*netdev_
, struct smap
*args
)
1665 struct netdev_dpdk
*dev
= netdev_dpdk_cast(netdev_
);
1666 struct rte_eth_dev_info dev_info
;
1668 if (dev
->port_id
< 0)
1671 ovs_mutex_lock(&dev
->mutex
);
1672 rte_eth_dev_info_get(dev
->port_id
, &dev_info
);
1673 ovs_mutex_unlock(&dev
->mutex
);
1675 smap_add_format(args
, "driver_name", "%s", dev_info
.driver_name
);
1677 smap_add_format(args
, "port_no", "%d", dev
->port_id
);
1678 smap_add_format(args
, "numa_id", "%d", rte_eth_dev_socket_id(dev
->port_id
));
1679 smap_add_format(args
, "driver_name", "%s", dev_info
.driver_name
);
1680 smap_add_format(args
, "min_rx_bufsize", "%u", dev_info
.min_rx_bufsize
);
1681 smap_add_format(args
, "max_rx_pktlen", "%u", dev_info
.max_rx_pktlen
);
1682 smap_add_format(args
, "max_rx_queues", "%u", dev_info
.max_rx_queues
);
1683 smap_add_format(args
, "max_tx_queues", "%u", dev_info
.max_tx_queues
);
1684 smap_add_format(args
, "max_mac_addrs", "%u", dev_info
.max_mac_addrs
);
1685 smap_add_format(args
, "max_hash_mac_addrs", "%u", dev_info
.max_hash_mac_addrs
);
1686 smap_add_format(args
, "max_vfs", "%u", dev_info
.max_vfs
);
1687 smap_add_format(args
, "max_vmdq_pools", "%u", dev_info
.max_vmdq_pools
);
1689 smap_add_format(args
, "pci-vendor_id", "0x%u", dev_info
.pci_dev
->id
.vendor_id
);
1690 smap_add_format(args
, "pci-device_id", "0x%x", dev_info
.pci_dev
->id
.device_id
);
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *dev)
{
    dev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
    dev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
}
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *dev)
{
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            ovsrcu_set(&netdev->virtio_dev, dev);
            ovs_mutex_unlock(&netdev->mutex);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
                  "found", dev->ifname, dev->device_fh);
        return -1;
    }

    VLOG_INFO("vHost Device '%s' %"PRIu64" has been added", dev->ifname,
              dev->device_fh);
    return 0;
}
/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
    struct netdev_dpdk *vhost_dev;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            ovs_mutex_unlock(&vhost_dev->mutex);

            /*
             * Wait for other threads to quiesce before
             * setting the virtio_dev to NULL.
             */
            ovsrcu_synchronize();
            /*
             * As call to ovsrcu_synchronize() will end the quiescent state,
             * put thread back into quiescent state before returning.
             */
            ovsrcu_quiesce_start();
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed", dev->ifname,
              dev->device_fh);
}
struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}
/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
};
static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the cuse thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();
    return NULL;
}
static int
dpdk_vhost_class_init(void)
{
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
    return 0;
}
static int
dpdk_vhost_cuse_class_init(void)
{
    int err = -1;

    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified on the vswitchd command line, cuse_dev_name
     * is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);

    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return -1;
    }

    dpdk_vhost_class_init();
    return 0;
}
static int
dpdk_vhost_user_class_init(void)
{
    dpdk_vhost_class_init();
    return 0;
}
static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
}
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[10];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, 10, "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, 10, "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);
    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* look through our list to find the device */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
static int
netdev_dpdk_ring_send(struct netdev *netdev_, int qid,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and return into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < cnt; i++) {
        dp_packet_rss_invalidate(pkts[i]);
    }

    netdev_dpdk_send__(netdev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)          \
{                                                             \
    NAME,                                                     \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    NULL,                       /* netdev_dpdk_set_config */  \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    MULTIQ,                     /* set_multiq */              \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    NULL,                       /* set_policing */            \
    NULL,                       /* get_qos_types */           \
    NULL,                       /* get_qos_capabilities */    \
    NULL,                       /* get_qos */                 \
    NULL,                       /* set_qos */                 \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* get_in4 */                 \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_in6 */                 \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}
static int
process_vhost_flags(char *flag, char *default_val, int size,
                    char **argv, char **new_val)
{
    int changed = 0;

    /* Depending on which version of vhost is in use, process the vhost-specific
     * flag if it is provided on the vswitchd command line, otherwise resort to
     * a default value.
     *
     * For vhost-user: Process "-vhost_sock_dir" to set the custom location of
     * the vhost-user socket(s).
     * For vhost-cuse: Process "-cuse_dev_name" to set the custom name of the
     * vhost-cuse character device. */
    if (!strcmp(argv[1], flag) && (strlen(argv[2]) <= size)) {
        changed = 1;
        *new_val = strdup(argv[2]);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}
int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Remove the --dpdk argument from arg list.*/
    argc--;
    argv++;

#ifdef VHOST_CUSE
    if (process_vhost_flags("-cuse_dev_name", strdup("vhost-net"),
                            PATH_MAX, argv, &cuse_dev_name)) {
#else
    if (process_vhost_flags("-vhost_sock_dir", strdup(ovs_rundir()),
                            NAME_MAX, argv, &vhost_sock_dir)) {
        struct stat s;
        int err;

        err = stat(vhost_sock_dir, &s);
        if (err) {
            VLOG_ERR("vHostUser socket DIR '%s' does not exist.",
                     vhost_sock_dir);
            return err;
        }
#endif
        /* Remove the vhost flag configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function. */
        argc -= 2;
        argv += 2;    /* Increment by two to bypass the vhost flag arguments */
        base = 2;
    }

    /* Keep the program name argument as this is needed for call to
     * rte_eal_init(). */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    return result + 1 + base;
}
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostcuse",
        dpdk_vhost_cuse_class_init,
        netdev_dpdk_vhost_cuse_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);

static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        dpdk_vhost_user_class_init,
        netdev_dpdk_vhost_user_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
#ifdef VHOST_CUSE
        netdev_register_provider(&dpdk_vhost_cuse_class);
#else
        netdev_register_provider(&dpdk_vhost_user_class);
#endif
        ovsthread_once_done(&once);
    }
}
int
pmd_thread_setaffinity_cpu(unsigned cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}

static bool
thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}