netdev-dpdk: Use specific spinlock for stats.
[mirror_ovs.git] / lib / netdev-dpdk.c
8a9562d2
PS
1/*
2 * Copyright (c) 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <config.h>
 18
 19#include <stdio.h>
 20#include <string.h>
 21#include <signal.h>
 22#include <stdlib.h>
 23#include <pthread.h>
 25#include <errno.h>
 26#include <sched.h>
 28#include <unistd.h>
30
e14deea0 31#include "dp-packet.h"
8a9562d2
PS
32#include "dpif-netdev.h"
33#include "list.h"
34#include "netdev-dpdk.h"
35#include "netdev-provider.h"
36#include "netdev-vport.h"
37#include "odp-util.h"
38#include "ofp-print.h"
94143fc4 39#include "ovs-numa.h"
8a9562d2
PS
40#include "ovs-thread.h"
41#include "ovs-rcu.h"
42#include "packets.h"
43#include "shash.h"
8a9562d2
PS
44#include "sset.h"
45#include "unaligned.h"
46#include "timeval.h"
47#include "unixctl.h"
e6211adc 48#include "openvswitch/vlog.h"
8a9562d2 49
b8e57534
MK
50#include "rte_config.h"
51#include "rte_mbuf.h"
58397e6c 52#include "rte_virtio_net.h"
b8e57534 53
8a9562d2
PS
54VLOG_DEFINE_THIS_MODULE(dpdk);
55static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
56
57#define DPDK_PORT_WATCHDOG_INTERVAL 5
58
59#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
60#define OVS_VPORT_DPDK "ovs_dpdk"
61
62/*
 63 * We need to reserve a large amount of extra space in the mbufs so that we
 64 * can align the DMA addresses to 4KB.
65 */
66
67#define MTU_TO_MAX_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
68#define MBUF_SIZE(mtu) (MTU_TO_MAX_LEN(mtu) + (512) + \
69 sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
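/* For example, with the default ETHER_MTU of 1500, MTU_TO_MAX_LEN(1500) is
 * 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) = 1518 bytes; MBUF_SIZE then
 * adds the 512-byte pad above, sizeof(struct rte_mbuf) and
 * RTE_PKTMBUF_HEADROOM on top of that. */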
70
da79ce2b
DDP
71/* Max and min number of packets in the mempool. OVS tries to allocate a
72 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
73 * enough hugepages) we keep halving the number until the allocation succeeds
74 * or we reach MIN_NB_MBUF */
75
76#define MAX_NB_MBUF (4096 * 64)
77#define MIN_NB_MBUF (4096 * 4)
78#define MP_CACHE_SZ RTE_MEMPOOL_CACHE_MAX_SIZE
79
80/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
81BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF) == 0);
82
83/* The smallest possible NB_MBUF that we're going to try should be a multiple
84 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
85BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
86 % MP_CACHE_SZ == 0);
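/* With the values above (MAX_NB_MBUF = 262144, MIN_NB_MBUF = 16384) the
 * allocation loop in dpdk_mp_get() tries 262144, 131072, 65536, 32768 and
 * finally 16384 mbufs before giving up. */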
87
8a9562d2
PS
88#define SOCKET0 0
89
79f5354c
PM
90#define NIC_PORT_RX_Q_SIZE 2048 /* Size of Physical NIC RX Queue, Max (n+32<=4096)*/
91#define NIC_PORT_TX_Q_SIZE 2048 /* Size of Physical NIC TX Queue, Max (n+32<=4096)*/
92
58397e6c 93/* Character device cuse_dev_name. */
bce01e3a 94static char *cuse_dev_name = NULL;
58397e6c 95
95e9881f
KT
96/*
 97 * Maximum amount of time in microseconds to try to enqueue to vhost.
98 */
99#define VHOST_ENQ_RETRY_USECS 100
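/* __netdev_dpdk_vhost_send() converts this to timer (TSC) cycles as
 * VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6, i.e. roughly 200,000
 * cycles on a 2 GHz timer. */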
100
8a9562d2 101static const struct rte_eth_conf port_conf = {
a28ddd11
DDP
102 .rxmode = {
103 .mq_mode = ETH_MQ_RX_RSS,
104 .split_hdr_size = 0,
105 .header_split = 0, /* Header Split disabled */
106 .hw_ip_checksum = 0, /* IP checksum offload disabled */
107 .hw_vlan_filter = 0, /* VLAN filtering disabled */
108 .jumbo_frame = 0, /* Jumbo Frame Support disabled */
109 .hw_strip_crc = 0,
110 },
111 .rx_adv_conf = {
112 .rss_conf = {
113 .rss_key = NULL,
543342a4 114 .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
8a9562d2 115 },
a28ddd11
DDP
116 },
117 .txmode = {
118 .mq_mode = ETH_MQ_TX_NONE,
119 },
8a9562d2
PS
120};
121
3a100265 122enum { MAX_TX_QUEUE_LEN = 384 };
58f7c37b
DDP
123enum { DPDK_RING_SIZE = 256 };
124BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
8a9562d2
PS
125enum { DRAIN_TSC = 200000ULL };
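/* DRAIN_TSC is measured in timer cycles: 200,000 cycles is roughly 100 us on
 * a 2 GHz core.  dpdk_queue_pkts() flushes a tx queue if more than this has
 * elapsed since its last flush, even if the burst is not full. */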
126
58397e6c
KT
127enum dpdk_dev_type {
128 DPDK_DEV_ETH = 0,
129 DPDK_DEV_VHOST = 1
130};
131
8a9562d2
PS
132static int rte_eal_init_ret = ENODEV;
133
134static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
135
136/* Contains all 'struct dpdk_dev's. */
ca6ba700 137static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
55951e15 138 = OVS_LIST_INITIALIZER(&dpdk_list);
8a9562d2 139
ca6ba700 140static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
55951e15 141 = OVS_LIST_INITIALIZER(&dpdk_mp_list);
8a9562d2 142
db73f716
DDP
143/* This mutex must be used by non pmd threads when allocating or freeing
144 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
145 * use mempools, a non pmd thread should hold this mutex while calling them */
bce01e3a 146static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
db73f716 147
8a9562d2
PS
148struct dpdk_mp {
149 struct rte_mempool *mp;
150 int mtu;
151 int socket_id;
152 int refcount;
ca6ba700 153 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
8a9562d2
PS
154};
155
5a034064
AW
156/* There should be one 'struct dpdk_tx_queue' created for
157 * each cpu core. */
8a9562d2 158struct dpdk_tx_queue {
94143fc4
AW
 159 bool flush_tx; /* Set to true to flush queue every time */
160 /* pkts are queued. */
8a9562d2
PS
161 int count;
162 uint64_t tsc;
163 struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
164};
165
95fb793a 166/* dpdk has no way to remove dpdk ring ethernet devices,
 167 so we have to keep them around once they've been created.
168*/
169
ca6ba700 170static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
55951e15 171 = OVS_LIST_INITIALIZER(&dpdk_ring_list);
95fb793a 172
173struct dpdk_ring {
174 /* For the client rings */
175 struct rte_ring *cring_tx;
176 struct rte_ring *cring_rx;
177 int user_port_id; /* User given port no, parsed from port name */
178 int eth_port_id; /* ethernet device port id */
ca6ba700 179 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
95fb793a 180};
181
8a9562d2
PS
182struct netdev_dpdk {
183 struct netdev up;
184 int port_id;
185 int max_packet_len;
58397e6c 186 enum dpdk_dev_type type;
8a9562d2 187
5a034064 188 struct dpdk_tx_queue *tx_q;
8a9562d2
PS
189
190 struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);
191
192 struct dpdk_mp *dpdk_mp;
193 int mtu;
194 int socket_id;
195 int buf_size;
8a9562d2 196 struct netdev_stats stats;
45d947c4
DDP
197 /* Protects stats */
198 rte_spinlock_t stats_lock;
8a9562d2
PS
199
200 uint8_t hwaddr[ETH_ADDR_LEN];
201 enum netdev_flags flags;
202
203 struct rte_eth_link link;
204 int link_reset_cnt;
205
58397e6c
KT
206 /* virtio-net structure for vhost device */
207 OVSRCU_TYPE(struct virtio_net *) virtio_dev;
208
8a9562d2 209 /* In dpdk_list. */
ca6ba700 210 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
58397e6c 211 rte_spinlock_t txq_lock;
8a9562d2
PS
212};
213
214struct netdev_rxq_dpdk {
215 struct netdev_rxq up;
216 int port_id;
217};
218
db73f716
DDP
219static bool thread_is_pmd(void);
220
8a9562d2
PS
221static int netdev_dpdk_construct(struct netdev *);
222
58397e6c
KT
223struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);
224
8a9562d2
PS
225static bool
226is_dpdk_class(const struct netdev_class *class)
227{
228 return class->construct == netdev_dpdk_construct;
229}
230
58397e6c
KT
231/* XXX: use dpdk malloc for entire OVS. In fact, huge pages should be used
 232 * for all other segments: data, bss and text. */
8a9562d2
PS
233
234static void *
235dpdk_rte_mzalloc(size_t sz)
236{
237 void *ptr;
238
239 ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
240 if (ptr == NULL) {
241 out_of_memory();
242 }
243 return ptr;
244}
245
db73f716
DDP
246/* XXX this function should be called only by pmd threads (or by non pmd
247 * threads holding the nonpmd_mempool_mutex) */
8a9562d2 248void
e14deea0 249free_dpdk_buf(struct dp_packet *p)
8a9562d2 250{
db73f716 251 struct rte_mbuf *pkt = (struct rte_mbuf *) p;
8a9562d2 252
db73f716 253 rte_pktmbuf_free_seg(pkt);
8a9562d2
PS
254}
255
b3cd9f9d
PS
256static void
257__rte_pktmbuf_init(struct rte_mempool *mp,
258 void *opaque_arg OVS_UNUSED,
259 void *_m,
260 unsigned i OVS_UNUSED)
261{
262 struct rte_mbuf *m = _m;
e14deea0 263 uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);
b3cd9f9d 264
e14deea0 265 RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));
b3cd9f9d
PS
266
267 memset(m, 0, mp->elt_size);
268
269 /* start of buffer is just after mbuf structure */
e14deea0 270 m->buf_addr = (char *)m + sizeof(struct dp_packet);
b3cd9f9d 271 m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
e14deea0 272 sizeof(struct dp_packet);
b3cd9f9d
PS
273 m->buf_len = (uint16_t)buf_len;
274
275 /* keep some headroom between start of buffer and data */
b8e57534 276 m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);
b3cd9f9d
PS
277
278 /* init some constant fields */
b3cd9f9d 279 m->pool = mp;
b8e57534
MK
280 m->nb_segs = 1;
281 m->port = 0xff;
b3cd9f9d
PS
282}
283
284static void
285ovs_rte_pktmbuf_init(struct rte_mempool *mp,
286 void *opaque_arg OVS_UNUSED,
287 void *_m,
288 unsigned i OVS_UNUSED)
289{
290 struct rte_mbuf *m = _m;
291
292 __rte_pktmbuf_init(mp, opaque_arg, _m, i);
293
cf62fa4c 294 dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
b3cd9f9d
PS
295}
296
8a9562d2
PS
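/* Returns a mempool for the given NUMA socket and MTU, reusing an existing
 * one (bumping its refcount) when possible.  Otherwise a new mempool is
 * created, halving the mbuf count on ENOMEM as described above.  Must be
 * called with 'dpdk_mutex' held. */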
297static struct dpdk_mp *
298dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
299{
300 struct dpdk_mp *dmp = NULL;
301 char mp_name[RTE_MEMPOOL_NAMESIZE];
da79ce2b 302 unsigned mp_size;
8a9562d2
PS
303
304 LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
305 if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
306 dmp->refcount++;
307 return dmp;
308 }
309 }
310
311 dmp = dpdk_rte_mzalloc(sizeof *dmp);
312 dmp->socket_id = socket_id;
313 dmp->mtu = mtu;
314 dmp->refcount = 1;
315
da79ce2b
DDP
316 mp_size = MAX_NB_MBUF;
317 do {
318 if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
319 dmp->mtu, dmp->socket_id, mp_size) < 0) {
320 return NULL;
321 }
95fb793a 322
da79ce2b
DDP
323 dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
324 MP_CACHE_SZ,
325 sizeof(struct rte_pktmbuf_pool_private),
326 rte_pktmbuf_pool_init, NULL,
327 ovs_rte_pktmbuf_init, NULL,
328 socket_id, 0);
329 } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);
8a9562d2
PS
330
331 if (dmp->mp == NULL) {
332 return NULL;
da79ce2b
DDP
333 } else {
334 VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size );
8a9562d2
PS
335 }
336
337 list_push_back(&dpdk_mp_list, &dmp->list_node);
338 return dmp;
339}
340
341static void
342dpdk_mp_put(struct dpdk_mp *dmp)
343{
344
345 if (!dmp) {
346 return;
347 }
348
349 dmp->refcount--;
350 ovs_assert(dmp->refcount >= 0);
351
352#if 0
353 /* I could not find any API to destroy mp. */
354 if (dmp->refcount == 0) {
355 list_delete(dmp->list_node);
356 /* destroy mp-pool. */
357 }
358#endif
359}
360
361static void
362check_link_status(struct netdev_dpdk *dev)
363{
364 struct rte_eth_link link;
365
366 rte_eth_link_get_nowait(dev->port_id, &link);
367
368 if (dev->link.link_status != link.link_status) {
3e912ffc 369 netdev_change_seq_changed(&dev->up);
8a9562d2
PS
370
371 dev->link_reset_cnt++;
372 dev->link = link;
373 if (dev->link.link_status) {
374 VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
375 dev->port_id, (unsigned)dev->link.link_speed,
376 (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
377 ("full-duplex") : ("half-duplex"));
378 } else {
379 VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
380 }
381 }
382}
383
384static void *
385dpdk_watchdog(void *dummy OVS_UNUSED)
386{
387 struct netdev_dpdk *dev;
388
389 pthread_detach(pthread_self());
390
391 for (;;) {
392 ovs_mutex_lock(&dpdk_mutex);
393 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
394 ovs_mutex_lock(&dev->mutex);
395 check_link_status(dev);
396 ovs_mutex_unlock(&dev->mutex);
397 }
398 ovs_mutex_unlock(&dpdk_mutex);
399 xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
400 }
401
402 return NULL;
403}
404
405static int
406dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
407{
408 struct rte_pktmbuf_pool_private *mbp_priv;
409 struct ether_addr eth_addr;
410 int diag;
411 int i;
412
413 if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
95fb793a 414 return ENODEV;
8a9562d2
PS
415 }
416
5496878c
AW
417 diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->up.n_txq,
418 &port_conf);
8a9562d2
PS
419 if (diag) {
420 VLOG_ERR("eth dev config error %d",diag);
95fb793a 421 return -diag;
8a9562d2
PS
422 }
423
5496878c 424 for (i = 0; i < dev->up.n_txq; i++) {
79f5354c 425 diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
9154f798 426 dev->socket_id, NULL);
8a9562d2
PS
427 if (diag) {
428 VLOG_ERR("eth dev tx queue setup error %d",diag);
95fb793a 429 return -diag;
8a9562d2
PS
430 }
431 }
432
5496878c 433 for (i = 0; i < dev->up.n_rxq; i++) {
79f5354c 434 diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
d221ffa1 435 dev->socket_id,
9154f798 436 NULL, dev->dpdk_mp->mp);
8a9562d2
PS
437 if (diag) {
438 VLOG_ERR("eth dev rx queue setup error %d",diag);
95fb793a 439 return -diag;
8a9562d2
PS
440 }
441 }
442
443 diag = rte_eth_dev_start(dev->port_id);
444 if (diag) {
445 VLOG_ERR("eth dev start error %d",diag);
95fb793a 446 return -diag;
8a9562d2
PS
447 }
448
449 rte_eth_promiscuous_enable(dev->port_id);
450 rte_eth_allmulticast_enable(dev->port_id);
451
452 memset(&eth_addr, 0x0, sizeof(eth_addr));
453 rte_eth_macaddr_get(dev->port_id, &eth_addr);
454 VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
455 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));
456
457 memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
458 rte_eth_link_get_nowait(dev->port_id, &dev->link);
459
460 mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
461 dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
462
463 dev->flags = NETDEV_UP | NETDEV_PROMISC;
464 return 0;
465}
466
467static struct netdev_dpdk *
468netdev_dpdk_cast(const struct netdev *netdev)
469{
470 return CONTAINER_OF(netdev, struct netdev_dpdk, up);
471}
472
473static struct netdev *
474netdev_dpdk_alloc(void)
475{
476 struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);
477 return &netdev->up;
478}
479
5a034064 480static void
91968eb0 481netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
5a034064 482{
bd5131ba 483 unsigned i;
5a034064
AW
484
485 netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
94143fc4
AW
486 /* Each index is considered as a cpu core id, since there should
487 * be one tx queue for each cpu core. */
5a034064 488 for (i = 0; i < n_txqs; i++) {
ba0358a1 489 int numa_id = ovs_numa_get_numa_id(i);
94143fc4 490
94143fc4
AW
491 /* If the corresponding core is not on the same numa node
492 * as 'netdev', flags the 'flush_tx'. */
ba0358a1 493 netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
5a034064
AW
494 }
495}
496
8a9562d2 497static int
58397e6c
KT
498netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
499 enum dpdk_dev_type type)
5a034064 500 OVS_REQUIRES(dpdk_mutex)
8a9562d2
PS
501{
502 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
1b7a04e0 503 int sid;
95fb793a 504 int err = 0;
8a9562d2 505
95fb793a 506 ovs_mutex_init(&netdev->mutex);
95fb793a 507 ovs_mutex_lock(&netdev->mutex);
8a9562d2 508
45d947c4
DDP
509 rte_spinlock_init(&netdev->stats_lock);
510
1b7a04e0
AW
511 /* If the 'sid' is negative, it means that the kernel fails
512 * to obtain the pci numa info. In that situation, always
513 * use 'SOCKET0'. */
58397e6c
KT
514 if (type == DPDK_DEV_ETH) {
515 sid = rte_eth_dev_socket_id(port_no);
516 } else {
517 sid = rte_lcore_to_socket_id(rte_get_master_lcore());
518 }
519
1b7a04e0 520 netdev->socket_id = sid < 0 ? SOCKET0 : sid;
95fb793a 521 netdev->port_id = port_no;
58397e6c 522 netdev->type = type;
8a9562d2 523 netdev->flags = 0;
8a9562d2
PS
524 netdev->mtu = ETHER_MTU;
525 netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);
58397e6c 526 rte_spinlock_init(&netdev->txq_lock);
8a9562d2 527
8a9562d2
PS
528 netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
529 if (!netdev->dpdk_mp) {
530 err = ENOMEM;
95fb793a 531 goto unlock;
8a9562d2
PS
532 }
533
5496878c
AW
534 netdev_->n_txq = NR_QUEUE;
535 netdev_->n_rxq = NR_QUEUE;
58397e6c
KT
536
537 if (type == DPDK_DEV_ETH) {
1b99bb05
MG
538 netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
539 err = dpdk_eth_dev_init(netdev);
540 if (err) {
541 goto unlock;
542 }
8a9562d2 543 }
8a9562d2
PS
544
545 list_push_back(&dpdk_list, &netdev->list_node);
546
95fb793a 547unlock:
5a034064
AW
548 if (err) {
549 rte_free(netdev->tx_q);
550 }
8a9562d2 551 ovs_mutex_unlock(&netdev->mutex);
95fb793a 552 return err;
553}
554
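/* Parses the user-visible port number out of a device name, e.g. "dpdk0"
 * with prefix "dpdk" yields 0 and "dpdkr3" with prefix "dpdkr" yields 3.
 * Returns ENODEV if 'dev_name' does not start with 'prefix'. */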
555static int
556dpdk_dev_parse_name(const char dev_name[], const char prefix[],
557 unsigned int *port_no)
558{
559 const char *cport;
560
561 if (strncmp(dev_name, prefix, strlen(prefix))) {
562 return ENODEV;
563 }
564
565 cport = dev_name + strlen(prefix);
bce01e3a 566 *port_no = strtol(cport, NULL, 0); /* string must be null terminated */
95fb793a 567 return 0;
568}
569
58397e6c
KT
570static int
571netdev_dpdk_vhost_construct(struct netdev *netdev_)
572{
573 int err;
574
575 if (rte_eal_init_ret) {
576 return rte_eal_init_ret;
577 }
578
579 ovs_mutex_lock(&dpdk_mutex);
580 err = netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
581 ovs_mutex_unlock(&dpdk_mutex);
582
583 return err;
584}
585
95fb793a 586static int
587netdev_dpdk_construct(struct netdev *netdev)
588{
589 unsigned int port_no;
590 int err;
591
592 if (rte_eal_init_ret) {
593 return rte_eal_init_ret;
594 }
595
596 /* Names always start with "dpdk" */
597 err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
598 if (err) {
599 return err;
600 }
601
602 ovs_mutex_lock(&dpdk_mutex);
58397e6c 603 err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
8a9562d2
PS
604 ovs_mutex_unlock(&dpdk_mutex);
605 return err;
606}
607
608static void
609netdev_dpdk_destruct(struct netdev *netdev_)
610{
611 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
612
613 ovs_mutex_lock(&dev->mutex);
614 rte_eth_dev_stop(dev->port_id);
615 ovs_mutex_unlock(&dev->mutex);
616
617 ovs_mutex_lock(&dpdk_mutex);
5a034064 618 rte_free(dev->tx_q);
8a9562d2
PS
619 list_remove(&dev->list_node);
620 dpdk_mp_put(dev->dpdk_mp);
621 ovs_mutex_unlock(&dpdk_mutex);
58397e6c 622}
8a9562d2 623
58397e6c
KT
624static void
625netdev_dpdk_vhost_destruct(struct netdev *netdev_)
626{
627 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
628
629 /* Can't remove a port while a guest is attached to it. */
630 if (netdev_dpdk_get_virtio(dev) != NULL) {
631 VLOG_ERR("Can not remove port, vhost device still attached");
632 return;
633 }
634
635 ovs_mutex_lock(&dpdk_mutex);
636 list_remove(&dev->list_node);
637 dpdk_mp_put(dev->dpdk_mp);
638 ovs_mutex_unlock(&dpdk_mutex);
8a9562d2
PS
639}
640
641static void
642netdev_dpdk_dealloc(struct netdev *netdev_)
643{
644 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
645
646 rte_free(netdev);
647}
648
649static int
650netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
651{
652 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
653
654 ovs_mutex_lock(&dev->mutex);
655
47659062
AW
656 smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
657 smap_add_format(args, "configured_tx_queues", "%d", netdev_->n_txq);
8a9562d2
PS
658 ovs_mutex_unlock(&dev->mutex);
659
660 return 0;
661}
662
7dec44fe
AW
663static int
664netdev_dpdk_get_numa_id(const struct netdev *netdev_)
665{
666 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
667
668 return netdev->socket_id;
669}
670
5496878c
AW
671/* Sets the number of tx queues and rx queues for the dpdk interface.
672 * If the configuration fails, do not try restoring its old configuration
673 * and just returns the error. */
674static int
675netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
676 unsigned int n_rxq)
677{
678 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
679 int err = 0;
680
681 if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
682 return err;
683 }
684
b7ccaf67 685 ovs_mutex_lock(&dpdk_mutex);
5496878c 686 ovs_mutex_lock(&netdev->mutex);
91968eb0 687
5496878c 688 rte_eth_dev_stop(netdev->port_id);
91968eb0 689
5496878c
AW
690 netdev->up.n_txq = n_txq;
691 netdev->up.n_rxq = n_rxq;
58397e6c 692
91968eb0
AW
693 rte_free(netdev->tx_q);
694 netdev_dpdk_alloc_txq(netdev, n_txq);
5496878c 695 err = dpdk_eth_dev_init(netdev);
91968eb0 696
5496878c 697 ovs_mutex_unlock(&netdev->mutex);
b7ccaf67 698 ovs_mutex_unlock(&dpdk_mutex);
5496878c
AW
699
700 return err;
701}
702
58397e6c
KT
703static int
704netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
705 unsigned int n_rxq)
706{
707 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
708 int err = 0;
709
710 if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
711 return err;
712 }
713
714 ovs_mutex_lock(&dpdk_mutex);
715 ovs_mutex_lock(&netdev->mutex);
716
717 netdev->up.n_txq = n_txq;
718 netdev->up.n_rxq = n_rxq;
719
720 ovs_mutex_unlock(&netdev->mutex);
721 ovs_mutex_unlock(&dpdk_mutex);
722
723 return err;
724}
725
8a9562d2
PS
726static struct netdev_rxq *
727netdev_dpdk_rxq_alloc(void)
728{
729 struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);
730
731 return &rx->up;
732}
733
734static struct netdev_rxq_dpdk *
735netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
736{
737 return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
738}
739
740static int
741netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
742{
743 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
744 struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);
745
746 ovs_mutex_lock(&netdev->mutex);
747 rx->port_id = netdev->port_id;
748 ovs_mutex_unlock(&netdev->mutex);
749
750 return 0;
751}
752
753static void
754netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
755{
756}
757
758static void
759netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
760{
761 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
762
763 rte_free(rx);
764}
765
b170db2a
RW
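/* Pushes the packets queued on tx queue 'qid' to the NIC with
 * rte_eth_tx_burst(), retrying until the queue is drained or the NIC stops
 * accepting packets; anything left over is freed and counted as tx_dropped. */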
766static inline void
767dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
8a9562d2
PS
768{
769 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
1304f1f8
DDP
770 uint32_t nb_tx = 0;
771
772 while (nb_tx != txq->count) {
773 uint32_t ret;
774
775 ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
776 txq->count - nb_tx);
777 if (!ret) {
778 break;
779 }
780
781 nb_tx += ret;
782 }
8a9562d2 783
b170db2a 784 if (OVS_UNLIKELY(nb_tx != txq->count)) {
db73f716
DDP
785 /* free buffers, which we couldn't transmit, one at a time (each
786 * packet could come from a different mempool) */
787 int i;
788
789 for (i = nb_tx; i < txq->count; i++) {
790 rte_pktmbuf_free_seg(txq->burst_pkts[i]);
791 }
45d947c4 792 rte_spinlock_lock(&dev->stats_lock);
1304f1f8 793 dev->stats.tx_dropped += txq->count-nb_tx;
45d947c4 794 rte_spinlock_unlock(&dev->stats_lock);
8a9562d2 795 }
1304f1f8 796
8a9562d2 797 txq->count = 0;
844f2d74 798 txq->tsc = rte_get_timer_cycles();
b170db2a
RW
799}
800
801static inline void
802dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
803{
804 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
805
806 if (txq->count == 0) {
807 return;
808 }
b170db2a 809 dpdk_queue_flush__(dev, qid);
8a9562d2
PS
810}
811
58397e6c
KT
812static bool
813is_vhost_running(struct virtio_net *dev)
814{
815 return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
816}
817
818/*
819 * The receive path for the vhost port is the TX path out from guest.
820 */
821static int
822netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
823 struct dp_packet **packets, int *c)
824{
825 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
826 struct netdev *netdev = rx->up.netdev;
827 struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
828 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
829 int qid = 1;
830 uint16_t nb_rx = 0;
831
832 if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
833 return EAGAIN;
834 }
835
836 nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid,
837 vhost_dev->dpdk_mp->mp,
838 (struct rte_mbuf **)packets,
cd159f1a 839 NETDEV_MAX_BURST);
58397e6c
KT
840 if (!nb_rx) {
841 return EAGAIN;
842 }
843
45d947c4 844 rte_spinlock_lock(&vhost_dev->stats_lock);
58397e6c 845 vhost_dev->stats.rx_packets += (uint64_t)nb_rx;
45d947c4
DDP
846 rte_spinlock_unlock(&vhost_dev->stats_lock);
847
58397e6c
KT
848 *c = (int) nb_rx;
849 return 0;
850}
851
8a9562d2 852static int
e14deea0 853netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
91088554 854 int *c)
8a9562d2
PS
855{
856 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
857 struct netdev *netdev = rx->up.netdev;
858 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
8a9562d2 859 int nb_rx;
8a9562d2 860
5496878c
AW
861 /* There is only one tx queue for this core. Do not flush other
 862 * queues. */
863 if (rxq_->queue_id == rte_lcore_id()) {
864 dpdk_queue_flush(dev, rxq_->queue_id);
865 }
8a9562d2
PS
866
867 nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
7d08d53e 868 (struct rte_mbuf **) packets,
cd159f1a 869 NETDEV_MAX_BURST);
8a9562d2
PS
870 if (!nb_rx) {
871 return EAGAIN;
872 }
873
8a9562d2
PS
874 *c = nb_rx;
875
876 return 0;
877}
878
58397e6c
KT
879static void
880__netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
881 int cnt, bool may_steal)
882{
883 struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
884 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
95e9881f
KT
885 struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
886 unsigned int total_pkts = cnt;
887 uint64_t start = 0;
58397e6c
KT
888
889 if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
45d947c4 890 rte_spinlock_lock(&vhost_dev->stats_lock);
1b99bb05 891 vhost_dev->stats.tx_dropped+= cnt;
45d947c4 892 rte_spinlock_unlock(&vhost_dev->stats_lock);
1b99bb05 893 goto out;
58397e6c
KT
894 }
895
 896 /* There is a single vHost TX queue, so we need to lock it for TX. */
897 rte_spinlock_lock(&vhost_dev->txq_lock);
58397e6c 898
95e9881f
KT
899 do {
900 unsigned int tx_pkts;
901
902 tx_pkts = rte_vhost_enqueue_burst(virtio_dev, VIRTIO_RXQ,
903 cur_pkts, cnt);
904 if (OVS_LIKELY(tx_pkts)) {
905 /* Packets have been sent.*/
906 cnt -= tx_pkts;
907 /* Prepare for possible next iteration.*/
908 cur_pkts = &cur_pkts[tx_pkts];
909 } else {
910 uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
911 unsigned int expired = 0;
912
913 if (!start) {
914 start = rte_get_timer_cycles();
915 }
916
917 /*
918 * Unable to enqueue packets to vhost interface.
919 * Check available entries before retrying.
920 */
921 while (!rte_vring_available_entries(virtio_dev, VIRTIO_RXQ)) {
922 if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
923 expired = 1;
924 break;
925 }
926 }
927 if (expired) {
928 /* break out of main loop. */
929 break;
930 }
931 }
932 } while (cnt);
933
45d947c4 934 rte_spinlock_lock(&vhost_dev->stats_lock);
95e9881f
KT
935 vhost_dev->stats.tx_packets += (total_pkts - cnt);
936 vhost_dev->stats.tx_dropped += cnt;
45d947c4 937 rte_spinlock_unlock(&vhost_dev->stats_lock);
58397e6c
KT
938 rte_spinlock_unlock(&vhost_dev->txq_lock);
939
940out:
941 if (may_steal) {
95e9881f
KT
942 int i;
943
944 for (i = 0; i < total_pkts; i++) {
1b99bb05
MG
945 dp_packet_delete(pkts[i]);
946 }
58397e6c
KT
947 }
948}
949
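/* Copies 'cnt' mbuf pointers into the per-queue burst array, flushing with
 * dpdk_queue_flush__() whenever the array fills, 'flush_tx' is set for the
 * queue, or more than DRAIN_TSC cycles have passed since the last flush. */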
8a9562d2 950inline static void
f4fd623c
DDP
951dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
952 struct rte_mbuf **pkts, int cnt)
8a9562d2
PS
953{
954 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
955 uint64_t diff_tsc;
8a9562d2 956
f4fd623c
DDP
957 int i = 0;
958
f4fd623c
DDP
959 while (i < cnt) {
960 int freeslots = MAX_TX_QUEUE_LEN - txq->count;
961 int tocopy = MIN(freeslots, cnt-i);
8a9562d2 962
f4fd623c
DDP
963 memcpy(&txq->burst_pkts[txq->count], &pkts[i],
964 tocopy * sizeof (struct rte_mbuf *));
965
966 txq->count += tocopy;
967 i += tocopy;
968
94143fc4 969 if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
b170db2a 970 dpdk_queue_flush__(dev, qid);
f4fd623c 971 }
844f2d74 972 diff_tsc = rte_get_timer_cycles() - txq->tsc;
f4fd623c 973 if (diff_tsc >= DRAIN_TSC) {
b170db2a 974 dpdk_queue_flush__(dev, qid);
f4fd623c 975 }
8a9562d2 976 }
8a9562d2
PS
977}
978
 979/* Tx function. Copies the packets into DPDK mbufs and transmits them. */
980static void
58397e6c 981dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
2654cc33 982 int cnt)
db73f716 983 OVS_NO_THREAD_SAFETY_ANALYSIS
8a9562d2 984{
bce01e3a
EJ
985#if !defined(__CHECKER__) && !defined(_WIN32)
986 const size_t PKT_ARRAY_SIZE = cnt;
987#else
988 /* Sparse or MSVC doesn't like variable length array. */
cd159f1a 989 enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
bce01e3a 990#endif
8a9562d2 991 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
bce01e3a 992 struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
175cf4de
RW
993 int dropped = 0;
994 int newcnt = 0;
995 int i;
8a9562d2 996
db73f716
DDP
997 /* If we are on a non pmd thread we have to use the mempool mutex, because
998 * every non pmd thread shares the same mempool cache */
999
1000 if (!thread_is_pmd()) {
1001 ovs_mutex_lock(&nonpmd_mempool_mutex);
1002 }
1003
f4fd623c 1004 for (i = 0; i < cnt; i++) {
cf62fa4c 1005 int size = dp_packet_size(pkts[i]);
95fb793a 1006
f98d7864 1007 if (OVS_UNLIKELY(size > dev->max_packet_len)) {
f4fd623c
DDP
1008 VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
1009 (int)size , dev->max_packet_len);
1010
175cf4de 1011 dropped++;
f4fd623c
DDP
1012 continue;
1013 }
8a9562d2 1014
f4fd623c 1015 mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
8a9562d2 1016
f4fd623c 1017 if (!mbufs[newcnt]) {
175cf4de
RW
1018 dropped += cnt - i;
1019 break;
f4fd623c
DDP
1020 }
1021
1022 /* We have to do a copy for now */
b8e57534 1023 memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *), dp_packet_data(pkts[i]), size);
f4fd623c
DDP
1024
1025 rte_pktmbuf_data_len(mbufs[newcnt]) = size;
1026 rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;
1027
1028 newcnt++;
1029 }
8a9562d2 1030
f98d7864 1031 if (OVS_UNLIKELY(dropped)) {
45d947c4 1032 rte_spinlock_lock(&dev->stats_lock);
175cf4de 1033 dev->stats.tx_dropped += dropped;
45d947c4 1034 rte_spinlock_unlock(&dev->stats_lock);
175cf4de
RW
1035 }
1036
58397e6c
KT
1037 if (dev->type == DPDK_DEV_VHOST) {
1038 __netdev_dpdk_vhost_send(netdev, (struct dp_packet **) mbufs, newcnt, true);
1039 } else {
1040 dpdk_queue_pkts(dev, qid, mbufs, newcnt);
1041 dpdk_queue_flush(dev, qid);
1042 }
db73f716
DDP
1043
1044 if (!thread_is_pmd()) {
1045 ovs_mutex_unlock(&nonpmd_mempool_mutex);
1046 }
8a9562d2
PS
1047}
1048
58397e6c
KT
1049static int
1050netdev_dpdk_vhost_send(struct netdev *netdev, int qid OVS_UNUSED, struct dp_packet **pkts,
1051 int cnt, bool may_steal)
1052{
1053 if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
1054 int i;
1055
1056 dpdk_do_tx_copy(netdev, qid, pkts, cnt);
1057 if (may_steal) {
1058 for (i = 0; i < cnt; i++) {
1059 dp_packet_delete(pkts[i]);
1060 }
1061 }
1062 } else {
1063 __netdev_dpdk_vhost_send(netdev, pkts, cnt, may_steal);
1064 }
1065 return 0;
1066}
1067
7251515e
DV
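/* Common transmit path for the eth and ring classes.  Packets that are not
 * already DPDK mbufs (or that may not be stolen) go through dpdk_do_tx_copy();
 * the rest are queued directly, with oversized packets dropped and counted. */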
1068static inline void
1069netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
e14deea0 1070 struct dp_packet **pkts, int cnt, bool may_steal)
8a9562d2 1071{
f4fd623c 1072 int i;
8a9562d2 1073
7251515e 1074 if (OVS_UNLIKELY(!may_steal ||
cf62fa4c 1075 pkts[0]->source != DPBUF_DPDK)) {
7251515e
DV
1076 struct netdev *netdev = &dev->up;
1077
2654cc33 1078 dpdk_do_tx_copy(netdev, qid, pkts, cnt);
b3cd9f9d
PS
1079
1080 if (may_steal) {
f4fd623c 1081 for (i = 0; i < cnt; i++) {
e14deea0 1082 dp_packet_delete(pkts[i]);
f4fd623c 1083 }
b3cd9f9d 1084 }
8a9562d2 1085 } else {
f4fd623c
DDP
1086 int next_tx_idx = 0;
1087 int dropped = 0;
8a9562d2 1088
f4fd623c 1089 for (i = 0; i < cnt; i++) {
cf62fa4c 1090 int size = dp_packet_size(pkts[i]);
1b99bb05 1091
f4fd623c
DDP
1092 if (OVS_UNLIKELY(size > dev->max_packet_len)) {
1093 if (next_tx_idx != i) {
1094 dpdk_queue_pkts(dev, qid,
1095 (struct rte_mbuf **)&pkts[next_tx_idx],
1096 i-next_tx_idx);
1ebfe1ac 1097 }
f4fd623c 1098
1ebfe1ac
DDP
1099 VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
1100 (int)size , dev->max_packet_len);
f4fd623c 1101
e14deea0 1102 dp_packet_delete(pkts[i]);
1ebfe1ac 1103 dropped++;
f4fd623c
DDP
1104 next_tx_idx = i + 1;
1105 }
1106 }
1107 if (next_tx_idx != cnt) {
1108 dpdk_queue_pkts(dev, qid,
1109 (struct rte_mbuf **)&pkts[next_tx_idx],
1110 cnt-next_tx_idx);
1111 }
8a9562d2 1112
f4fd623c 1113 if (OVS_UNLIKELY(dropped)) {
45d947c4 1114 rte_spinlock_lock(&dev->stats_lock);
f4fd623c 1115 dev->stats.tx_dropped += dropped;
45d947c4 1116 rte_spinlock_unlock(&dev->stats_lock);
f4fd623c 1117 }
8a9562d2 1118 }
7251515e
DV
1119}
1120
1121static int
1122netdev_dpdk_eth_send(struct netdev *netdev, int qid,
e14deea0 1123 struct dp_packet **pkts, int cnt, bool may_steal)
7251515e
DV
1124{
1125 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
8a9562d2 1126
7251515e
DV
1127 netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
1128 return 0;
8a9562d2
PS
1129}
1130
1131static int
1132netdev_dpdk_set_etheraddr(struct netdev *netdev,
1133 const uint8_t mac[ETH_ADDR_LEN])
1134{
1135 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1136
1137 ovs_mutex_lock(&dev->mutex);
1138 if (!eth_addr_equals(dev->hwaddr, mac)) {
1139 memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
045c0d1a 1140 netdev_change_seq_changed(netdev);
8a9562d2
PS
1141 }
1142 ovs_mutex_unlock(&dev->mutex);
1143
1144 return 0;
1145}
1146
1147static int
1148netdev_dpdk_get_etheraddr(const struct netdev *netdev,
1149 uint8_t mac[ETH_ADDR_LEN])
1150{
1151 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1152
1153 ovs_mutex_lock(&dev->mutex);
1154 memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
1155 ovs_mutex_unlock(&dev->mutex);
1156
1157 return 0;
1158}
1159
1160static int
1161netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
1162{
1163 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1164
1165 ovs_mutex_lock(&dev->mutex);
1166 *mtup = dev->mtu;
1167 ovs_mutex_unlock(&dev->mutex);
1168
1169 return 0;
1170}
1171
1172static int
1173netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
1174{
1175 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1176 int old_mtu, err;
1177 struct dpdk_mp *old_mp;
1178 struct dpdk_mp *mp;
1179
1180 ovs_mutex_lock(&dpdk_mutex);
1181 ovs_mutex_lock(&dev->mutex);
1182 if (dev->mtu == mtu) {
1183 err = 0;
1184 goto out;
1185 }
1186
1187 mp = dpdk_mp_get(dev->socket_id, dev->mtu);
1188 if (!mp) {
1189 err = ENOMEM;
1190 goto out;
1191 }
1192
1193 rte_eth_dev_stop(dev->port_id);
1194
1195 old_mtu = dev->mtu;
1196 old_mp = dev->dpdk_mp;
1197 dev->dpdk_mp = mp;
1198 dev->mtu = mtu;
1199 dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
1200
1201 err = dpdk_eth_dev_init(dev);
1202 if (err) {
8a9562d2
PS
1203 dpdk_mp_put(mp);
1204 dev->mtu = old_mtu;
1205 dev->dpdk_mp = old_mp;
1206 dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
1207 dpdk_eth_dev_init(dev);
1208 goto out;
1209 }
1210
1211 dpdk_mp_put(old_mp);
045c0d1a 1212 netdev_change_seq_changed(netdev);
8a9562d2
PS
1213out:
1214 ovs_mutex_unlock(&dev->mutex);
1215 ovs_mutex_unlock(&dpdk_mutex);
1216 return err;
1217}
1218
1219static int
1220netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);
1221
58397e6c
KT
1222static int
1223netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
1224 struct netdev_stats *stats)
1225{
1226 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1227
1228 ovs_mutex_lock(&dev->mutex);
1229 memset(stats, 0, sizeof(*stats));
1230 /* Unsupported Stats */
1231 stats->rx_errors = UINT64_MAX;
1232 stats->tx_errors = UINT64_MAX;
1233 stats->multicast = UINT64_MAX;
1234 stats->collisions = UINT64_MAX;
1235 stats->rx_crc_errors = UINT64_MAX;
1236 stats->rx_fifo_errors = UINT64_MAX;
1237 stats->rx_frame_errors = UINT64_MAX;
1238 stats->rx_length_errors = UINT64_MAX;
1239 stats->rx_missed_errors = UINT64_MAX;
1240 stats->rx_over_errors = UINT64_MAX;
1241 stats->tx_aborted_errors = UINT64_MAX;
1242 stats->tx_carrier_errors = UINT64_MAX;
1244 stats->tx_fifo_errors = UINT64_MAX;
1245 stats->tx_heartbeat_errors = UINT64_MAX;
1246 stats->tx_window_errors = UINT64_MAX;
 1247 stats->rx_bytes = UINT64_MAX;
 1248 stats->rx_dropped = UINT64_MAX;
 1249 stats->tx_bytes = UINT64_MAX;
1250
45d947c4 1251 rte_spinlock_lock(&dev->stats_lock);
58397e6c
KT
1252 /* Supported Stats */
1253 stats->rx_packets += dev->stats.rx_packets;
1254 stats->tx_packets += dev->stats.tx_packets;
1255 stats->tx_dropped += dev->stats.tx_dropped;
45d947c4 1256 rte_spinlock_unlock(&dev->stats_lock);
58397e6c
KT
1257 ovs_mutex_unlock(&dev->mutex);
1258
1259 return 0;
1260}
1261
8a9562d2
PS
1262static int
1263netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1264{
1265 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1266 struct rte_eth_stats rte_stats;
1267 bool gg;
1268
1269 netdev_dpdk_get_carrier(netdev, &gg);
1270 ovs_mutex_lock(&dev->mutex);
1271 rte_eth_stats_get(dev->port_id, &rte_stats);
1272
2f9dd77f 1273 memset(stats, 0, sizeof(*stats));
8a9562d2 1274
2f9dd77f
PS
1275 stats->rx_packets = rte_stats.ipackets;
1276 stats->tx_packets = rte_stats.opackets;
1277 stats->rx_bytes = rte_stats.ibytes;
1278 stats->tx_bytes = rte_stats.obytes;
1279 stats->rx_errors = rte_stats.ierrors;
1280 stats->tx_errors = rte_stats.oerrors;
1281 stats->multicast = rte_stats.imcasts;
8a9562d2 1282
45d947c4 1283 rte_spinlock_lock(&dev->stats_lock);
2f9dd77f 1284 stats->tx_dropped = dev->stats.tx_dropped;
45d947c4 1285 rte_spinlock_unlock(&dev->stats_lock);
8a9562d2
PS
1286 ovs_mutex_unlock(&dev->mutex);
1287
1288 return 0;
1289}
1290
1291static int
1292netdev_dpdk_get_features(const struct netdev *netdev_,
1293 enum netdev_features *current,
1294 enum netdev_features *advertised OVS_UNUSED,
1295 enum netdev_features *supported OVS_UNUSED,
1296 enum netdev_features *peer OVS_UNUSED)
1297{
1298 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1299 struct rte_eth_link link;
1300
1301 ovs_mutex_lock(&dev->mutex);
1302 link = dev->link;
1303 ovs_mutex_unlock(&dev->mutex);
1304
1305 if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
1306 if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
1307 *current = NETDEV_F_AUTONEG;
1308 }
1309 } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
1310 if (link.link_speed == ETH_LINK_SPEED_10) {
1311 *current = NETDEV_F_10MB_HD;
1312 }
1313 if (link.link_speed == ETH_LINK_SPEED_100) {
1314 *current = NETDEV_F_100MB_HD;
1315 }
1316 if (link.link_speed == ETH_LINK_SPEED_1000) {
1317 *current = NETDEV_F_1GB_HD;
1318 }
1319 } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
1320 if (link.link_speed == ETH_LINK_SPEED_10) {
1321 *current = NETDEV_F_10MB_FD;
1322 }
1323 if (link.link_speed == ETH_LINK_SPEED_100) {
1324 *current = NETDEV_F_100MB_FD;
1325 }
1326 if (link.link_speed == ETH_LINK_SPEED_1000) {
1327 *current = NETDEV_F_1GB_FD;
1328 }
1329 if (link.link_speed == ETH_LINK_SPEED_10000) {
1330 *current = NETDEV_F_10GB_FD;
1331 }
1332 }
1333
1334 return 0;
1335}
1336
1337static int
1338netdev_dpdk_get_ifindex(const struct netdev *netdev)
1339{
1340 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1341 int ifindex;
1342
1343 ovs_mutex_lock(&dev->mutex);
1344 ifindex = dev->port_id;
1345 ovs_mutex_unlock(&dev->mutex);
1346
1347 return ifindex;
1348}
1349
1350static int
1351netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
1352{
1353 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1354
1355 ovs_mutex_lock(&dev->mutex);
1356 check_link_status(dev);
1357 *carrier = dev->link.link_status;
58397e6c
KT
1358
1359 ovs_mutex_unlock(&dev->mutex);
1360
1361 return 0;
1362}
1363
1364static int
1365netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
1366{
1367 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1368 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
1369
1370 ovs_mutex_lock(&dev->mutex);
1371
1372 if (is_vhost_running(virtio_dev)) {
1373 *carrier = 1;
1374 } else {
1375 *carrier = 0;
1376 }
1377
8a9562d2
PS
1378 ovs_mutex_unlock(&dev->mutex);
1379
1380 return 0;
1381}
1382
1383static long long int
1384netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
1385{
1386 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1387 long long int carrier_resets;
1388
1389 ovs_mutex_lock(&dev->mutex);
1390 carrier_resets = dev->link_reset_cnt;
1391 ovs_mutex_unlock(&dev->mutex);
1392
1393 return carrier_resets;
1394}
1395
1396static int
1397netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
1398 long long int interval OVS_UNUSED)
1399{
ee32150e 1400 return EOPNOTSUPP;
8a9562d2
PS
1401}
1402
1403static int
1404netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
1405 enum netdev_flags off, enum netdev_flags on,
95fb793a 1406 enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
8a9562d2
PS
1407{
1408 int err;
1409
1410 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1411 return EINVAL;
1412 }
1413
1414 *old_flagsp = dev->flags;
1415 dev->flags |= on;
1416 dev->flags &= ~off;
1417
1418 if (dev->flags == *old_flagsp) {
1419 return 0;
1420 }
1421
58397e6c
KT
1422 if (dev->type == DPDK_DEV_ETH) {
1423 if (dev->flags & NETDEV_UP) {
1424 err = rte_eth_dev_start(dev->port_id);
1425 if (err)
1426 return -err;
1427 }
8a9562d2 1428
58397e6c
KT
1429 if (dev->flags & NETDEV_PROMISC) {
1430 rte_eth_promiscuous_enable(dev->port_id);
1431 }
8a9562d2 1432
58397e6c
KT
1433 if (!(dev->flags & NETDEV_UP)) {
1434 rte_eth_dev_stop(dev->port_id);
1435 }
8a9562d2
PS
1436 }
1437
1438 return 0;
1439}
1440
1441static int
1442netdev_dpdk_update_flags(struct netdev *netdev_,
1443 enum netdev_flags off, enum netdev_flags on,
1444 enum netdev_flags *old_flagsp)
1445{
1446 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
1447 int error;
1448
1449 ovs_mutex_lock(&netdev->mutex);
1450 error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
1451 ovs_mutex_unlock(&netdev->mutex);
1452
1453 return error;
1454}
1455
1456static int
1457netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
1458{
1459 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1460 struct rte_eth_dev_info dev_info;
1461
e0a801c7 1462 if (dev->port_id < 0)
8a9562d2
PS
1463 return ENODEV;
1464
1465 ovs_mutex_lock(&dev->mutex);
1466 rte_eth_dev_info_get(dev->port_id, &dev_info);
1467 ovs_mutex_unlock(&dev->mutex);
1468
1469 smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
1470
95fb793a 1471 smap_add_format(args, "port_no", "%d", dev->port_id);
8a9562d2
PS
1472 smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
1473 smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
1474 smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
1475 smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
1476 smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
1477 smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
1478 smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
1479 smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
1480 smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
1481 smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);
1482
1483 smap_add_format(args, "pci-vendor_id", "0x%u", dev_info.pci_dev->id.vendor_id);
1484 smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);
1485
1486 return 0;
1487}
1488
1489static void
1490netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
1491 OVS_REQUIRES(dev->mutex)
1492{
1493 enum netdev_flags old_flags;
1494
1495 if (admin_state) {
1496 netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1497 } else {
1498 netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
1499 }
1500}
1501
1502static void
1503netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
1504 const char *argv[], void *aux OVS_UNUSED)
1505{
1506 bool up;
1507
1508 if (!strcasecmp(argv[argc - 1], "up")) {
1509 up = true;
1510 } else if ( !strcasecmp(argv[argc - 1], "down")) {
1511 up = false;
1512 } else {
1513 unixctl_command_reply_error(conn, "Invalid Admin State");
1514 return;
1515 }
1516
1517 if (argc > 2) {
1518 struct netdev *netdev = netdev_from_name(argv[1]);
1519 if (netdev && is_dpdk_class(netdev->netdev_class)) {
1520 struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);
1521
1522 ovs_mutex_lock(&dpdk_dev->mutex);
1523 netdev_dpdk_set_admin_state__(dpdk_dev, up);
1524 ovs_mutex_unlock(&dpdk_dev->mutex);
1525
1526 netdev_close(netdev);
1527 } else {
1528 unixctl_command_reply_error(conn, "Not a DPDK Interface");
1529 netdev_close(netdev);
1530 return;
1531 }
1532 } else {
1533 struct netdev_dpdk *netdev;
1534
1535 ovs_mutex_lock(&dpdk_mutex);
1536 LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
1537 ovs_mutex_lock(&netdev->mutex);
1538 netdev_dpdk_set_admin_state__(netdev, up);
1539 ovs_mutex_unlock(&netdev->mutex);
1540 }
1541 ovs_mutex_unlock(&dpdk_mutex);
1542 }
1543 unixctl_command_reply(conn, "OK");
1544}
1545
58397e6c
KT
1546/*
1547 * Set virtqueue flags so that we do not receive interrupts.
1548 */
1549static void
1550set_irq_status(struct virtio_net *dev)
1551{
1552 dev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
1553 dev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
1554}
1555
1556/*
1557 * A new virtio-net device is added to a vhost port.
1558 */
1559static int
1560new_device(struct virtio_net *dev)
1561{
1562 struct netdev_dpdk *netdev;
1563 bool exists = false;
1564
1565 ovs_mutex_lock(&dpdk_mutex);
1566 /* Add device to the vhost port with the same name as that passed down. */
1567 LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
1568 if (strncmp(dev->ifname, netdev->up.name, IFNAMSIZ) == 0) {
1569 ovs_mutex_lock(&netdev->mutex);
1570 ovsrcu_set(&netdev->virtio_dev, dev);
1571 ovs_mutex_unlock(&netdev->mutex);
1572 exists = true;
1573 dev->flags |= VIRTIO_DEV_RUNNING;
1574 /* Disable notifications. */
1575 set_irq_status(dev);
1576 break;
1577 }
1578 }
1579 ovs_mutex_unlock(&dpdk_mutex);
1580
1581 if (!exists) {
1582 VLOG_INFO("vHost Device '%s' (%ld) can't be added - name not found",
1583 dev->ifname, dev->device_fh);
1584
1585 return -1;
1586 }
1587
1588 VLOG_INFO("vHost Device '%s' (%ld) has been added",
1589 dev->ifname, dev->device_fh);
1590 return 0;
1591}
1592
1593/*
1594 * Remove a virtio-net device from the specific vhost port. Use dev->remove
1595 * flag to stop any more packets from being sent or received to/from a VM and
1596 * ensure all currently queued packets have been sent/received before removing
1597 * the device.
1598 */
1599static void
1600destroy_device(volatile struct virtio_net *dev)
1601{
1602 struct netdev_dpdk *vhost_dev;
1603
1604 ovs_mutex_lock(&dpdk_mutex);
1605 LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
1606 if (netdev_dpdk_get_virtio(vhost_dev) == dev) {
1607
1608 ovs_mutex_lock(&vhost_dev->mutex);
1609 dev->flags &= ~VIRTIO_DEV_RUNNING;
1610 ovsrcu_set(&vhost_dev->virtio_dev, NULL);
1611 ovs_mutex_unlock(&vhost_dev->mutex);
1612
1613 /*
1614 * Wait for other threads to quiesce before
1615 * setting the virtio_dev to NULL.
1616 */
1617 ovsrcu_synchronize();
618f44f7
KT
1618 /*
1619 * As call to ovsrcu_synchronize() will end the quiescent state,
1620 * put thread back into quiescent state before returning.
1621 */
1622 ovsrcu_quiesce_start();
58397e6c
KT
1623 }
1624 }
1625 ovs_mutex_unlock(&dpdk_mutex);
1626
1627 VLOG_INFO("vHost Device '%s' (%ld) has been removed",
1628 dev->ifname, dev->device_fh);
1629}
1630
1631struct virtio_net *
1632netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
1633{
1634 return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
1635}
1636
1637/*
1638 * These callbacks allow virtio-net devices to be added to vhost ports when
 1639 * configuration has fully completed.
1640 */
bce01e3a 1641static const struct virtio_net_device_ops virtio_net_device_ops =
58397e6c
KT
1642{
1643 .new_device = new_device,
1644 .destroy_device = destroy_device,
1645};
1646
1647static void *
1648start_cuse_session_loop(void *dummy OVS_UNUSED)
1649{
1650 pthread_detach(pthread_self());
618f44f7
KT
1651 /* Put the cuse thread into quiescent state. */
1652 ovsrcu_quiesce_start();
58397e6c
KT
1653 rte_vhost_driver_session_start();
1654 return NULL;
1655}
1656
1657static int
1658dpdk_vhost_class_init(void)
1659{
58397e6c
KT
1660 int err = -1;
1661
1662 rte_vhost_driver_callback_register(&virtio_net_device_ops);
1663
1664 /* Register CUSE device to handle IOCTLs.
1665 * Unless otherwise specified on the vswitchd command line, cuse_dev_name
1666 * is set to vhost-net.
1667 */
1668 err = rte_vhost_driver_register(cuse_dev_name);
1669
1670 if (err != 0) {
1671 VLOG_ERR("CUSE device setup failure.");
1672 return -1;
1673 }
1674
618f44f7
KT
1675 ovs_thread_create("cuse_thread", start_cuse_session_loop, NULL);
1676 return 0;
58397e6c
KT
1677}
1678
033e9df2
DDP
1679static void
1680dpdk_common_init(void)
1681{
1682 unixctl_command_register("netdev-dpdk/set-admin-state",
1683 "[netdev] up|down", 1, 2,
1684 netdev_dpdk_set_admin_state, NULL);
1685
1686 ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
1687}
1688
95fb793a 1689/* Client Rings */
1690
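/* Creates the "<name>_tx" and "<name>_rx" rte_rings backing a dpdkr port and
 * exposes them as a new ethdev through rte_eth_from_rings(); the resulting
 * port id is returned in '*eth_port_id'. */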
95fb793a 1691static int
1692dpdk_ring_create(const char dev_name[], unsigned int port_no,
1693 unsigned int *eth_port_id)
1694{
1695 struct dpdk_ring *ivshmem;
1696 char ring_name[10];
1697 int err;
1698
1699 ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
1700 if (ivshmem == NULL) {
1701 return ENOMEM;
1702 }
1703
7251515e 1704 /* XXX: Add support for multiqueue ring. */
95fb793a 1705 err = snprintf(ring_name, 10, "%s_tx", dev_name);
1706 if (err < 0) {
1707 return -err;
1708 }
1709
7251515e
DV
1710 /* Create single consumer/producer rings, netdev does explicit locking. */
1711 ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
1712 RING_F_SP_ENQ | RING_F_SC_DEQ);
95fb793a 1713 if (ivshmem->cring_tx == NULL) {
1714 rte_free(ivshmem);
1715 return ENOMEM;
1716 }
1717
1718 err = snprintf(ring_name, 10, "%s_rx", dev_name);
1719 if (err < 0) {
1720 return -err;
1721 }
1722
7251515e
DV
1723 /* Create single consumer/producer rings, netdev does explicit locking. */
1724 ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
1725 RING_F_SP_ENQ | RING_F_SC_DEQ);
95fb793a 1726 if (ivshmem->cring_rx == NULL) {
1727 rte_free(ivshmem);
1728 return ENOMEM;
1729 }
1730
d7310583
DDP
1731 err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
1732 &ivshmem->cring_tx, 1, SOCKET0);
1733
95fb793a 1734 if (err < 0) {
1735 rte_free(ivshmem);
1736 return ENODEV;
1737 }
1738
1739 ivshmem->user_port_id = port_no;
1740 ivshmem->eth_port_id = rte_eth_dev_count() - 1;
1741 list_push_back(&dpdk_ring_list, &ivshmem->list_node);
1742
1743 *eth_port_id = ivshmem->eth_port_id;
1744 return 0;
1745}
1746
1747static int
1748dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id) OVS_REQUIRES(dpdk_mutex)
1749{
1750 struct dpdk_ring *ivshmem;
1751 unsigned int port_no;
1752 int err = 0;
1753
1754 /* Names always start with "dpdkr" */
1755 err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
1756 if (err) {
1757 return err;
1758 }
1759
1760 /* look through our list to find the device */
1761 LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
1762 if (ivshmem->user_port_id == port_no) {
58397e6c 1763 VLOG_INFO("Found dpdk ring device %s:", dev_name);
95fb793a 1764 *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
1765 return 0;
1766 }
1767 }
1768 /* Need to create the device rings */
1769 return dpdk_ring_create(dev_name, port_no, eth_port_id);
1770}
1771
7251515e
DV
1772static int
1773netdev_dpdk_ring_send(struct netdev *netdev, int qid OVS_UNUSED,
e14deea0 1774 struct dp_packet **pkts, int cnt, bool may_steal)
7251515e
DV
1775{
1776 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1b99bb05
MG
1777 unsigned i;
1778
1779 /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that the
1780 * rss hash field is clear. This is because the same mbuf may be modified by
 1781 * the consumer of the ring and returned to the datapath without recalculating
1782 * the RSS hash. */
1783 for (i = 0; i < cnt; i++) {
1784 dp_packet_set_rss_hash(pkts[i], 0);
1785 }
7251515e
DV
1786
 1787 /* DPDK rings have a single TX queue, therefore they need locking. */
58397e6c 1788 rte_spinlock_lock(&dev->txq_lock);
7251515e 1789 netdev_dpdk_send__(dev, 0, pkts, cnt, may_steal);
58397e6c 1790 rte_spinlock_unlock(&dev->txq_lock);
7251515e
DV
1791 return 0;
1792}
1793
95fb793a 1794static int
1795netdev_dpdk_ring_construct(struct netdev *netdev)
1796{
1797 unsigned int port_no = 0;
1798 int err = 0;
1799
1800 if (rte_eal_init_ret) {
1801 return rte_eal_init_ret;
1802 }
1803
1804 ovs_mutex_lock(&dpdk_mutex);
1805
1806 err = dpdk_ring_open(netdev->name, &port_no);
1807 if (err) {
1808 goto unlock_dpdk;
1809 }
1810
58397e6c 1811 err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
95fb793a 1812
1813unlock_dpdk:
1814 ovs_mutex_unlock(&dpdk_mutex);
1815 return err;
1816}
1817
58397e6c
KT
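/* Template for the netdev_class of every DPDK port type (physical, ring,
 * vhost): the boilerplate callbacks are shared and only the operations passed
 * in as macro arguments differ between the variants. */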
1818#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
1819 GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV) \
95fb793a 1820{ \
1821 NAME, \
1822 INIT, /* init */ \
1823 NULL, /* netdev_dpdk_run */ \
1824 NULL, /* netdev_dpdk_wait */ \
1825 \
1826 netdev_dpdk_alloc, \
1827 CONSTRUCT, \
58397e6c 1828 DESTRUCT, \
95fb793a 1829 netdev_dpdk_dealloc, \
1830 netdev_dpdk_get_config, \
1831 NULL, /* netdev_dpdk_set_config */ \
1832 NULL, /* get_tunnel_config */ \
58397e6c
KT
1833 NULL, /* build header */ \
1834 NULL, /* push header */ \
1835 NULL, /* pop header */ \
7dec44fe 1836 netdev_dpdk_get_numa_id, /* get_numa_id */ \
5496878c 1837 MULTIQ, /* set_multiq */ \
95fb793a 1838 \
7251515e 1839 SEND, /* send */ \
95fb793a 1840 NULL, /* send_wait */ \
1841 \
1842 netdev_dpdk_set_etheraddr, \
1843 netdev_dpdk_get_etheraddr, \
1844 netdev_dpdk_get_mtu, \
1845 netdev_dpdk_set_mtu, \
1846 netdev_dpdk_get_ifindex, \
58397e6c 1847 GET_CARRIER, \
95fb793a 1848 netdev_dpdk_get_carrier_resets, \
1849 netdev_dpdk_set_miimon, \
58397e6c
KT
1850 GET_STATS, \
1851 GET_FEATURES, \
95fb793a 1852 NULL, /* set_advertisements */ \
1853 \
1854 NULL, /* set_policing */ \
1855 NULL, /* get_qos_types */ \
1856 NULL, /* get_qos_capabilities */ \
1857 NULL, /* get_qos */ \
1858 NULL, /* set_qos */ \
1859 NULL, /* get_queue */ \
1860 NULL, /* set_queue */ \
1861 NULL, /* delete_queue */ \
1862 NULL, /* get_queue_stats */ \
1863 NULL, /* queue_dump_start */ \
1864 NULL, /* queue_dump_next */ \
1865 NULL, /* queue_dump_done */ \
1866 NULL, /* dump_queue_stats */ \
1867 \
1868 NULL, /* get_in4 */ \
1869 NULL, /* set_in4 */ \
1870 NULL, /* get_in6 */ \
1871 NULL, /* add_router */ \
1872 NULL, /* get_next_hop */ \
58397e6c 1873 GET_STATUS, \
95fb793a 1874 NULL, /* arp_lookup */ \
1875 \
1876 netdev_dpdk_update_flags, \
1877 \
1878 netdev_dpdk_rxq_alloc, \
1879 netdev_dpdk_rxq_construct, \
1880 netdev_dpdk_rxq_destruct, \
1881 netdev_dpdk_rxq_dealloc, \
58397e6c 1882 RXQ_RECV, \
95fb793a 1883 NULL, /* rx_wait */ \
1884 NULL, /* rxq_drain */ \
1885}
8a9562d2
PS
1886
1887int
1888dpdk_init(int argc, char **argv)
1889{
1890 int result;
58397e6c
KT
1891 int base = 0;
 1892 char *program_name = argv[0];
8a9562d2 1893
9441caf3 1894 if (argc < 2 || strcmp(argv[1], "--dpdk"))
8a9562d2
PS
1895 return 0;
1896
58397e6c 1897 /* Remove the --dpdk argument from arg list.*/
8a9562d2
PS
1898 argc--;
1899 argv++;
1900
58397e6c
KT
1901 /* If the cuse_dev_name parameter has been provided, set 'cuse_dev_name' to
1902 * this string if it meets the correct criteria. Otherwise, set it to the
1903 * default (vhost-net).
1904 */
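    /* For example (hypothetical command line), with
     * "ovs-vswitchd --dpdk --cuse_dev_name my-vhost -c 0x1 ..." the CUSE
     * device becomes /dev/my-vhost and the EAL arguments start at "-c". */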
1905 if (!strcmp(argv[1], "--cuse_dev_name") &&
1906 (strlen(argv[2]) <= NAME_MAX)) {
1907
1908 cuse_dev_name = strdup(argv[2]);
1909
1910 /* Remove the cuse_dev_name configuration parameters from the argument
1911 * list, so that the correct elements are passed to the DPDK
1912 * initialization function
1913 */
1914 argc -= 2;
1915 argv += 2; /* Increment by two to bypass the cuse_dev_name arguments */
1916 base = 2;
1917
1918 VLOG_ERR("User-provided cuse_dev_name in use: /dev/%s", cuse_dev_name);
1919 } else {
1920 cuse_dev_name = "vhost-net";
1921 VLOG_INFO("No cuse_dev_name provided - defaulting to /dev/vhost-net");
1922 }
1923
1924 /* Keep the program name argument as this is needed for call to
1925 * rte_eal_init()
1926 */
 1927 argv[0] = program_name;
1928
8a9562d2
PS
1929 /* Make sure things are initialized ... */
1930 result = rte_eal_init(argc, argv);
451450fa 1931 if (result < 0) {
58397e6c 1932 ovs_abort(result, "Cannot init EAL");
451450fa 1933 }
8a9562d2 1934
d7310583 1935 rte_memzone_dump(stdout);
8a9562d2
PS
1936 rte_eal_init_ret = 0;
1937
451450fa 1938 if (argc > result) {
9441caf3 1939 argv[result] = argv[0];
451450fa 1940 }
9441caf3 1941
db73f716 1942 /* We are called from the main thread here */
d5c199ea 1943 RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
db73f716 1944
58397e6c 1945 return result + 1 + base;
8a9562d2
PS
1946}
1947
bce01e3a 1948static const struct netdev_class dpdk_class =
95fb793a 1949 NETDEV_DPDK_CLASS(
1950 "dpdk",
b8e57534 1951 NULL,
5496878c 1952 netdev_dpdk_construct,
58397e6c 1953 netdev_dpdk_destruct,
7251515e 1954 netdev_dpdk_set_multiq,
58397e6c
KT
1955 netdev_dpdk_eth_send,
1956 netdev_dpdk_get_carrier,
1957 netdev_dpdk_get_stats,
1958 netdev_dpdk_get_features,
1959 netdev_dpdk_get_status,
1960 netdev_dpdk_rxq_recv);
95fb793a 1961
bce01e3a 1962static const struct netdev_class dpdk_ring_class =
95fb793a 1963 NETDEV_DPDK_CLASS(
1964 "dpdkr",
033e9df2 1965 NULL,
5496878c 1966 netdev_dpdk_ring_construct,
58397e6c
KT
1967 netdev_dpdk_destruct,
1968 NULL,
1969 netdev_dpdk_ring_send,
1970 netdev_dpdk_get_carrier,
1971 netdev_dpdk_get_stats,
1972 netdev_dpdk_get_features,
1973 netdev_dpdk_get_status,
1974 netdev_dpdk_rxq_recv);
1975
bce01e3a 1976static const struct netdev_class dpdk_vhost_class =
58397e6c
KT
1977 NETDEV_DPDK_CLASS(
1978 "dpdkvhost",
1979 dpdk_vhost_class_init,
1980 netdev_dpdk_vhost_construct,
1981 netdev_dpdk_vhost_destruct,
1982 netdev_dpdk_vhost_set_multiq,
1983 netdev_dpdk_vhost_send,
1984 netdev_dpdk_vhost_get_carrier,
1985 netdev_dpdk_vhost_get_stats,
1986 NULL,
7251515e 1987 NULL,
58397e6c 1988 netdev_dpdk_vhost_rxq_recv);
95fb793a 1989
8a9562d2
PS
1990void
1991netdev_dpdk_register(void)
1992{
95fb793a 1993 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
1994
033e9df2
DDP
1995 if (rte_eal_init_ret) {
1996 return;
1997 }
1998
95fb793a 1999 if (ovsthread_once_start(&once)) {
033e9df2 2000 dpdk_common_init();
95fb793a 2001 netdev_register_provider(&dpdk_class);
2002 netdev_register_provider(&dpdk_ring_class);
58397e6c 2003 netdev_register_provider(&dpdk_vhost_class);
95fb793a 2004 ovsthread_once_done(&once);
2005 }
8a9562d2 2006}
8617afff
PS
2007
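/* Pins the calling pmd thread to 'cpu' and records that core id in DPDK's
 * per-lcore state so that lcore-local resources (e.g. mempool caches) are
 * used correctly. */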
2008int
bd5131ba 2009pmd_thread_setaffinity_cpu(unsigned cpu)
8617afff
PS
2010{
2011 cpu_set_t cpuset;
2012 int err;
2013
2014 CPU_ZERO(&cpuset);
2015 CPU_SET(cpu, &cpuset);
2016 err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
2017 if (err) {
2018 VLOG_ERR("Thread affinity error %d",err);
2019 return err;
2020 }
abb5943d
AW
2021 /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
2022 ovs_assert(cpu != NON_PMD_CORE_ID);
65f13b50 2023 RTE_PER_LCORE(_lcore_id) = cpu;
8617afff
PS
2024
2025 return 0;
2026}
db73f716 2027
db73f716
DDP
2028static bool
2029thread_is_pmd(void)
2030{
abb5943d 2031 return rte_lcore_id() != NON_PMD_CORE_ID;
db73f716 2032}