/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>
#include <sched.h>
#include <unistd.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "list.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "ofp-print.h"
#include "ovs-numa.h"
#include "ovs-thread.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "shash.h"
#include "sset.h"
#include "unaligned.h"
#include "timeval.h"
#include "unixctl.h"
#include "openvswitch/vlog.h"

VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

/*
 * We need to reserve extra space in the mbufs so that the DMA
 * addresses can be aligned to 4KB.
 */

#define MTU_TO_MAX_LEN(mtu)  ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)       (MTU_TO_MAX_LEN(mtu) + (512) + \
                              sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

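/*
 * For example, with the standard Ethernet MTU of 1500:
 * MTU_TO_MAX_LEN(1500) = 1500 + 14 (Ethernet header) + 4 (CRC) = 1518 bytes
 * of frame data, and MBUF_SIZE(1500) further reserves 512 bytes of alignment
 * slack plus sizeof(struct rte_mbuf) and RTE_PKTMBUF_HEADROOM for each
 * buffer in the mempool.
 */
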
/* XXX: mempool size should be based on system resources. */
#define NB_MBUF     (4096 * 64)
#define MP_CACHE_SZ (256 * 2)
#define SOCKET0     0

#define NIC_PORT_RX_Q_SIZE 2048 /* Size of Physical NIC RX Queue, Max (n + 32 <= 4096). */
#define NIC_PORT_TX_Q_SIZE 2048 /* Size of Physical NIC TX Queue, Max (n + 32 <= 4096). */

/* XXX: Needs per NIC value for these constants. */
#define RX_PTHRESH 32 /* Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 32 /* Default values of RX host threshold reg. */
#define RX_WTHRESH 16 /* Default values of RX write-back threshold reg. */

#define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /* Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /* Default values of TX write-back threshold reg. */

static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .split_hdr_size = 0,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
        .hw_strip_crc   = 0,
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_key = NULL,
            .rss_hf = ETH_RSS_IPV4_TCP | ETH_RSS_IPV4 | ETH_RSS_IPV6
                      | ETH_RSS_IPV4_UDP | ETH_RSS_IPV6_TCP | ETH_RSS_IPV6_UDP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

static const struct rte_eth_rxconf rx_conf = {
    .rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
    },
};

static const struct rte_eth_txconf tx_conf = {
    .tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
    },
    .tx_free_thresh = 0,
    .tx_rs_thresh = 0,
    .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
};

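/* A tx queue is drained if its packets have been sitting for at least
 * DRAIN_TSC timer cycles; see the diff_tsc check in dpdk_queue_pkts(). */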
enum { MAX_RX_QUEUE_LEN = 192 };
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them. */
struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;

struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;              /* Set to true to flush the queue every
                                 * time packets are queued. */
    int count;
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};

/* DPDK has no way to remove DPDK ring ethernet devices,
 * so we have to keep them around once they've been created. */

static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);

struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id; /* User given port no, parsed from port name. */
    int eth_port_id;  /* Ethernet device port id. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;

    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
    rte_spinlock_t dpdkr_tx_lock;
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static bool thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}

/* XXX: use dpdk malloc for the entire OVS. In fact, huge pages should be
 * used for all the other segments too: data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}

/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex). */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
}

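/* Each mempool element is laid out as a 'struct dp_packet' (whose first
 * member is the rte_mbuf itself) immediately followed by the packet data
 * buffer, which is why the code can cast freely between 'struct dp_packet *'
 * and 'struct rte_mbuf *', as free_dpdk_buf() above does. */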
static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));

    memset(m, 0, mp->elt_size);

    /* start of buffer is just after the mbuf structure */
    m->buf_addr = (char *)m + sizeof(struct dp_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dp_packet);
    m->buf_len = (uint16_t)buf_len;

    /* keep some headroom between start of buffer and data */
    m->pkt.data = (char*) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);

    /* init some constant fields */
    m->type = RTE_MBUF_PKT;
    m->pool = mp;
    m->pkt.nb_segs = 1;
    m->pkt.in_port = 0xff;
}

static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}

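/* A 'struct dpdk_mp' (and its underlying rte_mempool) is shared by all the
 * ports with the same MTU on the same NUMA socket: dpdk_mp_get() first looks
 * for an existing entry in dpdk_mp_list and bumps its refcount, and only
 * creates a new pool, named "ovs_mp_<mtu>_<socket>", when none exists. */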
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d", dmp->mtu,
                 dmp->socket_id) < 0) {
        return NULL;
    }

    dmp->mp = rte_mempool_create(mp_name, NB_MBUF, MBUF_SIZE(mtu),
                                 MP_CACHE_SZ,
                                 sizeof(struct rte_pktmbuf_pool_private),
                                 rte_pktmbuf_pool_init, NULL,
                                 ovs_rte_pktmbuf_init, NULL,
                                 socket_id, 0);

    if (dmp->mp == NULL) {
        return NULL;
    }

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}

static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}

static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned)dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                         ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}

static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}

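/* Brings up a physical DPDK port: configures it with the number of rx and tx
 * queues recorded in 'dev->up', sets up each queue, starts the device and
 * puts it in promiscuous mode, then caches the MAC address, link state and
 * usable buffer size on 'dev'. */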
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct ether_addr eth_addr;
    int diag;
    int i;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->up.n_txq,
                                 &port_conf);
    if (diag) {
        VLOG_ERR("eth dev config error %d", diag);
        return -diag;
    }

    for (i = 0; i < dev->up.n_txq; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                      dev->socket_id, &tx_conf);
        if (diag) {
            VLOG_ERR("eth dev tx queue setup error %d", diag);
            return -diag;
        }
    }

    for (i = 0; i < dev->up.n_rxq; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                      dev->socket_id,
                                      &rx_conf, dev->dpdk_mp->mp);
        if (diag) {
            VLOG_ERR("eth dev rx queue setup error %d", diag);
            return -diag;
        }
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("eth dev start error %d", diag);
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;
    return 0;
}

static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);
    return &netdev->up;
}

static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    int i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    /* Each index is considered as a cpu core id, since there should
     * be one tx queue for each cpu core. */
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        /* Flag 'flush_tx' on the queue if its cpu core is on the same
         * numa node as 'netdev'. */
        netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
    }
}

static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;

    ovs_mutex_init(&netdev->mutex);

    ovs_mutex_lock(&netdev->mutex);

    /* If 'sid' is negative, it means that the kernel failed to obtain the
     * pci numa info. In that case, always use 'SOCKET0'. */
    sid = rte_eth_dev_socket_id(port_no);
    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
    netdev->port_id = port_no;
    netdev->flags = 0;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);
    rte_spinlock_init(&netdev->dpdkr_tx_lock);

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;
    err = dpdk_eth_dev_init(netdev);
    if (err) {
        goto unlock;
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}

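/* Parses the port number out of a device name consisting of 'prefix'
 * followed by a decimal integer, e.g. "dpdk0" with prefix "dpdk" yields
 * port 0. Returns ENODEV if 'dev_name' does not start with 'prefix'. */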
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, 0, 0); /* string must be null terminated */
    return 0;
}

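/* A "dpdk" port is normally added through the database, along the lines of
 * (an illustrative sketch; the bridge and port names are arbitrary):
 *
 *     ovs-vsctl add-port br0 dpdk0 -- set Interface dpdk0 type=dpdk
 *
 * The trailing digits of the port name select the DPDK ethernet device, as
 * parsed by dpdk_dev_parse_name() above. */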
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);

    ovs_mutex_destroy(&dev->mutex);
}

static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}

static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev_->n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}

/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, it does not try to restore the old
 * configuration and just returns the error. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;
    rte_free(netdev->tx_q);
    netdev_dpdk_alloc_txq(netdev, n_txq);
    err = dpdk_eth_dev_init(netdev);

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}

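/* Pushes whatever is queued on tx queue 'qid' to the NIC. rte_eth_tx_burst()
 * may accept only part of a burst, so this retries until the NIC stops making
 * progress; anything still unsent is then freed and counted as tx_dropped. */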
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* Free the buffers that we couldn't transmit, one at a time (each
         * packet could come from a different mempool). */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += txq->count - nb_tx;
        ovs_mutex_unlock(&dev->mutex);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}

static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}

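/* The rx path also flushes this core's own tx queue: a pmd thread that is
 * only receiving would otherwise leave previously queued packets sitting in
 * the queue until its next transmit. */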
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core. Do not flush other
     * queues. */
    if (rxq_->queue_id == rte_lcore_id()) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             MIN((int)NETDEV_MAX_RX_BATCH,
                                 (int)MAX_RX_QUEUE_LEN));
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}

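/* Copies the mbuf pointers in 'pkts' into tx queue 'qid', flushing whenever
 * the queue fills up (MAX_TX_QUEUE_LEN), whenever the queue has 'flush_tx'
 * set, or whenever the oldest queued packet has waited at least DRAIN_TSC
 * timer cycles. */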
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}

/* Tx function. Copies each packet into a freshly allocated DPDK mbuf and
 * queues it for transmission. */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[cnt];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache. */

    if (!thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int)size, dev->max_packet_len);

            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(mbufs[newcnt]->pkt.data, dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (OVS_UNLIKELY(dropped)) {
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += dropped;
        ovs_mutex_unlock(&dev->mutex);
    }

    dpdk_queue_pkts(dev, qid, mbufs, newcnt);
    dpdk_queue_flush(dev, qid);

    if (!thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}

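/* Common send path. If 'may_steal' is false or the packets do not come from
 * a DPDK mempool (source != DPBUF_DPDK), each packet has to be copied into a
 * freshly allocated mbuf via dpdk_do_tx_copy(); otherwise the mbufs are
 * queued directly, with oversized packets dropped and counted. */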
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);
            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **)&pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int)size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **)&pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            ovs_mutex_lock(&dev->mutex);
            dev->stats.tx_dropped += dropped;
            ovs_mutex_unlock(&dev->mutex);
        }
    }
}

static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}

static int
netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    /* The mempool must be sized for the new MTU, not the current one. */
    mp = dpdk_mp_get(dev->socket_id, mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);

static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    stats->rx_errors = rte_stats.ierrors;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    stats->tx_dropped = dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}

static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return EOPNOTSUPP;
}

static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->flags & NETDEV_UP) {
        err = rte_eth_dev_start(dev->port_id);
        if (err) {
            return -err;
        }
    }

    if (dev->flags & NETDEV_PROMISC) {
        rte_eth_promiscuous_enable(dev->port_id);
    }

    if (!(dev->flags & NETDEV_UP)) {
        rte_eth_dev_stop(dev->port_id);
    }

    return 0;
}

static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}

static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x", dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);

    return 0;
}

static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

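/* The admin state can be toggled at runtime with the unixctl command
 * registered below, e.g.:
 *
 *     ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
 *
 * Omitting the netdev name applies the state to every DPDK device. */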
static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
}

static int
dpdk_class_init(void)
{
    int result;

    result = rte_eal_pci_probe();
    if (result) {
        VLOG_ERR("Cannot probe PCI");
        return -result;
    }

    VLOG_INFO("Ethernet Device Count: %d", (int)rte_eth_dev_count());

    return 0;
}

/* Client Rings */

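/* A "dpdkr" port, e.g. "dpdkr0", is backed by a pair of rte_rings named
 * "<devname>_tx" and "<devname>_rx" that a client DPDK process on the same
 * host can attach to, rather than by a physical NIC. */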
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[10];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, 10, "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, 10, "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);

    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}

static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device. */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:\n", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}

static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid OVS_UNUSED,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* DPDK rings have a single TX queue, therefore they need locking. */
    rte_spinlock_lock(&dev->dpdkr_tx_lock);
    netdev_dpdk_send__(dev, 0, pkts, cnt, may_steal);
    rte_spinlock_unlock(&dev->dpdkr_tx_lock);
    return 0;
}

static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, MULTIQ, SEND)    \
{                                                                 \
    NAME,                                                         \
    INIT,                       /* init */                        \
    NULL,                       /* netdev_dpdk_run */             \
    NULL,                       /* netdev_dpdk_wait */            \
                                                                  \
    netdev_dpdk_alloc,                                            \
    CONSTRUCT,                                                    \
    netdev_dpdk_destruct,                                         \
    netdev_dpdk_dealloc,                                          \
    netdev_dpdk_get_config,                                       \
    NULL,                       /* netdev_dpdk_set_config */      \
    NULL,                       /* get_tunnel_config */           \
    NULL,                       /* build header */                \
    NULL,                       /* push header */                 \
    NULL,                       /* pop header */                  \
    netdev_dpdk_get_numa_id,    /* get_numa_id */                 \
    MULTIQ,                     /* set_multiq */                  \
                                                                  \
    SEND,                       /* send */                        \
    NULL,                       /* send_wait */                   \
                                                                  \
    netdev_dpdk_set_etheraddr,                                    \
    netdev_dpdk_get_etheraddr,                                    \
    netdev_dpdk_get_mtu,                                          \
    netdev_dpdk_set_mtu,                                          \
    netdev_dpdk_get_ifindex,                                      \
    netdev_dpdk_get_carrier,                                      \
    netdev_dpdk_get_carrier_resets,                               \
    netdev_dpdk_set_miimon,                                       \
    netdev_dpdk_get_stats,                                        \
    netdev_dpdk_get_features,                                     \
    NULL,                       /* set_advertisements */          \
                                                                  \
    NULL,                       /* set_policing */                \
    NULL,                       /* get_qos_types */                \
    NULL,                       /* get_qos_capabilities */        \
    NULL,                       /* get_qos */                     \
    NULL,                       /* set_qos */                     \
    NULL,                       /* get_queue */                   \
    NULL,                       /* set_queue */                   \
    NULL,                       /* delete_queue */                \
    NULL,                       /* get_queue_stats */             \
    NULL,                       /* queue_dump_start */            \
    NULL,                       /* queue_dump_next */             \
    NULL,                       /* queue_dump_done */             \
    NULL,                       /* dump_queue_stats */            \
                                                                  \
    NULL,                       /* get_in4 */                     \
    NULL,                       /* set_in4 */                     \
    NULL,                       /* get_in6 */                     \
    NULL,                       /* add_router */                  \
    NULL,                       /* get_next_hop */                \
    netdev_dpdk_get_status,                                       \
    NULL,                       /* arp_lookup */                  \
                                                                  \
    netdev_dpdk_update_flags,                                     \
                                                                  \
    netdev_dpdk_rxq_alloc,                                        \
    netdev_dpdk_rxq_construct,                                    \
    netdev_dpdk_rxq_destruct,                                     \
    netdev_dpdk_rxq_dealloc,                                      \
    netdev_dpdk_rxq_recv,                                         \
    NULL,                       /* rx_wait */                     \
    NULL,                       /* rxq_drain */                   \
}

int
dpdk_init(int argc, char **argv)
{
    int result;

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Make sure program name passed to rte_eal_init() is vswitchd. */
    argv[1] = argv[0];

    argc--;
    argv++;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL\n");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here */
    thread_set_nonpmd();

    return result + 1;
}

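/* For example (an illustrative command line along the lines of the ones in
 * INSTALL.DPDK), the daemon might be started as:
 *
 *     ovs-vswitchd --dpdk -c 0x1 -n 4 -- unix:$DB_SOCK --pidfile --detach
 *
 * Everything between "--dpdk" and "--" is handed to rte_eal_init();
 * dpdk_init() returns how many arguments it consumed so that the caller can
 * skip past them. */
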
const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        dpdk_class_init,
        netdev_dpdk_construct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send);

const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        NULL,
        netdev_dpdk_ring_send);

void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
        ovsthread_once_done(&once);
    }
}

int
pmd_thread_setaffinity_cpu(int cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}

void
thread_set_nonpmd(void)
{
    /* We have to use NON_PMD_CORE_ID to allow non-pmd threads to perform
     * certain DPDK operations, like rte_eth_dev_configure(). */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
}

static bool
thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}