/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "dpif-netdev.h"
#include "list.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "ofp-print.h"
#include "ofpbuf.h"
#include "ovs-thread.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "shash.h"
#include "sset.h"
#include "unaligned.h"
#include "timeval.h"
#include "unixctl.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

/*
 * We need to reserve extra space in the mbufs so that we can align the
 * DMA addresses to 4KB.
 */

#define MTU_TO_MAX_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)      (MTU_TO_MAX_LEN(mtu) + (512) + \
                             sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

/* TODO: mempool size should be based on system resources. */
#define NB_MBUF     (4096 * 64)
#define MP_CACHE_SZ (256 * 2)
#define SOCKET0     0

#define NON_PMD_THREAD_TX_QUEUE 0

/* TODO: Needs per NIC value for these constants. */
#define RX_PTHRESH 32 /* Default value of RX prefetch threshold reg. */
#define RX_HTHRESH 32 /* Default value of RX host threshold reg. */
#define RX_WTHRESH 16 /* Default value of RX write-back threshold reg. */

#define TX_PTHRESH 36 /* Default value of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /* Default value of TX host threshold reg. */
#define TX_WTHRESH 0  /* Default value of TX write-back threshold reg. */

static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .split_hdr_size = 0,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
        .hw_strip_crc   = 0,
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_key = NULL,
            .rss_hf = ETH_RSS_IPV4_TCP | ETH_RSS_IPV4 | ETH_RSS_IPV6,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

static const struct rte_eth_rxconf rx_conf = {
    .rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
    },
};

static const struct rte_eth_txconf tx_conf = {
    .tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
    },
    .tx_free_thresh = 0,
    .tx_rs_thresh = 0,
};

enum { MAX_RX_QUEUE_LEN = 64 };
enum { MAX_TX_QUEUE_LEN = 64 };
enum { DRAIN_TSC = 200000ULL };

static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct netdev_dpdk's. */
static struct list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = LIST_INITIALIZER(&dpdk_list);

static struct list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = LIST_INITIALIZER(&dpdk_mp_list);

struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct dpdk_tx_queue {
    rte_spinlock_t tx_lock;
    int count;
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};

struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;

    struct dpdk_tx_queue tx_q[NR_QUEUE];

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats_offset;
    struct netdev_stats stats;

    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* In dpdk_list. */
    struct list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static int netdev_dpdk_construct(struct netdev *);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}

/* TODO: use DPDK malloc for the entire OVS.  In fact, huge pages should be
 * used for all the other segments as well: data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}

void
free_dpdk_buf(struct ofpbuf *b)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) b->dpdk_buf;

    rte_mempool_put(pkt->pool, pkt);
}

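/* Per-mbuf initializer, run once for every element of a new mempool.  It
 * mirrors DPDK's rte_pktmbuf_init(), except that the packet buffer starts
 * after a full 'struct ofpbuf' rather than after the bare rte_mbuf, since
 * each element is later handed to OVS as an ofpbuf (see
 * ovs_rte_pktmbuf_init() below). */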
static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct ofpbuf);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct ofpbuf));

    memset(m, 0, mp->elt_size);

    /* start of buffer is just after the ofpbuf structure */
    m->buf_addr = (char *) m + sizeof(struct ofpbuf);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof(struct ofpbuf);
    m->buf_len = (uint16_t) buf_len;

    /* keep some headroom between start of buffer and data */
    m->pkt.data = (char *) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM,
                                                 m->buf_len);

    /* init some constant fields */
    m->type = RTE_MBUF_PKT;
    m->pool = mp;
    m->pkt.nb_segs = 1;
    m->pkt.in_port = 0xff;
}

static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    ofpbuf_init_dpdk((struct ofpbuf *) m, m->buf_len);
}

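/* Returns a reference to the mempool for ('socket_id', 'mtu'), creating it
 * on first use.  The caller must hold 'dpdk_mutex' and should release the
 * reference with dpdk_mp_put() when done. */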
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    /* Include the socket id in the name: mempool names must be unique, and
     * the same MTU may be in use on more than one socket. */
    snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d",
             dmp->mtu, dmp->socket_id);
    dmp->mp = rte_mempool_create(mp_name, NB_MBUF, MBUF_SIZE(mtu),
                                 MP_CACHE_SZ,
                                 sizeof(struct rte_pktmbuf_pool_private),
                                 rte_pktmbuf_pool_init, NULL,
                                 ovs_rte_pktmbuf_init, NULL,
                                 socket_id, 0);

    if (dmp->mp == NULL) {
        return NULL;
    }

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}

static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}

static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX)
                        ? "full-duplex" : "half-duplex");
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}

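/* Detached watchdog thread: polls the link state of every DPDK port every
 * DPDK_PORT_WATCHDOG_INTERVAL seconds, so that link changes are noticed
 * even when nothing else queries the carrier. */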
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}

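/* Configures NR_QUEUE rx and tx queues for 'dev', starts the port in
 * promiscuous and all-multicast mode, and caches its MAC address and link
 * state.  Returns 0 on success, otherwise the error returned by DPDK. */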
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct ether_addr eth_addr;
    int diag;
    int i;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return -ENODEV;
    }

    diag = rte_eth_dev_configure(dev->port_id, NR_QUEUE, NR_QUEUE, &port_conf);
    if (diag) {
        VLOG_ERR("eth dev config error %d", diag);
        return diag;
    }

    for (i = 0; i < NR_QUEUE; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, MAX_TX_QUEUE_LEN,
                                      dev->socket_id, &tx_conf);
        if (diag) {
            VLOG_ERR("eth dev tx queue setup error %d", diag);
            return diag;
        }
    }

    for (i = 0; i < NR_QUEUE; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, MAX_RX_QUEUE_LEN,
                                      dev->socket_id,
                                      &rx_conf, dev->dpdk_mp->mp);
        if (diag) {
            VLOG_ERR("eth dev rx queue setup error %d", diag);
            return diag;
        }
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("eth dev start error %d", diag);
        return diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;
    return 0;
}

static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

    return &netdev->up;
}

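/* Opens the device named by 'netdev_'.  The name must be "dpdk" followed by
 * the port number, e.g. "dpdk0"; the port is brought up with the default
 * MTU and a mempool shared with other ports of the same socket and MTU. */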
static int
netdev_dpdk_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    unsigned int port_no;
    char *cport;
    int err;
    int i;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    cport = netdev_->name + 4; /* Names always start with "dpdk". */

    if (strncmp(netdev_->name, "dpdk", 4)) {
        err = ENODEV;
        goto unlock_dpdk;
    }

    port_no = strtol(cport, 0, 0); /* string must be null terminated */

    for (i = 0; i < NR_QUEUE; i++) {
        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
    }

    ovs_mutex_init(&netdev->mutex);

    ovs_mutex_lock(&netdev->mutex);
    netdev->flags = 0;

    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);

    /* TODO: need to discover device node at run time. */
    netdev->socket_id = SOCKET0;
    netdev->port_id = port_no;

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock_dev;
    }

    err = dpdk_eth_dev_init(netdev);
    if (err) {
        goto unlock_dev;
    }
    netdev_->n_rxq = NR_QUEUE;

    list_push_back(&dpdk_list, &netdev->list_node);

unlock_dev:
    ovs_mutex_unlock(&netdev->mutex);
unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);

    ovs_mutex_destroy(&dev->mutex);
}

static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}

static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    /* TODO: Allow to configure the number of queues.  For now the rx and tx
     * queue counts are both fixed at NR_QUEUE, so 'n_rxq' serves for both. */
    smap_add_format(args, "configured_rx_queues", "%u", netdev_->n_rxq);
    smap_add_format(args, "configured_tx_queues", "%u", netdev_->n_rxq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}

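/* Transmits the packets batched on tx queue 'qid' of 'dev' in a single
 * burst.  Packets that the NIC does not accept are returned to the
 * mempool, i.e. dropped rather than retried. */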
static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx;

    if (txq->count == 0) {
        return;
    }
    rte_spinlock_lock(&txq->tx_lock);
    nb_tx = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts, txq->count);
    if (nb_tx != txq->count) {
        /* free buffers if we couldn't transmit packets */
        rte_mempool_put_bulk(dev->dpdk_mp->mp,
                             (void **) &txq->burst_pkts[nb_tx],
                             (txq->count - nb_tx));
    }
    txq->count = 0;
    rte_spinlock_unlock(&txq->tx_lock);
}

static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct ofpbuf **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    dpdk_queue_flush(dev, rxq_->queue_id);

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             MIN((int) NETDEV_MAX_RX_BATCH,
                                 (int) MAX_RX_QUEUE_LEN));
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}

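/* Adds 'pkt' to the tx batch for queue 'qid' of 'dev'.  The batch is sent
 * once it is full (MAX_TX_QUEUE_LEN packets) or once its oldest packet has
 * been queued for more than DRAIN_TSC timer cycles. */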
static inline void
dpdk_queue_pkt(struct netdev_dpdk *dev, int qid,
               struct rte_mbuf *pkt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;
    uint64_t cur_tsc;
    uint32_t nb_tx;

    rte_spinlock_lock(&txq->tx_lock);
    txq->burst_pkts[txq->count++] = pkt;
    if (txq->count == MAX_TX_QUEUE_LEN) {
        goto flush;
    }
    cur_tsc = rte_get_timer_cycles();
    if (txq->count == 1) {
        txq->tsc = cur_tsc;
    }
    diff_tsc = cur_tsc - txq->tsc;
    if (diff_tsc >= DRAIN_TSC) {
        goto flush;
    }
    rte_spinlock_unlock(&txq->tx_lock);
    return;

flush:
    nb_tx = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts, txq->count);
    if (nb_tx != txq->count) {
        /* free buffers if we couldn't transmit packets */
        rte_mempool_put_bulk(dev->dpdk_mp->mp,
                             (void **) &txq->burst_pkts[nb_tx],
                             (txq->count - nb_tx));
    }
    txq->count = 0;
    rte_spinlock_unlock(&txq->tx_lock);
}

/* Tx function for packets that are not DPDK mbufs: copies 'buf' into a
 * freshly allocated mbuf and sends it on the tx queue reserved for
 * non-PMD threads. */
static void
dpdk_do_tx_copy(struct netdev *netdev, char *buf, int size)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *pkt;

    pkt = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
    if (!pkt) {
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped++;
        ovs_mutex_unlock(&dev->mutex);
        return;
    }

    /* We have to do a copy for now */
    memcpy(pkt->pkt.data, buf, size);

    rte_pktmbuf_data_len(pkt) = size;
    rte_pktmbuf_pkt_len(pkt) = size;

    dpdk_queue_pkt(dev, NON_PMD_THREAD_TX_QUEUE, pkt);
    dpdk_queue_flush(dev, NON_PMD_THREAD_TX_QUEUE);
}

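/* Sends 'ofpbuf' on 'netdev'.  Buffers that already live in DPDK memory and
 * that the caller lets us steal are queued directly, without a copy, on the
 * tx queue chosen by the current lcore id; everything else goes through
 * dpdk_do_tx_copy(). */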
static int
netdev_dpdk_send(struct netdev *netdev,
                 struct ofpbuf *ofpbuf, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ret;

    if (ofpbuf_size(ofpbuf) > dev->max_packet_len) {
        VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                     (int) ofpbuf_size(ofpbuf), dev->max_packet_len);

        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped++;
        ovs_mutex_unlock(&dev->mutex);

        ret = E2BIG;
        goto out;
    }

    if (!may_steal || ofpbuf->source != OFPBUF_DPDK) {
        dpdk_do_tx_copy(netdev, (char *) ofpbuf_data(ofpbuf),
                        ofpbuf_size(ofpbuf));

        if (may_steal) {
            ofpbuf_delete(ofpbuf);
        }
    } else {
        int qid;

        qid = rte_lcore_id() % NR_QUEUE;

        dpdk_queue_pkt(dev, qid, (struct rte_mbuf *) ofpbuf);
    }
    ret = 0;

out:
    return ret;
}

static int
netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

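/* Changes the MTU of 'netdev' to 'mtu'.  A mempool sized for the new MTU is
 * required, so the port is stopped and reinitialized; on failure the old
 * MTU and mempool are restored. */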
static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    /* Get a mempool sized for the new MTU, not the old one. */
    mp = dpdk_mp_get(dev->socket_id, mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);

static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool carrier;

    /* Refresh the link state before reporting stats. */
    netdev_dpdk_get_carrier(netdev, &carrier);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    *stats = dev->stats_offset;

    stats->rx_packets += rte_stats.ipackets;
    stats->tx_packets += rte_stats.opackets;
    stats->rx_bytes += rte_stats.ibytes;
    stats->tx_bytes += rte_stats.obytes;
    stats->rx_errors += rte_stats.ierrors;
    stats->tx_errors += rte_stats.oerrors;
    stats->multicast += rte_stats.imcasts;

    stats->tx_dropped += dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_stats(struct netdev *netdev, const struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dev->stats_offset = *stats;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}

static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}

static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->flags & NETDEV_UP) {
        err = rte_eth_dev_start(dev->port_id);
        if (err) {
            return err;
        }
    }

    if (dev->flags & NETDEV_PROMISC) {
        rte_eth_promiscuous_enable(dev->port_id);
    }

    if (!(dev->flags & NETDEV_UP)) {
        rte_eth_dev_stop(dev->port_id);
    }

    return 0;
}

static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}

static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x", dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);

    return 0;
}

static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);

        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

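/* One-time initialization of the DPDK netdev class: initializes the poll
 * mode drivers, probes the PCI bus, registers the admin-state unixctl
 * command and starts the link watchdog thread.  A no-op when the EAL was
 * never initialized. */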
static int
dpdk_class_init(void)
{
    int result;

    if (rte_eal_init_ret) {
        return 0;
    }

    result = rte_pmd_init_all();
    if (result) {
        VLOG_ERR("Cannot init PMD");
        return result;
    }

    result = rte_eal_pci_probe();
    if (result) {
        VLOG_ERR("Cannot probe PCI");
        return result;
    }

    if (rte_eth_dev_count() < 1) {
        VLOG_ERR("No Ethernet devices found. Try assigning ports to UIO.");
    }

    VLOG_INFO("Ethernet Device Count: %d", (int) rte_eth_dev_count());

    list_init(&dpdk_list);
    list_init(&dpdk_mp_list);

    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
    return 0;
}

static struct netdev_class netdev_dpdk_class = {
    "dpdk",
    dpdk_class_init,            /* init */
    NULL,                       /* netdev_dpdk_run */
    NULL,                       /* netdev_dpdk_wait */

    netdev_dpdk_alloc,
    netdev_dpdk_construct,
    netdev_dpdk_destruct,
    netdev_dpdk_dealloc,
    netdev_dpdk_get_config,
    NULL,                       /* netdev_dpdk_set_config */
    NULL,                       /* get_tunnel_config */

    netdev_dpdk_send,           /* send */
    NULL,                       /* send_wait */

    netdev_dpdk_set_etheraddr,
    netdev_dpdk_get_etheraddr,
    netdev_dpdk_get_mtu,
    netdev_dpdk_set_mtu,
    netdev_dpdk_get_ifindex,
    netdev_dpdk_get_carrier,
    netdev_dpdk_get_carrier_resets,
    netdev_dpdk_set_miimon,
    netdev_dpdk_get_stats,
    netdev_dpdk_set_stats,
    netdev_dpdk_get_features,
    NULL,                       /* set_advertisements */

    NULL,                       /* set_policing */
    NULL,                       /* get_qos_types */
    NULL,                       /* get_qos_capabilities */
    NULL,                       /* get_qos */
    NULL,                       /* set_qos */
    NULL,                       /* get_queue */
    NULL,                       /* set_queue */
    NULL,                       /* delete_queue */
    NULL,                       /* get_queue_stats */
    NULL,                       /* queue_dump_start */
    NULL,                       /* queue_dump_next */
    NULL,                       /* queue_dump_done */
    NULL,                       /* dump_queue_stats */

    NULL,                       /* get_in4 */
    NULL,                       /* set_in4 */
    NULL,                       /* get_in6 */
    NULL,                       /* add_router */
    NULL,                       /* get_next_hop */
    netdev_dpdk_get_status,
    NULL,                       /* arp_lookup */

    netdev_dpdk_update_flags,

    netdev_dpdk_rxq_alloc,
    netdev_dpdk_rxq_construct,
    netdev_dpdk_rxq_destruct,
    netdev_dpdk_rxq_dealloc,
    netdev_dpdk_rxq_recv,
    NULL,                       /* rxq_wait */
    NULL,                       /* rxq_drain */
};

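/* Consumes the EAL arguments from vswitchd's command line.  'argv[1]' must
 * be "--dpdk"; the program name is substituted for it so that rte_eal_init()
 * sees a conventional argv.  Returns the number of arguments the caller
 * should skip, including "--dpdk" itself. */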
int
dpdk_init(int argc, char **argv)
{
    int result;

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Make sure program name passed to rte_eal_init() is vswitchd. */
    argv[1] = argv[0];

    argc--;
    argv++;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL\n");
    }

    rte_memzone_dump();
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    return result + 1;
}

void
netdev_dpdk_register(void)
{
    netdev_register_provider(&netdev_dpdk_class);
}

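/* Pins the calling PMD thread to 'cpu' and records that core as the
 * thread's DPDK lcore id, so that rte_lcore_id() (used, e.g., for tx queue
 * selection in netdev_dpdk_send()) returns the pinned core. */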
int
pmd_thread_setaffinity_cpu(int cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}