/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "connectivity.h"
#include "dpif-netdev.h"
#include "list.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "ofp-print.h"
#include "ofpbuf.h"
#include "ovs-thread.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "shash.h"
#include "seq.h"
#include "sset.h"
#include "unaligned.h"
#include "timeval.h"
#include "unixctl.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

/*
 * We need to reserve extra space in each mbuf so that the DMA addresses
 * can be aligned to 4KB.
 */
#define MTU_TO_MAX_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu) (MTU_TO_MAX_LEN(mtu) + (512) + \
                        sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

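/*
 * For example, with the default Ethernet MTU of 1500:
 * MTU_TO_MAX_LEN(1500) = 1500 + 14 (Ethernet header) + 4 (CRC) = 1518, and
 * MBUF_SIZE(1500) = 1518 + 512 + sizeof(struct rte_mbuf)
 *                   + RTE_PKTMBUF_HEADROOM,
 * where the last two terms depend on the DPDK build.
 */
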
/* TODO: mempool size should be based on system resources. */
#define NB_MBUF (4096 * 64)
#define MP_CACHE_SZ (256 * 2)
#define SOCKET0 0

#define NON_PMD_THREAD_TX_QUEUE 0

/* TODO: These constants need per-NIC values. */
#define RX_PTHRESH 32 /* Default value of RX prefetch threshold reg. */
#define RX_HTHRESH 32 /* Default value of RX host threshold reg. */
#define RX_WTHRESH 16 /* Default value of RX write-back threshold reg. */

#define TX_PTHRESH 36 /* Default value of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /* Default value of TX host threshold reg. */
#define TX_WTHRESH 0  /* Default value of TX write-back threshold reg. */

static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .split_hdr_size = 0,
        .header_split = 0, /* Header split disabled. */
        .hw_ip_checksum = 0, /* IP checksum offload disabled. */
        .hw_vlan_filter = 0, /* VLAN filtering disabled. */
        .jumbo_frame = 0, /* Jumbo frame support disabled. */
        .hw_strip_crc = 0,
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_key = NULL,
            .rss_hf = ETH_RSS_IPV4_TCP | ETH_RSS_IPV4 | ETH_RSS_IPV6,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

static const struct rte_eth_rxconf rx_conf = {
    .rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
    },
};

static const struct rte_eth_txconf tx_conf = {
    .tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
    },
    .tx_free_thresh = 0,
    .tx_rs_thresh = 0,
};

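/*
 * Transmit batching parameters: dpdk_queue_pkt() buffers up to
 * MAX_TX_QUEUE_LEN mbufs per queue and flushes the batch either when it
 * fills up or when DRAIN_TSC timer cycles have elapsed since the first
 * packet was queued.
 */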
enum { MAX_RX_QUEUE_LEN = 64 };
enum { MAX_TX_QUEUE_LEN = 64 };
enum { DRAIN_TSC = 200000ULL };

static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = LIST_INITIALIZER(&dpdk_list);

static struct list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = LIST_INITIALIZER(&dpdk_mp_list);

static pthread_t watchdog_thread;

struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct dpdk_tx_queue {
    rte_spinlock_t tx_lock;
    int count;
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};

struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;

    struct dpdk_tx_queue tx_q[NR_QUEUE];

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats_offset;
    struct netdev_stats stats;

    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* In dpdk_list. */
    struct list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static int netdev_dpdk_construct(struct netdev *);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}

/* TODO: use DPDK malloc for the entire OVS. In fact, huge pages should be
 * used for all the other segments too: data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}

void
free_dpdk_buf(struct ofpbuf *b)
{
    struct rte_mbuf *pkt;

    pkt = b->private_p;
    if (!pkt) {
        return;
    }

    rte_mempool_put(pkt->pool, pkt);
}

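/*
 * Returns a reference to the mempool that serves mbufs of size 'mtu' on
 * NUMA socket 'socket_id', creating the pool on first use.  Pools are
 * shared by every port with the same (socket_id, mtu) pair and are
 * reference counted; release the reference with dpdk_mp_put().
 */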
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d", dmp->mtu);
    dmp->mp = rte_mempool_create(mp_name, NB_MBUF, MBUF_SIZE(mtu),
                                 MP_CACHE_SZ,
                                 sizeof(struct rte_pktmbuf_pool_private),
                                 rte_pktmbuf_pool_init, NULL,
                                 rte_pktmbuf_init, NULL,
                                 socket_id, 0);
    if (dmp->mp == NULL) {
        rte_free(dmp);
        return NULL;
    }

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}

static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy the mempool. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* Destroy the mempool. */
    }
#endif
}

static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        seq_change(connectivity_seq_get());

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX)
                        ? "full-duplex" : "half-duplex");
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}

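/*
 * Watchdog thread: polls the link state of every DPDK device once per
 * DPDK_PORT_WATCHDOG_INTERVAL seconds and wakes connectivity waiters on
 * any change.
 */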
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}

static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct ether_addr eth_addr;
    int diag;
    int i;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return -ENODEV;
    }

    diag = rte_eth_dev_configure(dev->port_id, NR_QUEUE, NR_QUEUE, &port_conf);
    if (diag) {
        VLOG_ERR("eth dev config error %d", diag);
        return diag;
    }

    for (i = 0; i < NR_QUEUE; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, 64, 0, &tx_conf);
        if (diag) {
            VLOG_ERR("eth dev tx queue setup error %d", diag);
            return diag;
        }
    }

    for (i = 0; i < NR_QUEUE; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, 64, 0, &rx_conf,
                                      dev->dpdk_mp->mp);
        if (diag) {
            VLOG_ERR("eth dev rx queue setup error %d", diag);
            return diag;
        }
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("eth dev start error %d", diag);
        return diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;
    return 0;
}

static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

    return &netdev->up;
}

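/*
 * Netdev names are expected to have the form "dpdkN", where N selects the
 * DPDK port, e.g. "dpdk0" binds the netdev to DPDK port 0.
 */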
static int
netdev_dpdk_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    unsigned int port_no;
    char *cport;
    int err;
    int i;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    cport = netdev_->name + 4; /* Names always start with "dpdk". */

    if (strncmp(netdev_->name, "dpdk", 4)) {
        err = ENODEV;
        goto unlock_dpdk;
    }

    port_no = strtol(cport, 0, 0); /* string must be null terminated */

    for (i = 0; i < NR_QUEUE; i++) {
        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
    }

    ovs_mutex_init(&netdev->mutex);

    ovs_mutex_lock(&netdev->mutex);
    netdev->flags = 0;

    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);

    /* TODO: need to discover the device node at run time. */
    netdev->socket_id = SOCKET0;
    netdev->port_id = port_no;

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock_dev;
    }

    err = dpdk_eth_dev_init(netdev);
    if (err) {
        goto unlock_dev;
    }
    netdev_->n_rxq = NR_QUEUE;

    list_push_back(&dpdk_list, &netdev->list_node);

unlock_dev:
    ovs_mutex_unlock(&netdev->mutex);
unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);

    ovs_mutex_destroy(&dev->mutex);
}

static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}

static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    /* TODO: Allow configuring the number of queues. */
    smap_add_format(args, "configured_rx_queues", "%u", netdev_->n_rxq);
    smap_add_format(args, "configured_tx_queues", "%u", netdev_->n_rxq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}

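/*
 * Transmits any mbufs buffered on tx queue 'qid' with a single
 * rte_eth_tx_burst() call.  Mbufs that the NIC refuses are returned to
 * the mempool, i.e. dropped rather than retried.
 */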
inline static void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx;

    if (txq->count == 0) {
        return;
    }
    rte_spinlock_lock(&txq->tx_lock);
    nb_tx = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts, txq->count);
    if (nb_tx != txq->count) {
        /* Free any buffers that we could not transmit. */
        rte_mempool_put_bulk(dev->dpdk_mp->mp,
                             (void **) &txq->burst_pkts[nb_tx],
                             (txq->count - nb_tx));
    }
    txq->count = 0;
    rte_spinlock_unlock(&txq->tx_lock);
}

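/*
 * Wraps a received mbuf in an ofpbuf without copying the packet data: the
 * ofpbuf's data pointer aliases the mbuf payload, 'private_p' keeps a
 * reference to the mbuf, and OFPBUF_DPDK routes the eventual free through
 * free_dpdk_buf(), which returns the mbuf to its mempool.
 */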
inline static struct ofpbuf *
build_ofpbuf(struct rte_mbuf *pkt)
{
    struct ofpbuf *b;

    b = ofpbuf_new(0);
    b->private_p = pkt;

    ofpbuf_set_data(b, pkt->pkt.data);
    ofpbuf_set_base(b, (char *) ofpbuf_data(b)
                       - DP_NETDEV_HEADROOM - VLAN_ETH_HEADER_LEN);
    b->allocated = pkt->buf_len;
    ofpbuf_set_size(b, rte_pktmbuf_data_len(pkt));
    b->source = OFPBUF_DPDK;

    dp_packet_pad(b);

    return b;
}

static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct ofpbuf **packet, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *burst_pkts[MAX_RX_QUEUE_LEN];
    int nb_rx;
    int i;

    dpdk_queue_flush(dev, rxq_->queue_id);

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             burst_pkts, MAX_RX_QUEUE_LEN);
    if (!nb_rx) {
        return EAGAIN;
    }

    for (i = 0; i < nb_rx; i++) {
        packet[i] = build_ofpbuf(burst_pkts[i]);
    }

    *c = nb_rx;

    return 0;
}

inline static void
dpdk_queue_pkt(struct netdev_dpdk *dev, int qid,
               struct rte_mbuf *pkt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;
    uint64_t cur_tsc;
    uint32_t nb_tx;

    rte_spinlock_lock(&txq->tx_lock);
    txq->burst_pkts[txq->count++] = pkt;
    if (txq->count == MAX_TX_QUEUE_LEN) {
        goto flush;
    }
    cur_tsc = rte_get_timer_cycles();
    if (txq->count == 1) {
        txq->tsc = cur_tsc;
    }
    diff_tsc = cur_tsc - txq->tsc;
    if (diff_tsc >= DRAIN_TSC) {
        goto flush;
    }
    rte_spinlock_unlock(&txq->tx_lock);
    return;

flush:
    nb_tx = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts, txq->count);
    if (nb_tx != txq->count) {
        /* Free any buffers that we could not transmit. */
        rte_mempool_put_bulk(dev->dpdk_mp->mp,
                             (void **) &txq->burst_pkts[nb_tx],
                             (txq->count - nb_tx));
    }
    txq->count = 0;
    rte_spinlock_unlock(&txq->tx_lock);
}

/* Tx function that copies 'buf' into a freshly allocated mbuf and transmits
 * it immediately on the non-PMD tx queue. */
static void
dpdk_do_tx_copy(struct netdev *netdev, char *buf, int size)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *pkt;

    pkt = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
    if (!pkt) {
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped++;
        ovs_mutex_unlock(&dev->mutex);
        return;
    }

    /* We have to do a copy for now. */
    memcpy(pkt->pkt.data, buf, size);

    rte_pktmbuf_data_len(pkt) = size;
    rte_pktmbuf_pkt_len(pkt) = size;

    dpdk_queue_pkt(dev, NON_PMD_THREAD_TX_QUEUE, pkt);
    dpdk_queue_flush(dev, NON_PMD_THREAD_TX_QUEUE);
}

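/*
 * Send path: packets that cannot be stolen, or that are not DPDK-backed,
 * are copied into a fresh mbuf and flushed immediately on the non-PMD tx
 * queue; DPDK-backed packets that may be stolen are handed to the NIC
 * zero-copy on a tx queue chosen from the sending lcore.
 */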
static int
netdev_dpdk_send(struct netdev *netdev,
                 struct ofpbuf *ofpbuf, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ret;

    if (ofpbuf_size(ofpbuf) > dev->max_packet_len) {
        VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                     (int) ofpbuf_size(ofpbuf), dev->max_packet_len);

        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped++;
        ovs_mutex_unlock(&dev->mutex);

        ret = E2BIG;
        goto out;
    }

    rte_prefetch0(&ofpbuf->private_p);
    if (!may_steal ||
        !ofpbuf->private_p || ofpbuf->source != OFPBUF_DPDK) {
        dpdk_do_tx_copy(netdev, (char *) ofpbuf_data(ofpbuf),
                        ofpbuf_size(ofpbuf));
    } else {
        struct rte_mbuf *pkt;
        int qid;

        pkt = ofpbuf->private_p;
        ofpbuf->private_p = NULL;
        rte_pktmbuf_data_len(pkt) = ofpbuf_size(ofpbuf);
        rte_pktmbuf_pkt_len(pkt) = ofpbuf_size(ofpbuf);

        qid = rte_lcore_id() % NR_QUEUE;

        dpdk_queue_pkt(dev, qid, pkt);
    }
    ret = 0;

out:
    if (may_steal) {
        ofpbuf_delete(ofpbuf);
    }

    return ret;
}

static int
netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    mp = dpdk_mp_get(dev->socket_id, mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        /* Restore the old configuration if re-initialization failed. */
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);

static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    *stats = dev->stats_offset;

    stats->rx_packets += rte_stats.ipackets;
    stats->tx_packets += rte_stats.opackets;
    stats->rx_bytes += rte_stats.ibytes;
    stats->tx_bytes += rte_stats.obytes;
    stats->rx_errors += rte_stats.ierrors;
    stats->tx_errors += rte_stats.oerrors;
    stats->multicast += rte_stats.imcasts;

    stats->tx_dropped += dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_stats(struct netdev *netdev, const struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dev->stats_offset = *stats;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}

static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}

static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->flags & NETDEV_UP) {
        err = rte_eth_dev_start(dev->port_id);
        if (err) {
            return err;
        }
    }

    if (dev->flags & NETDEV_PROMISC) {
        rte_eth_promiscuous_enable(dev->port_id);
    }

    if (!(dev->flags & NETDEV_UP)) {
        rte_eth_dev_stop(dev->port_id);
    }

    return 0;
}

static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}

static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x", dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);

    return 0;
}

static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);

        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

static int
dpdk_class_init(void)
{
    int result;

    if (rte_eal_init_ret) {
        return 0;
    }

    result = rte_pmd_init_all();
    if (result) {
        VLOG_ERR("Cannot init PMD");
        return result;
    }

    result = rte_eal_pci_probe();
    if (result) {
        VLOG_ERR("Cannot probe PCI");
        return result;
    }

    if (rte_eth_dev_count() < 1) {
        VLOG_ERR("No Ethernet devices found. Try assigning ports to UIO.");
    }

    VLOG_INFO("Ethernet Device Count: %d", (int) rte_eth_dev_count());

    list_init(&dpdk_list);
    list_init(&dpdk_mp_list);

    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    xpthread_create(&watchdog_thread, NULL, dpdk_watchdog, NULL);
    return 0;
}

static struct netdev_class netdev_dpdk_class = {
    "dpdk",
    dpdk_class_init,            /* init */
    NULL,                       /* netdev_dpdk_run */
    NULL,                       /* netdev_dpdk_wait */

    netdev_dpdk_alloc,
    netdev_dpdk_construct,
    netdev_dpdk_destruct,
    netdev_dpdk_dealloc,
    netdev_dpdk_get_config,
    NULL,                       /* netdev_dpdk_set_config */
    NULL,                       /* get_tunnel_config */

    netdev_dpdk_send,           /* send */
    NULL,                       /* send_wait */

    netdev_dpdk_set_etheraddr,
    netdev_dpdk_get_etheraddr,
    netdev_dpdk_get_mtu,
    netdev_dpdk_set_mtu,
    netdev_dpdk_get_ifindex,
    netdev_dpdk_get_carrier,
    netdev_dpdk_get_carrier_resets,
    netdev_dpdk_set_miimon,
    netdev_dpdk_get_stats,
    netdev_dpdk_set_stats,
    netdev_dpdk_get_features,
    NULL,                       /* set_advertisements */

    NULL,                       /* set_policing */
    NULL,                       /* get_qos_types */
    NULL,                       /* get_qos_capabilities */
    NULL,                       /* get_qos */
    NULL,                       /* set_qos */
    NULL,                       /* get_queue */
    NULL,                       /* set_queue */
    NULL,                       /* delete_queue */
    NULL,                       /* get_queue_stats */
    NULL,                       /* queue_dump_start */
    NULL,                       /* queue_dump_next */
    NULL,                       /* queue_dump_done */
    NULL,                       /* dump_queue_stats */

    NULL,                       /* get_in4 */
    NULL,                       /* set_in4 */
    NULL,                       /* get_in6 */
    NULL,                       /* add_router */
    NULL,                       /* get_next_hop */
    netdev_dpdk_get_status,
    NULL,                       /* arp_lookup */

    netdev_dpdk_update_flags,

    netdev_dpdk_rxq_alloc,
    netdev_dpdk_rxq_construct,
    netdev_dpdk_rxq_destruct,
    netdev_dpdk_rxq_dealloc,
    netdev_dpdk_rxq_recv,
    NULL,                       /* rxq_wait */
    NULL,                       /* rxq_drain */
};

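/*
 * Consumes DPDK EAL arguments from the command line: argv[1] must be
 * "--dpdk", and everything after it is handed to rte_eal_init().  An
 * illustrative invocation (not taken from this file) might look like:
 *   ovs-vswitchd --dpdk -c 0x1 -n 4 -- unix:$DB_SOCK ...
 */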
int
dpdk_init(int argc, char **argv)
{
    int result;

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    argc--;
    argv++;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL\n");
    }

    rte_memzone_dump();
    rte_eal_init_ret = 0;

    return result;
}

void
netdev_dpdk_register(void)
{
    netdev_register_provider(&netdev_dpdk_class);
}

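/*
 * Pins the calling pmd thread to 'cpu' and records that cpu as the
 * thread's DPDK lcore id, so that lcore-indexed APIs such as
 * rte_lcore_id() work from this thread.
 */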
int
pmd_thread_setaffinity_cpu(int cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}