1 /*
2 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include <string.h>
20 #include <signal.h>
21 #include <stdlib.h>
22 #include <pthread.h>
24 #include <errno.h>
25 #include <sched.h>
27 #include <unistd.h>
28 #include <sys/stat.h>
29 #include <stdio.h>
30 #include <sys/types.h>
32 #include <getopt.h>
33
34 #include "dirs.h"
35 #include "dp-packet.h"
36 #include "dpif-netdev.h"
37 #include "fatal-signal.h"
38 #include "netdev-dpdk.h"
39 #include "netdev-provider.h"
40 #include "netdev-vport.h"
41 #include "odp-util.h"
42 #include "openvswitch/dynamic-string.h"
43 #include "openvswitch/list.h"
44 #include "openvswitch/ofp-print.h"
45 #include "openvswitch/vlog.h"
46 #include "ovs-numa.h"
47 #include "ovs-thread.h"
48 #include "ovs-rcu.h"
49 #include "packets.h"
50 #include "shash.h"
51 #include "smap.h"
52 #include "sset.h"
53 #include "unaligned.h"
54 #include "timeval.h"
55 #include "unixctl.h"
56
57 #include "rte_config.h"
58 #include "rte_mbuf.h"
59 #include "rte_meter.h"
60 #include "rte_virtio_net.h"
61
62 VLOG_DEFINE_THIS_MODULE(dpdk);
63 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
64
65 #define DPDK_PORT_WATCHDOG_INTERVAL 5
66
67 #define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
68 #define OVS_VPORT_DPDK "ovs_dpdk"
69
70 /*
71 * We need to reserve extra space in the mbufs so that the DMA addresses
72 * can be aligned to 4KB.
73 * The minimum mbuf size is bounded so that a standard Ethernet MTU frame
74 * fits in a single buffer, avoiding RX scatter and its performance cost.
75 */
76 #define ETHER_HDR_MAX_LEN (ETHER_HDR_LEN + ETHER_CRC_LEN + (2 * VLAN_HEADER_LEN))
77 #define MTU_TO_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
78 #define MTU_TO_MAX_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_MAX_LEN)
79 #define FRAME_LEN_TO_MTU(frame_len) ((frame_len)- ETHER_HDR_LEN - ETHER_CRC_LEN)
80 #define MBUF_SIZE(mtu) ( MTU_TO_MAX_FRAME_LEN(mtu) \
81 + sizeof(struct dp_packet) \
82 + RTE_PKTMBUF_HEADROOM)
83 #define NETDEV_DPDK_MBUF_ALIGN 1024
84
85 /* Max and min number of packets in the mempool. OVS tries to allocate a
86 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
87 * enough hugepages) we keep halving the number until the allocation succeeds
88 * or we reach MIN_NB_MBUF. */
89
90 #define MAX_NB_MBUF (4096 * 64)
91 #define MIN_NB_MBUF (4096 * 4)
92 #define MP_CACHE_SZ RTE_MEMPOOL_CACHE_MAX_SIZE
93
94 /* MAX_NB_MBUF can be halved repeatedly until it reaches MIN_NB_MBUF. */
95 BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF) == 0);
96
97 /* The smallest possible NB_MBUF that we're going to try should be a multiple
98 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
99 BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
100 % MP_CACHE_SZ == 0);
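
/*
 * Worked numbers for the two asserts above (illustrative only):
 * MAX_NB_MBUF = 4096 * 64 = 262144 and MIN_NB_MBUF = 4096 * 4 = 16384,
 * so MAX_NB_MBUF / MIN_NB_MBUF = 16 and ROUND_DOWN_POW2(16) = 16.
 * 262144 % 16 == 0, so every attempt in the halving sequence tried by
 * dpdk_mp_get(), 262144 -> 131072 -> 65536 -> 32768 -> 16384, is a whole
 * number, and the smallest attempt, 16384, is a multiple of MP_CACHE_SZ
 * (512 with DPDK's default RTE_MEMPOOL_CACHE_MAX_SIZE).
 */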
101
102 /*
103 * DPDK XSTATS Counter names definition
104 */
105 #define XSTAT_RX_64_PACKETS "rx_size_64_packets"
106 #define XSTAT_RX_65_TO_127_PACKETS "rx_size_65_to_127_packets"
107 #define XSTAT_RX_128_TO_255_PACKETS "rx_size_128_to_255_packets"
108 #define XSTAT_RX_256_TO_511_PACKETS "rx_size_256_to_511_packets"
109 #define XSTAT_RX_512_TO_1023_PACKETS "rx_size_512_to_1023_packets"
110 #define XSTAT_RX_1024_TO_1522_PACKETS "rx_size_1024_to_1522_packets"
111 #define XSTAT_RX_1523_TO_MAX_PACKETS "rx_size_1523_to_max_packets"
112
113 #define XSTAT_TX_64_PACKETS "tx_size_64_packets"
114 #define XSTAT_TX_65_TO_127_PACKETS "tx_size_65_to_127_packets"
115 #define XSTAT_TX_128_TO_255_PACKETS "tx_size_128_to_255_packets"
116 #define XSTAT_TX_256_TO_511_PACKETS "tx_size_256_to_511_packets"
117 #define XSTAT_TX_512_TO_1023_PACKETS "tx_size_512_to_1023_packets"
118 #define XSTAT_TX_1024_TO_1522_PACKETS "tx_size_1024_to_1522_packets"
119 #define XSTAT_TX_1523_TO_MAX_PACKETS "tx_size_1523_to_max_packets"
120
121 #define XSTAT_TX_MULTICAST_PACKETS "tx_multicast_packets"
122 #define XSTAT_RX_BROADCAST_PACKETS "rx_broadcast_packets"
123 #define XSTAT_TX_BROADCAST_PACKETS "tx_broadcast_packets"
124 #define XSTAT_RX_UNDERSIZED_ERRORS "rx_undersized_errors"
125 #define XSTAT_RX_OVERSIZE_ERRORS "rx_oversize_errors"
126 #define XSTAT_RX_FRAGMENTED_ERRORS "rx_fragmented_errors"
127 #define XSTAT_RX_JABBER_ERRORS "rx_jabber_errors"
128
129 #define SOCKET0 0
130
131 #define NIC_PORT_RX_Q_SIZE 2048 /* Size of Physical NIC RX Queue, Max (n+32<=4096). */
132 #define NIC_PORT_TX_Q_SIZE 2048 /* Size of Physical NIC TX Queue, Max (n+32<=4096). */
133
134 #define OVS_VHOST_MAX_QUEUE_NUM 1024 /* Maximum number of vHost TX queues. */
135 #define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
136 #define OVS_VHOST_QUEUE_DISABLED (-2) /* Queue was disabled by guest and not
137 * yet mapped to another queue. */
138
139 #ifdef VHOST_CUSE
140 static char *cuse_dev_name = NULL; /* Character device cuse_dev_name. */
141 #endif
142 static char *vhost_sock_dir = NULL; /* Location of vhost-user sockets */
143
144 /*
145 * Maximum amount of time in microseconds to try to enqueue to vhost.
146 */
147 #define VHOST_ENQ_RETRY_USECS 100
148
149 static const struct rte_eth_conf port_conf = {
150 .rxmode = {
151 .mq_mode = ETH_MQ_RX_RSS,
152 .split_hdr_size = 0,
153 .header_split = 0, /* Header Split disabled */
154 .hw_ip_checksum = 0, /* IP checksum offload disabled */
155 .hw_vlan_filter = 0, /* VLAN filtering disabled */
156 .jumbo_frame = 0, /* Jumbo Frame Support disabled */
157 .hw_strip_crc = 0,
158 },
159 .rx_adv_conf = {
160 .rss_conf = {
161 .rss_key = NULL,
162 .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
163 },
164 },
165 .txmode = {
166 .mq_mode = ETH_MQ_TX_NONE,
167 },
168 };
169
170 enum { MAX_TX_QUEUE_LEN = 384 };
171 enum { DPDK_RING_SIZE = 256 };
172 BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
173 enum { DRAIN_TSC = 200000ULL };
174
175 enum dpdk_dev_type {
176 DPDK_DEV_ETH = 0,
177 DPDK_DEV_VHOST = 1,
178 };
179
180 static int rte_eal_init_ret = ENODEV;
181
182 static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
183
184 /* Quality of Service */
185
186 /* An instance of a QoS configuration. Always associated with a particular
187 * network device.
188 *
189 * Each QoS implementation subclasses this with whatever additional data it
190 * needs.
191 */
192 struct qos_conf {
193 const struct dpdk_qos_ops *ops;
194 };
195
196 /* A particular implementation of dpdk QoS operations.
197 *
198 * The functions below return 0 if successful or a positive errno value on
199 * failure, except where otherwise noted. All of them must be provided, except
200 * where otherwise noted.
201 */
202 struct dpdk_qos_ops {
203
204 /* Name of the QoS type */
205 const char *qos_name;
206
207 /* Called to construct the QoS implementation on 'netdev'. The
208 * implementation should make the appropriate calls to configure QoS
209 * according to 'details'. The implementation may assume that any current
210 * QoS configuration already installed should be destroyed before
211 * constructing the new configuration.
212 *
213 * The contents of 'details' should be documented as valid for 'ovs_name'
214 * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
215 * (which is built as ovs-vswitchd.conf.db(8)).
216 *
217 * This function must return 0 if and only if it sets 'netdev->qos_conf'
218 * to an initialized 'struct qos_conf'.
219 *
220 * For all QoS implementations it should always be non-null.
221 */
222 int (*qos_construct)(struct netdev *netdev, const struct smap *details);
223
224 /* Destroys the data structures allocated by the implementation as part of
225 * 'qos_conf'.
226 *
227 * For all QoS implementations it should always be non-null.
228 */
229 void (*qos_destruct)(struct netdev *netdev, struct qos_conf *conf);
230
231 /* Retrieves details of 'netdev->qos_conf' configuration into 'details'.
232 *
233 * The contents of 'details' should be documented as valid for 'ovs_name'
234 * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
235 * (which is built as ovs-vswitchd.conf.db(8)).
236 */
237 int (*qos_get)(const struct netdev *netdev, struct smap *details);
238
239 /* Reconfigures 'netdev->qos_conf' according to 'details', performing any
240 * required calls to complete the reconfiguration.
241 *
242 * The contents of 'details' should be documented as valid for 'ovs_name'
243 * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
244 * (which is built as ovs-vswitchd.conf.db(8)).
245 *
246 * This function may be null if 'qos_conf' is not configurable.
247 */
248 int (*qos_set)(struct netdev *netdev, const struct smap *details);
249
250 /* Modify an array of rte_mbufs. The modification is specific to
251 * each qos implementation.
252 *
253 * The function takes an array of mbufs and an int representing
254 * the current number of mbufs present in the array.
255 *
256 * After the function has performed a qos modification to the array of
257 * mbufs it returns an int representing the number of mbufs now present in
258 * the array. This value can then be passed to the port send function
259 * along with the modified array for transmission.
260 *
261 * For all QoS implementations it should always be non-null.
262 */
263 int (*qos_run)(struct netdev *netdev, struct rte_mbuf **pkts,
264 int pkt_cnt);
265 };
266
267 /* dpdk_qos_ops for each type of user space QoS implementation */
268 static const struct dpdk_qos_ops egress_policer_ops;
269
270 /*
271 * Array of dpdk_qos_ops, contains pointer to all supported QoS
272 * operations.
273 */
274 static const struct dpdk_qos_ops *const qos_confs[] = {
275 &egress_policer_ops,
276 NULL
277 };
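
/*
 * A minimal pass-through sketch, only to illustrate the dpdk_qos_ops
 * contract documented above. It is not part of the driver and is kept
 * under "#if 0"; the names are hypothetical (only egress_policer_ops
 * above is real), and a complete implementation must also supply
 * non-null 'qos_destruct' and 'qos_get' callbacks.
 */
#if 0
static int
noop_qos_construct(struct netdev *netdev, const struct smap *details OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* 'qos_construct' must leave 'netdev->qos_conf' initialized iff it
     * returns 0. */
    dev->qos_conf = dpdk_rte_mzalloc(sizeof *dev->qos_conf);
    dev->qos_conf->ops = &noop_ops;          /* Hypothetical ops table. */
    return 0;
}

static int
noop_qos_run(struct netdev *netdev OVS_UNUSED,
             struct rte_mbuf **pkts OVS_UNUSED, int pkt_cnt)
{
    /* Admit every packet unchanged; the return value is the number of
     * mbufs left in 'pkts' for the send path. */
    return pkt_cnt;
}
#endif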
278
279 /* Contains all 'struct dpdk_dev's. */
280 static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
281 = OVS_LIST_INITIALIZER(&dpdk_list);
282
283 static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
284 = OVS_LIST_INITIALIZER(&dpdk_mp_list);
285
286 /* This mutex must be used by non pmd threads when allocating or freeing
287 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
288 * use mempools, a non pmd thread should hold this mutex while calling them */
289 static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
290
291 struct dpdk_mp {
292 struct rte_mempool *mp;
293 int mtu;
294 int socket_id;
295 int refcount;
296 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
297 };
298
299 /* There should be one 'struct dpdk_tx_queue' created for
300 * each cpu core. */
301 struct dpdk_tx_queue {
302 bool flush_tx; /* Set to true to flush queue every time */
303 /* pkts are queued. */
304 int count;
305 rte_spinlock_t tx_lock; /* Protects the members and the NIC queue
306 * from concurrent access. It is used only
307 * if the queue is shared among different
308 * pmd threads (see 'txq_needs_locking'). */
309 int map; /* Mapping of configured vhost-user queues
310 * to queues enabled by the guest. */
311 uint64_t tsc;
312 struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
313 };
314
315 /* DPDK has no way to remove DPDK ring Ethernet devices, so we have
316 * to keep them around once they've been created.
317 */
318
319 static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
320 = OVS_LIST_INITIALIZER(&dpdk_ring_list);
321
322 struct dpdk_ring {
323 /* For the client rings */
324 struct rte_ring *cring_tx;
325 struct rte_ring *cring_rx;
326 unsigned int user_port_id; /* User-given port number, parsed from port name. */
327 int eth_port_id; /* ethernet device port id */
328 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
329 };
330
331 struct netdev_dpdk {
332 struct netdev up;
333 int port_id;
334 int max_packet_len;
335 enum dpdk_dev_type type;
336
337 struct dpdk_tx_queue *tx_q;
338
339 struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);
340
341 struct dpdk_mp *dpdk_mp;
342 int mtu;
343 int socket_id;
344 int buf_size;
345 struct netdev_stats stats;
346 /* Protects stats */
347 rte_spinlock_t stats_lock;
348
349 struct eth_addr hwaddr;
350 enum netdev_flags flags;
351
352 struct rte_eth_link link;
353 int link_reset_cnt;
354
355 /* The user might request more txqs than the NIC has. We remap those
356 * ('up.n_txq') onto these ('real_n_txq').
357 * If the numbers match, 'txq_needs_locking' is false, otherwise it is
358 * true and we will take a spinlock on transmission */
359 int real_n_txq;
360 int real_n_rxq;
361 bool txq_needs_locking;
362
363 /* virtio-net structure for vhost device */
364 OVSRCU_TYPE(struct virtio_net *) virtio_dev;
365
366 /* Identifier used to distinguish vhost devices from each other */
367 char vhost_id[PATH_MAX];
368
369 /* In dpdk_list. */
370 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
371
372 /* QoS configuration and lock for the device */
373 struct qos_conf *qos_conf;
374 rte_spinlock_t qos_lock;
375
376 };
377
378 struct netdev_rxq_dpdk {
379 struct netdev_rxq up;
380 int port_id;
381 };
382
383 static bool dpdk_thread_is_pmd(void);
384
385 static int netdev_dpdk_construct(struct netdev *);
386
387 struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);
388
389 static bool
390 is_dpdk_class(const struct netdev_class *class)
391 {
392 return class->construct == netdev_dpdk_construct;
393 }
394
395 /* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
396 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
397 * value, insufficient buffers are allocated to accommodate the packet in its
398 * entirety. Furthermore, certain drivers need to ensure that there is also
399 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
400 * frames). If the RX buffer is too small, then the driver enables scatter RX
401 * behaviour, which reduces performance. To prevent this, use a buffer size that
402 * is closest to 'mtu', but which satisfies the aforementioned criteria.
403 */
404 static uint32_t
405 dpdk_buf_size(int mtu)
406 {
407 return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
408 NETDEV_DPDK_MBUF_ALIGN);
409 }
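
/*
 * Worked example, assuming DPDK's default RTE_PKTMBUF_HEADROOM of 128
 * bytes:
 *   dpdk_buf_size(1500)
 *     = ROUND_UP(1500 + 14 (eth hdr) + 4 (crc) + 8 (two vlans) + 128, 1024)
 *     = ROUND_UP(1654, 1024) = 2048,
 * i.e. a standard-MTU port gets 2KB buffers, a multiple of the 1K driver
 * granularity described above.
 */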
410
411 /* XXX: Use DPDK malloc for the entire OVS; in fact, huge pages should be
412 * used for all the other segments too: data, bss and text. */
413
414 static void *
415 dpdk_rte_mzalloc(size_t sz)
416 {
417 void *ptr;
418
419 ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
420 if (ptr == NULL) {
421 out_of_memory();
422 }
423 return ptr;
424 }
425
426 /* XXX this function should be called only by pmd threads (or by non pmd
427 * threads holding the nonpmd_mempool_mutex) */
428 void
429 free_dpdk_buf(struct dp_packet *p)
430 {
431 struct rte_mbuf *pkt = (struct rte_mbuf *) p;
432
433 rte_pktmbuf_free(pkt);
434 }
435
436 static void
437 ovs_rte_pktmbuf_init(struct rte_mempool *mp,
438 void *opaque_arg OVS_UNUSED,
439 void *_m,
440 unsigned i OVS_UNUSED)
441 {
442 struct rte_mbuf *m = _m;
443
444 rte_pktmbuf_init(mp, opaque_arg, _m, i);
445
446 dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
447 }
448
449 static struct dpdk_mp *
450 dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
451 {
452 struct dpdk_mp *dmp = NULL;
453 char mp_name[RTE_MEMPOOL_NAMESIZE];
454 unsigned mp_size;
455 struct rte_pktmbuf_pool_private mbp_priv;
456
457 LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
458 if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
459 dmp->refcount++;
460 return dmp;
461 }
462 }
463
464 dmp = dpdk_rte_mzalloc(sizeof *dmp);
465 dmp->socket_id = socket_id;
466 dmp->mtu = mtu;
467 dmp->refcount = 1;
468 mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
469 mbp_priv.mbuf_priv_size = sizeof (struct dp_packet) - sizeof (struct rte_mbuf);
470
471 mp_size = MAX_NB_MBUF;
472 do {
473 if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
474 dmp->mtu, dmp->socket_id, mp_size) < 0) {
475 return NULL;
476 }
477
478 dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
479 MP_CACHE_SZ,
480 sizeof(struct rte_pktmbuf_pool_private),
481 rte_pktmbuf_pool_init, &mbp_priv,
482 ovs_rte_pktmbuf_init, NULL,
483 socket_id, 0);
484 } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);
485
486 if (dmp->mp == NULL) {
487 return NULL;
488 } else {
489 VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size );
490 }
491
492 ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
493 return dmp;
494 }
495
496 static void
497 dpdk_mp_put(struct dpdk_mp *dmp)
498 {
499
500 if (!dmp) {
501 return;
502 }
503
504 dmp->refcount--;
505 ovs_assert(dmp->refcount >= 0);
506
507 #if 0
508 /* I could not find any API to destroy mp. */
509 if (dmp->refcount == 0) {
510 list_delete(dmp->list_node);
511 /* destroy mp-pool. */
512 }
513 #endif
514 }
515
516 static void
517 check_link_status(struct netdev_dpdk *dev)
518 {
519 struct rte_eth_link link;
520
521 rte_eth_link_get_nowait(dev->port_id, &link);
522
523 if (dev->link.link_status != link.link_status) {
524 netdev_change_seq_changed(&dev->up);
525
526 dev->link_reset_cnt++;
527 dev->link = link;
528 if (dev->link.link_status) {
529 VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
530 dev->port_id, (unsigned)dev->link.link_speed,
531 (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
532 ("full-duplex") : ("half-duplex"));
533 } else {
534 VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
535 }
536 }
537 }
538
539 static void *
540 dpdk_watchdog(void *dummy OVS_UNUSED)
541 {
542 struct netdev_dpdk *dev;
543
544 pthread_detach(pthread_self());
545
546 for (;;) {
547 ovs_mutex_lock(&dpdk_mutex);
548 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
549 ovs_mutex_lock(&dev->mutex);
550 check_link_status(dev);
551 ovs_mutex_unlock(&dev->mutex);
552 }
553 ovs_mutex_unlock(&dpdk_mutex);
554 xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
555 }
556
557 return NULL;
558 }
559
560 static int
561 dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
562 {
563 int diag = 0;
564 int i;
565
566 /* A device may report more queues than it makes available (this has
567 * been observed for Intel xl710, which reserves some of them for
568 * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
569 * available. When this happens we can retry the configuration
570 * and request fewer queues. */
571 while (n_rxq && n_txq) {
572 if (diag) {
573 VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
574 }
575
576 diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
577 if (diag) {
578 break;
579 }
580
581 for (i = 0; i < n_txq; i++) {
582 diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
583 dev->socket_id, NULL);
584 if (diag) {
585 VLOG_INFO("Interface %s txq(%d) setup error: %s",
586 dev->up.name, i, rte_strerror(-diag));
587 break;
588 }
589 }
590
591 if (i != n_txq) {
592 /* Retry with fewer tx queues */
593 n_txq = i;
594 continue;
595 }
596
597 for (i = 0; i < n_rxq; i++) {
598 diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
599 dev->socket_id, NULL,
600 dev->dpdk_mp->mp);
601 if (diag) {
602 VLOG_INFO("Interface %s rxq(%d) setup error: %s",
603 dev->up.name, i, rte_strerror(-diag));
604 break;
605 }
606 }
607
608 if (i != n_rxq) {
609 /* Retry with fewer rx queues */
610 n_rxq = i;
611 continue;
612 }
613
614 dev->up.n_rxq = n_rxq;
615 dev->real_n_txq = n_txq;
616
617 return 0;
618 }
619
620 return diag;
621 }
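
/*
 * For example (numbers are illustrative): if a NIC reports 16 tx queues
 * but reserves four of them, a request for (rxq:4 txq:16) fails at
 * txq 12, so the loop retries with (rxq:4 txq:12); setup then succeeds
 * and 'real_n_txq' ends up smaller than the requested 'up.n_txq'.
 */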
622
623
624 static int
625 dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
626 {
627 struct rte_pktmbuf_pool_private *mbp_priv;
628 struct rte_eth_dev_info info;
629 struct ether_addr eth_addr;
630 int diag;
631 int n_rxq, n_txq;
632
633 if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
634 return ENODEV;
635 }
636
637 rte_eth_dev_info_get(dev->port_id, &info);
638
639 n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
640 n_txq = MIN(info.max_tx_queues, dev->up.n_txq);
641
642 diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
643 if (diag) {
644 VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
645 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
646 return -diag;
647 }
648
649 diag = rte_eth_dev_start(dev->port_id);
650 if (diag) {
651 VLOG_ERR("Interface %s start error: %s", dev->up.name,
652 rte_strerror(-diag));
653 return -diag;
654 }
655
656 rte_eth_promiscuous_enable(dev->port_id);
657 rte_eth_allmulticast_enable(dev->port_id);
658
659 memset(&eth_addr, 0x0, sizeof(eth_addr));
660 rte_eth_macaddr_get(dev->port_id, &eth_addr);
661 VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
662 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));
663
664 memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
665 rte_eth_link_get_nowait(dev->port_id, &dev->link);
666
667 mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
668 dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
669
670 dev->flags = NETDEV_UP | NETDEV_PROMISC;
671 return 0;
672 }
673
674 static struct netdev_dpdk *
675 netdev_dpdk_cast(const struct netdev *netdev)
676 {
677 return CONTAINER_OF(netdev, struct netdev_dpdk, up);
678 }
679
680 static struct netdev *
681 netdev_dpdk_alloc(void)
682 {
683 struct netdev_dpdk *dev;
684
685 if (!rte_eal_init_ret) { /* Only after successful initialization */
686 dev = dpdk_rte_mzalloc(sizeof *dev);
687 if (dev) {
688 return &dev->up;
689 }
690 }
691 return NULL;
692 }
693
694 static void
695 netdev_dpdk_alloc_txq(struct netdev_dpdk *dev, unsigned int n_txqs)
696 {
697 unsigned i;
698
699 dev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *dev->tx_q);
700 for (i = 0; i < n_txqs; i++) {
701 int numa_id = ovs_numa_get_numa_id(i);
702
703 if (!dev->txq_needs_locking) {
704 /* Each index is considered as a cpu core id, since there should
705 * be one tx queue for each cpu core. If the corresponding core
706 * is on the same numa node as 'dev', set 'flush_tx' so that queued
707 * packets are sent immediately. */
708 dev->tx_q[i].flush_tx = dev->socket_id == numa_id;
709 } else {
710 /* Queues are shared among CPUs. Always flush */
711 dev->tx_q[i].flush_tx = true;
712 }
713
714 /* Initialize map for vhost devices. */
715 dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
716 rte_spinlock_init(&dev->tx_q[i].tx_lock);
717 }
718 }
719
720 static int
721 netdev_dpdk_init(struct netdev *netdev, unsigned int port_no,
722 enum dpdk_dev_type type)
723 OVS_REQUIRES(dpdk_mutex)
724 {
725 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
726 int sid;
727 int err = 0;
728 uint32_t buf_size;
729
730 ovs_mutex_init(&dev->mutex);
731 ovs_mutex_lock(&dev->mutex);
732
733 rte_spinlock_init(&dev->stats_lock);
734
735 /* If the 'sid' is negative, it means that the kernel failed
736 * to obtain the pci numa info. In that situation, always
737 * use 'SOCKET0'. */
738 if (type == DPDK_DEV_ETH) {
739 sid = rte_eth_dev_socket_id(port_no);
740 } else {
741 sid = rte_lcore_to_socket_id(rte_get_master_lcore());
742 }
743
744 dev->socket_id = sid < 0 ? SOCKET0 : sid;
745 dev->port_id = port_no;
746 dev->type = type;
747 dev->flags = 0;
748 dev->mtu = ETHER_MTU;
749 dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
750
751 buf_size = dpdk_buf_size(dev->mtu);
752 dev->dpdk_mp = dpdk_mp_get(dev->socket_id, FRAME_LEN_TO_MTU(buf_size));
753 if (!dev->dpdk_mp) {
754 err = ENOMEM;
755 goto unlock;
756 }
757
758 /* Initialise QoS configuration to NULL and qos lock to unlocked */
759 dev->qos_conf = NULL;
760 rte_spinlock_init(&dev->qos_lock);
761
762 netdev->n_txq = NR_QUEUE;
763 netdev->n_rxq = NR_QUEUE;
764 netdev->requested_n_rxq = NR_QUEUE;
765 dev->real_n_txq = NR_QUEUE;
766
767 if (type == DPDK_DEV_ETH) {
768 netdev_dpdk_alloc_txq(dev, NR_QUEUE);
769 err = dpdk_eth_dev_init(dev);
770 if (err) {
771 goto unlock;
772 }
773 } else {
774 netdev_dpdk_alloc_txq(dev, OVS_VHOST_MAX_QUEUE_NUM);
775 }
776
777 ovs_list_push_back(&dpdk_list, &dev->list_node);
778
779 unlock:
780 if (err) {
781 rte_free(dev->tx_q);
782 }
783 ovs_mutex_unlock(&dev->mutex);
784 return err;
785 }
786
787 /* dev_name must be the prefix followed by an unsigned decimal number
788 * (no leading '+' or '-' sign is allowed). */
789 static int
790 dpdk_dev_parse_name(const char dev_name[], const char prefix[],
791 unsigned int *port_no)
792 {
793 const char *cport;
794
795 if (strncmp(dev_name, prefix, strlen(prefix))) {
796 return ENODEV;
797 }
798
799 cport = dev_name + strlen(prefix);
800
801 if (str_to_uint(cport, 10, port_no)) {
802 return 0;
803 } else {
804 return ENODEV;
805 }
806 }
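
/*
 * For example, dpdk_dev_parse_name("dpdk7", "dpdk", &port_no) stores 7
 * in 'port_no' and returns 0, while "eth0" (wrong prefix) and "dpdk-7"
 * (signed number) both return ENODEV.
 */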
807
808 static int
809 vhost_construct_helper(struct netdev *netdev) OVS_REQUIRES(dpdk_mutex)
810 {
811 if (rte_eal_init_ret) {
812 return rte_eal_init_ret;
813 }
814
815 return netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);
816 }
817
818 static int
819 netdev_dpdk_vhost_cuse_construct(struct netdev *netdev)
820 {
821 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
822 int err;
823
824 if (rte_eal_init_ret) {
825 return rte_eal_init_ret;
826 }
827
828 ovs_mutex_lock(&dpdk_mutex);
829 strncpy(dev->vhost_id, netdev->name, sizeof(dev->vhost_id));
830 err = vhost_construct_helper(netdev);
831 ovs_mutex_unlock(&dpdk_mutex);
832 return err;
833 }
834
835 static int
836 netdev_dpdk_vhost_user_construct(struct netdev *netdev)
837 {
838 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
839 const char *name = netdev->name;
840 int err;
841
842 /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
843 * the file system. '/' or '\' would traverse directories, so they're not
844 * acceptable in 'name'. */
845 if (strchr(name, '/') || strchr(name, '\\')) {
846 VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
847 "A valid name must not include '/' or '\\'",
848 name);
849 return EINVAL;
850 }
851
852 if (rte_eal_init_ret) {
853 return rte_eal_init_ret;
854 }
855
856 ovs_mutex_lock(&dpdk_mutex);
857 /* Take the name of the vhost-user port and append it to the location where
858 * the socket is to be created, then register the socket.
859 */
860 snprintf(dev->vhost_id, sizeof(dev->vhost_id), "%s/%s",
861 vhost_sock_dir, name);
862
863 err = rte_vhost_driver_register(dev->vhost_id);
864 if (err) {
865 VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
866 dev->vhost_id);
867 } else {
868 fatal_signal_add_file_to_unlink(dev->vhost_id);
869 VLOG_INFO("Socket %s created for vhost-user port %s\n",
870 dev->vhost_id, name);
871 err = vhost_construct_helper(netdev);
872 }
873
874 ovs_mutex_unlock(&dpdk_mutex);
875 return err;
876 }
877
878 static int
879 netdev_dpdk_construct(struct netdev *netdev)
880 {
881 unsigned int port_no;
882 int err;
883
884 if (rte_eal_init_ret) {
885 return rte_eal_init_ret;
886 }
887
888 /* Names always start with "dpdk" */
889 err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
890 if (err) {
891 return err;
892 }
893
894 ovs_mutex_lock(&dpdk_mutex);
895 err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
896 ovs_mutex_unlock(&dpdk_mutex);
897 return err;
898 }
899
900 static void
901 netdev_dpdk_destruct(struct netdev *netdev)
902 {
903 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
904
905 ovs_mutex_lock(&dev->mutex);
906 rte_eth_dev_stop(dev->port_id);
907 ovs_mutex_unlock(&dev->mutex);
908
909 ovs_mutex_lock(&dpdk_mutex);
910 rte_free(dev->tx_q);
911 ovs_list_remove(&dev->list_node);
912 dpdk_mp_put(dev->dpdk_mp);
913 ovs_mutex_unlock(&dpdk_mutex);
914 }
915
916 static void
917 netdev_dpdk_vhost_destruct(struct netdev *netdev)
918 {
919 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
920
921 /* Guest becomes an orphan if still attached. */
922 if (netdev_dpdk_get_virtio(dev) != NULL) {
923 VLOG_ERR("Removing port '%s' while vhost device still attached.",
924 netdev->name);
925 VLOG_ERR("To restore connectivity after re-adding of port, VM on socket"
926 " '%s' must be restarted.",
927 dev->vhost_id);
928 }
929
930 if (rte_vhost_driver_unregister(dev->vhost_id)) {
931 VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
932 } else {
933 fatal_signal_remove_file_to_unlink(dev->vhost_id);
934 }
935
936 ovs_mutex_lock(&dpdk_mutex);
937 rte_free(dev->tx_q);
938 ovs_list_remove(&dev->list_node);
939 dpdk_mp_put(dev->dpdk_mp);
940 ovs_mutex_unlock(&dpdk_mutex);
941 }
942
943 static void
944 netdev_dpdk_dealloc(struct netdev *netdev)
945 {
946 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
947
948 rte_free(dev);
949 }
950
951 static int
952 netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
953 {
954 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
955
956 ovs_mutex_lock(&dev->mutex);
957
958 smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
959 smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
960 smap_add_format(args, "requested_tx_queues", "%d", netdev->n_txq);
961 smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
962 ovs_mutex_unlock(&dev->mutex);
963
964 return 0;
965 }
966
967 static int
968 netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
969 {
970 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
971
972 ovs_mutex_lock(&dev->mutex);
973 netdev->requested_n_rxq = MAX(smap_get_int(args, "n_rxq",
974 netdev->requested_n_rxq), 1);
975 netdev_change_seq_changed(netdev);
976 ovs_mutex_unlock(&dev->mutex);
977
978 return 0;
979 }
980
981 static int
982 netdev_dpdk_get_numa_id(const struct netdev *netdev)
983 {
984 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
985
986 return dev->socket_id;
987 }
988
989 /* Sets the number of tx queues and rx queues for the dpdk interface.
990 * If the configuration fails, do not try to restore the old
991 * configuration; just return the error. */
992 static int
993 netdev_dpdk_set_multiq(struct netdev *netdev, unsigned int n_txq,
994 unsigned int n_rxq)
995 {
996 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
997 int err = 0;
998 int old_rxq, old_txq;
999
1000 if (netdev->n_txq == n_txq && netdev->n_rxq == n_rxq) {
1001 return err;
1002 }
1003
1004 ovs_mutex_lock(&dpdk_mutex);
1005 ovs_mutex_lock(&dev->mutex);
1006
1007 rte_eth_dev_stop(dev->port_id);
1008
1009 old_txq = netdev->n_txq;
1010 old_rxq = netdev->n_rxq;
1011 netdev->n_txq = n_txq;
1012 netdev->n_rxq = n_rxq;
1013
1014 rte_free(dev->tx_q);
1015 err = dpdk_eth_dev_init(dev);
1016 netdev_dpdk_alloc_txq(dev, dev->real_n_txq);
1017 if (err) {
1018 /* If there has been an error, it means that the requested queues
1019 * have not been created. Restore the old numbers. */
1020 netdev->n_txq = old_txq;
1021 netdev->n_rxq = old_rxq;
1022 }
1023
1024 dev->txq_needs_locking = dev->real_n_txq != netdev->n_txq;
1025
1026 ovs_mutex_unlock(&dev->mutex);
1027 ovs_mutex_unlock(&dpdk_mutex);
1028
1029 return err;
1030 }
1031
1032 static int
1033 netdev_dpdk_vhost_cuse_set_multiq(struct netdev *netdev, unsigned int n_txq,
1034 unsigned int n_rxq)
1035 {
1036 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1037 int err = 0;
1038
1039 if (netdev->n_txq == n_txq && netdev->n_rxq == n_rxq) {
1040 return err;
1041 }
1042
1043 ovs_mutex_lock(&dpdk_mutex);
1044 ovs_mutex_lock(&dev->mutex);
1045
1046 netdev->n_txq = n_txq;
1047 dev->real_n_txq = 1;
1048 netdev->n_rxq = 1;
1049 dev->txq_needs_locking = dev->real_n_txq != netdev->n_txq;
1050
1051 ovs_mutex_unlock(&dev->mutex);
1052 ovs_mutex_unlock(&dpdk_mutex);
1053
1054 return err;
1055 }
1056
1057 static int
1058 netdev_dpdk_vhost_set_multiq(struct netdev *netdev, unsigned int n_txq,
1059 unsigned int n_rxq)
1060 {
1061 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1062 int err = 0;
1063
1064 if (netdev->n_txq == n_txq && netdev->n_rxq == n_rxq) {
1065 return err;
1066 }
1067
1068 ovs_mutex_lock(&dpdk_mutex);
1069 ovs_mutex_lock(&dev->mutex);
1070
1071 netdev->n_txq = n_txq;
1072 netdev->n_rxq = n_rxq;
1073
1074 ovs_mutex_unlock(&dev->mutex);
1075 ovs_mutex_unlock(&dpdk_mutex);
1076
1077 return err;
1078 }
1079
1080 static struct netdev_rxq *
1081 netdev_dpdk_rxq_alloc(void)
1082 {
1083 struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);
1084
1085 return &rx->up;
1086 }
1087
1088 static struct netdev_rxq_dpdk *
1089 netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
1090 {
1091 return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
1092 }
1093
1094 static int
1095 netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
1096 {
1097 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
1098 struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
1099
1100 ovs_mutex_lock(&dev->mutex);
1101 rx->port_id = dev->port_id;
1102 ovs_mutex_unlock(&dev->mutex);
1103
1104 return 0;
1105 }
1106
1107 static void
1108 netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)
1109 {
1110 }
1111
1112 static void
1113 netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
1114 {
1115 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
1116
1117 rte_free(rx);
1118 }
1119
1120 static inline void
1121 dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
1122 {
1123 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
1124 uint32_t nb_tx = 0;
1125
1126 while (nb_tx != txq->count) {
1127 uint32_t ret;
1128
1129 ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
1130 txq->count - nb_tx);
1131 if (!ret) {
1132 break;
1133 }
1134
1135 nb_tx += ret;
1136 }
1137
1138 if (OVS_UNLIKELY(nb_tx != txq->count)) {
1139 /* Free the buffers we couldn't transmit, one at a time (each
1140 * packet could come from a different mempool). */
1141 int i;
1142
1143 for (i = nb_tx; i < txq->count; i++) {
1144 rte_pktmbuf_free(txq->burst_pkts[i]);
1145 }
1146 rte_spinlock_lock(&dev->stats_lock);
1147 dev->stats.tx_dropped += txq->count - nb_tx;
1148 rte_spinlock_unlock(&dev->stats_lock);
1149 }
1150
1151 txq->count = 0;
1152 txq->tsc = rte_get_timer_cycles();
1153 }
1154
1155 static inline void
1156 dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
1157 {
1158 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
1159
1160 if (txq->count == 0) {
1161 return;
1162 }
1163 dpdk_queue_flush__(dev, qid);
1164 }
1165
1166 static bool
1167 is_vhost_running(struct virtio_net *virtio_dev)
1168 {
1169 return (virtio_dev != NULL && (virtio_dev->flags & VIRTIO_DEV_RUNNING));
1170 }
1171
1172 static inline void
1173 netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
1174 unsigned int packet_size)
1175 {
1176 /* Hard-coded search for the size bucket. */
1177 if (packet_size < 256) {
1178 if (packet_size >= 128) {
1179 stats->rx_128_to_255_packets++;
1180 } else if (packet_size <= 64) {
1181 stats->rx_1_to_64_packets++;
1182 } else {
1183 stats->rx_65_to_127_packets++;
1184 }
1185 } else {
1186 if (packet_size >= 1523) {
1187 stats->rx_1523_to_max_packets++;
1188 } else if (packet_size >= 1024) {
1189 stats->rx_1024_to_1522_packets++;
1190 } else if (packet_size < 512) {
1191 stats->rx_256_to_511_packets++;
1192 } else {
1193 stats->rx_512_to_1023_packets++;
1194 }
1195 }
1196 }
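
/*
 * The buckets above cover <= 64, 65-127, 128-255, 256-511, 512-1023,
 * 1024-1522 and >= 1523 bytes: e.g. a 60-byte runt counts as
 * rx_1_to_64_packets and a full 1518-byte frame as
 * rx_1024_to_1522_packets.
 */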
1197
1198 static inline void
1199 netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
1200 struct dp_packet **packets, int count)
1201 {
1202 int i;
1203 unsigned int packet_size;
1204 struct dp_packet *packet;
1205
1206 stats->rx_packets += count;
1207 for (i = 0; i < count; i++) {
1208 packet = packets[i];
1209 packet_size = dp_packet_size(packet);
1210
1211 if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
1212 /* This only protects the following multicast counting from
1213 * too short packets, but it does not stop the packet from
1214 * further processing. */
1215 stats->rx_errors++;
1216 stats->rx_length_errors++;
1217 continue;
1218 }
1219
1220 netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);
1221
1222 struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
1223 if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
1224 stats->multicast++;
1225 }
1226
1227 stats->rx_bytes += packet_size;
1228 }
1229 }
1230
1231 /*
1232 * The receive path for the vhost port is the TX path out of the guest.
1233 */
1234 static int
1235 netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
1236 struct dp_packet **packets, int *c)
1237 {
1238 struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
1239 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
1240 int qid = rxq->queue_id;
1241 uint16_t nb_rx = 0;
1242
1243 if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
1244 return EAGAIN;
1245 }
1246
1247 if (rxq->queue_id >= dev->real_n_rxq) {
1248 return EOPNOTSUPP;
1249 }
1250
1251 nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
1252 dev->dpdk_mp->mp,
1253 (struct rte_mbuf **)packets,
1254 NETDEV_MAX_BURST);
1255 if (!nb_rx) {
1256 return EAGAIN;
1257 }
1258
1259 rte_spinlock_lock(&dev->stats_lock);
1260 netdev_dpdk_vhost_update_rx_counters(&dev->stats, packets, nb_rx);
1261 rte_spinlock_unlock(&dev->stats_lock);
1262
1263 *c = (int) nb_rx;
1264 return 0;
1265 }
1266
1267 static int
1268 netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet **packets,
1269 int *c)
1270 {
1271 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
1272 struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
1273 int nb_rx;
1274
1275 /* There is only one tx queue for this core. Do not flush other
1276 * queues.
1277 * Also do not flush a tx queue that is shared among CPUs, since it
1278 * is always flushed. */
1279 if (rxq->queue_id == rte_lcore_id() &&
1280 OVS_LIKELY(!dev->txq_needs_locking)) {
1281 dpdk_queue_flush(dev, rxq->queue_id);
1282 }
1283
1284 nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
1285 (struct rte_mbuf **) packets,
1286 NETDEV_MAX_BURST);
1287 if (!nb_rx) {
1288 return EAGAIN;
1289 }
1290
1291 *c = nb_rx;
1292
1293 return 0;
1294 }
1295
1296 static inline int
1297 netdev_dpdk_qos_run__(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
1298 int cnt)
1299 {
1300 struct netdev *netdev = &dev->up;
1301
1302 if (dev->qos_conf != NULL) {
1303 rte_spinlock_lock(&dev->qos_lock);
1304 if (dev->qos_conf != NULL) {
1305 cnt = dev->qos_conf->ops->qos_run(netdev, pkts, cnt);
1306 }
1307 rte_spinlock_unlock(&dev->qos_lock);
1308 }
1309
1310 return cnt;
1311 }
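
/*
 * Note the double check of 'qos_conf' above: the first, unlocked read
 * lets the common no-QoS case skip the spinlock entirely, while the
 * second, locked read guards against the configuration being replaced
 * between the check and the lock.
 */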
1312
1313 static inline void
1314 netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
1315 struct dp_packet **packets,
1316 int attempted,
1317 int dropped)
1318 {
1319 int i;
1320 int sent = attempted - dropped;
1321
1322 stats->tx_packets += sent;
1323 stats->tx_dropped += dropped;
1324
1325 for (i = 0; i < sent; i++) {
1326 stats->tx_bytes += dp_packet_size(packets[i]);
1327 }
1328 }
1329
1330 static void
1331 __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
1332 struct dp_packet **pkts, int cnt,
1333 bool may_steal)
1334 {
1335 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1336 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
1337 struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
1338 unsigned int total_pkts = cnt;
1339 unsigned int qos_pkts = cnt;
1340 uint64_t start = 0;
1341
1342 qid = dev->tx_q[qid % dev->real_n_txq].map;
1343
1344 if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid < 0)) {
1345 rte_spinlock_lock(&dev->stats_lock);
1346 dev->stats.tx_dropped += cnt;
1347 rte_spinlock_unlock(&dev->stats_lock);
1348 goto out;
1349 }
1350
1351 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
1352
1353 /* Check whether QoS has been configured for the netdev. */
1354 cnt = netdev_dpdk_qos_run__(dev, cur_pkts, cnt);
1355 qos_pkts -= cnt;
1356
1357 do {
1358 int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
1359 unsigned int tx_pkts;
1360
1361 tx_pkts = rte_vhost_enqueue_burst(virtio_dev, vhost_qid,
1362 cur_pkts, cnt);
1363 if (OVS_LIKELY(tx_pkts)) {
1364 /* Packets have been sent.*/
1365 cnt -= tx_pkts;
1366 /* Prepare for possible next iteration.*/
1367 cur_pkts = &cur_pkts[tx_pkts];
1368 } else {
1369 uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
1370 unsigned int expired = 0;
1371
1372 if (!start) {
1373 start = rte_get_timer_cycles();
1374 }
1375
1376 /*
1377 * Unable to enqueue packets to vhost interface.
1378 * Check available entries before retrying.
1379 */
1380 while (!rte_vring_available_entries(virtio_dev, vhost_qid)) {
1381 if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
1382 expired = 1;
1383 break;
1384 }
1385 }
1386 if (expired) {
1387 /* break out of main loop. */
1388 break;
1389 }
1390 }
1391 } while (cnt);
1392
1393 rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
1394
1395 rte_spinlock_lock(&dev->stats_lock);
1396 cnt += qos_pkts;
1397 netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts, cnt);
1398 rte_spinlock_unlock(&dev->stats_lock);
1399
1400 out:
1401 if (may_steal) {
1402 int i;
1403
1404 for (i = 0; i < total_pkts; i++) {
1405 dp_packet_delete(pkts[i]);
1406 }
1407 }
1408 }
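
/*
 * The retry budget used above converts VHOST_ENQ_RETRY_USECS into TSC
 * cycles: with a (hypothetical) 2.4 GHz timer,
 * timeout = 100 * 2400000000 / 1e6 = 240000 cycles, so a stalled guest
 * queue is given up on after roughly 100 microseconds of polling.
 */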
1409
1410 inline static void
1411 dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
1412 struct rte_mbuf **pkts, int cnt)
1413 {
1414 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
1415 uint64_t diff_tsc;
1416
1417 int i = 0;
1418
1419 while (i < cnt) {
1420 int freeslots = MAX_TX_QUEUE_LEN - txq->count;
1421 int tocopy = MIN(freeslots, cnt-i);
1422
1423 memcpy(&txq->burst_pkts[txq->count], &pkts[i],
1424 tocopy * sizeof (struct rte_mbuf *));
1425
1426 txq->count += tocopy;
1427 i += tocopy;
1428
1429 if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
1430 dpdk_queue_flush__(dev, qid);
1431 }
1432 diff_tsc = rte_get_timer_cycles() - txq->tsc;
1433 if (diff_tsc >= DRAIN_TSC) {
1434 dpdk_queue_flush__(dev, qid);
1435 }
1436 }
1437 }
1438
1439 /* Tx function. Copies the packets into DPDK mbufs and then transmits them. */
1440 static void
1441 dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
1442 int cnt)
1443 OVS_NO_THREAD_SAFETY_ANALYSIS
1444 {
1445 #if !defined(__CHECKER__) && !defined(_WIN32)
1446 const size_t PKT_ARRAY_SIZE = cnt;
1447 #else
1448 /* Sparse or MSVC doesn't like variable length array. */
1449 enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
1450 #endif
1451 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1452 struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
1453 int dropped = 0;
1454 int newcnt = 0;
1455 int i;
1456
1457 /* If we are on a non pmd thread we have to use the mempool mutex, because
1458 * every non pmd thread shares the same mempool cache */
1459
1460 if (!dpdk_thread_is_pmd()) {
1461 ovs_mutex_lock(&nonpmd_mempool_mutex);
1462 }
1463
1464 for (i = 0; i < cnt; i++) {
1465 int size = dp_packet_size(pkts[i]);
1466
1467 if (OVS_UNLIKELY(size > dev->max_packet_len)) {
1468 VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
1469 (int)size, dev->max_packet_len);
1470
1471 dropped++;
1472 continue;
1473 }
1474
1475 mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
1476
1477 if (!mbufs[newcnt]) {
1478 dropped += cnt - i;
1479 break;
1480 }
1481
1482 /* We have to do a copy for now */
1483 memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *), dp_packet_data(pkts[i]), size);
1484
1485 rte_pktmbuf_data_len(mbufs[newcnt]) = size;
1486 rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;
1487
1488 newcnt++;
1489 }
1490
1491 if (dev->type == DPDK_DEV_VHOST) {
1492 __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs, newcnt, true);
1493 } else {
1494 unsigned int qos_pkts = newcnt;
1495
1496 /* Check if QoS has been configured for this netdev. */
1497 newcnt = netdev_dpdk_qos_run__(dev, mbufs, newcnt);
1498
1499 dropped += qos_pkts - newcnt;
1500 dpdk_queue_pkts(dev, qid, mbufs, newcnt);
1501 dpdk_queue_flush(dev, qid);
1502 }
1503
1504 if (OVS_UNLIKELY(dropped)) {
1505 rte_spinlock_lock(&dev->stats_lock);
1506 dev->stats.tx_dropped += dropped;
1507 rte_spinlock_unlock(&dev->stats_lock);
1508 }
1509
1510 if (!dpdk_thread_is_pmd()) {
1511 ovs_mutex_unlock(&nonpmd_mempool_mutex);
1512 }
1513 }
1514
1515 static int
1516 netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct dp_packet **pkts,
1517 int cnt, bool may_steal)
1518 {
1519 if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
1520 int i;
1521
1522 dpdk_do_tx_copy(netdev, qid, pkts, cnt);
1523 if (may_steal) {
1524 for (i = 0; i < cnt; i++) {
1525 dp_packet_delete(pkts[i]);
1526 }
1527 }
1528 } else {
1529 __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);
1530 }
1531 return 0;
1532 }
1533
1534 static inline void
1535 netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
1536 struct dp_packet **pkts, int cnt, bool may_steal)
1537 {
1538 int i;
1539
1540 if (OVS_UNLIKELY(dev->txq_needs_locking)) {
1541 qid = qid % dev->real_n_txq;
1542 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
1543 }
1544
1545 if (OVS_UNLIKELY(!may_steal ||
1546 pkts[0]->source != DPBUF_DPDK)) {
1547 struct netdev *netdev = &dev->up;
1548
1549 dpdk_do_tx_copy(netdev, qid, pkts, cnt);
1550
1551 if (may_steal) {
1552 for (i = 0; i < cnt; i++) {
1553 dp_packet_delete(pkts[i]);
1554 }
1555 }
1556 } else {
1557 int next_tx_idx = 0;
1558 int dropped = 0;
1559 unsigned int qos_pkts = 0;
1560 unsigned int temp_cnt = 0;
1561
1562 for (i = 0; i < cnt; i++) {
1563 int size = dp_packet_size(pkts[i]);
1564
1565 if (OVS_UNLIKELY(size > dev->max_packet_len)) {
1566 if (next_tx_idx != i) {
1567 temp_cnt = i - next_tx_idx;
1568 qos_pkts = temp_cnt;
1569
1570 temp_cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf**)pkts,
1571 temp_cnt);
1572 dropped += qos_pkts - temp_cnt;
1573 dpdk_queue_pkts(dev, qid,
1574 (struct rte_mbuf **)&pkts[next_tx_idx],
1575 temp_cnt);
1576
1577 }
1578
1579 VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
1580 (int)size, dev->max_packet_len);
1581
1582 dp_packet_delete(pkts[i]);
1583 dropped++;
1584 next_tx_idx = i + 1;
1585 }
1586 }
1587 if (next_tx_idx != cnt) {
1588 cnt -= next_tx_idx;
1589 qos_pkts = cnt;
1590
1591 cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf**)pkts, cnt);
1592 dropped += qos_pkts - cnt;
1593 dpdk_queue_pkts(dev, qid, (struct rte_mbuf **)&pkts[next_tx_idx],
1594 cnt);
1595 }
1596
1597 if (OVS_UNLIKELY(dropped)) {
1598 rte_spinlock_lock(&dev->stats_lock);
1599 dev->stats.tx_dropped += dropped;
1600 rte_spinlock_unlock(&dev->stats_lock);
1601 }
1602 }
1603
1604 if (OVS_UNLIKELY(dev->txq_needs_locking)) {
1605 rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
1606 }
1607 }
1608
1609 static int
1610 netdev_dpdk_eth_send(struct netdev *netdev, int qid,
1611 struct dp_packet **pkts, int cnt, bool may_steal)
1612 {
1613 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1614
1615 netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
1616 return 0;
1617 }
1618
1619 static int
1620 netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
1621 {
1622 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1623
1624 ovs_mutex_lock(&dev->mutex);
1625 if (!eth_addr_equals(dev->hwaddr, mac)) {
1626 dev->hwaddr = mac;
1627 netdev_change_seq_changed(netdev);
1628 }
1629 ovs_mutex_unlock(&dev->mutex);
1630
1631 return 0;
1632 }
1633
1634 static int
1635 netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
1636 {
1637 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1638
1639 ovs_mutex_lock(&dev->mutex);
1640 *mac = dev->hwaddr;
1641 ovs_mutex_unlock(&dev->mutex);
1642
1643 return 0;
1644 }
1645
1646 static int
1647 netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
1648 {
1649 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1650
1651 ovs_mutex_lock(&dev->mutex);
1652 *mtup = dev->mtu;
1653 ovs_mutex_unlock(&dev->mutex);
1654
1655 return 0;
1656 }
1657
1658 static int
1659 netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
1660 {
1661 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1662 int old_mtu, err, dpdk_mtu;
1663 struct dpdk_mp *old_mp;
1664 struct dpdk_mp *mp;
1665 uint32_t buf_size;
1666
1667 ovs_mutex_lock(&dpdk_mutex);
1668 ovs_mutex_lock(&dev->mutex);
1669 if (dev->mtu == mtu) {
1670 err = 0;
1671 goto out;
1672 }
1673
1674 buf_size = dpdk_buf_size(mtu);
1675 dpdk_mtu = FRAME_LEN_TO_MTU(buf_size);
1676
1677 mp = dpdk_mp_get(dev->socket_id, dpdk_mtu);
1678 if (!mp) {
1679 err = ENOMEM;
1680 goto out;
1681 }
1682
1683 rte_eth_dev_stop(dev->port_id);
1684
1685 old_mtu = dev->mtu;
1686 old_mp = dev->dpdk_mp;
1687 dev->dpdk_mp = mp;
1688 dev->mtu = mtu;
1689 dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
1690
1691 err = dpdk_eth_dev_init(dev);
1692 if (err) {
1693 dpdk_mp_put(mp);
1694 dev->mtu = old_mtu;
1695 dev->dpdk_mp = old_mp;
1696 dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
1697 dpdk_eth_dev_init(dev);
1698 goto out;
1699 }
1700
1701 dpdk_mp_put(old_mp);
1702 netdev_change_seq_changed(netdev);
1703 out:
1704 ovs_mutex_unlock(&dev->mutex);
1705 ovs_mutex_unlock(&dpdk_mutex);
1706 return err;
1707 }
1708
1709 static int
1710 netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);
1711
1712 static int
1713 netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
1714 struct netdev_stats *stats)
1715 {
1716 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1717
1718 ovs_mutex_lock(&dev->mutex);
1719
1720 rte_spinlock_lock(&dev->stats_lock);
1721 /* Supported Stats */
1722 stats->rx_packets += dev->stats.rx_packets;
1723 stats->tx_packets += dev->stats.tx_packets;
1724 stats->tx_dropped += dev->stats.tx_dropped;
1725 stats->multicast = dev->stats.multicast;
1726 stats->rx_bytes = dev->stats.rx_bytes;
1727 stats->tx_bytes = dev->stats.tx_bytes;
1728 stats->rx_errors = dev->stats.rx_errors;
1729 stats->rx_length_errors = dev->stats.rx_length_errors;
1730
1731 stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
1732 stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
1733 stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
1734 stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
1735 stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
1736 stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
1737 stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;
1738
1739 rte_spinlock_unlock(&dev->stats_lock);
1740
1741 ovs_mutex_unlock(&dev->mutex);
1742
1743 return 0;
1744 }
1745
1746 static void
1747 netdev_dpdk_convert_xstats(struct netdev_stats *stats,
1748 const struct rte_eth_xstats *xstats,
1749 const unsigned int size)
1750 {
1751 /* XXX The current implementation is a simple search through an array
1752 * to find hardcoded counter names. In a future DPDK release (TBD)
1753 * the XSTATS API will change so that each counter is represented by
1754 * a unique ID instead of a string. */
1755
1756 for (unsigned int i = 0; i < size; i++) {
1757 if (strcmp(XSTAT_RX_64_PACKETS, xstats[i].name) == 0) {
1758 stats->rx_1_to_64_packets = xstats[i].value;
1759 } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, xstats[i].name) == 0) {
1760 stats->rx_65_to_127_packets = xstats[i].value;
1761 } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, xstats[i].name) == 0) {
1762 stats->rx_128_to_255_packets = xstats[i].value;
1763 } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, xstats[i].name) == 0) {
1764 stats->rx_256_to_511_packets = xstats[i].value;
1765 } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS,
1766 xstats[i].name) == 0) {
1767 stats->rx_512_to_1023_packets = xstats[i].value;
1768 } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS,
1769 xstats[i].name) == 0) {
1770 stats->rx_1024_to_1522_packets = xstats[i].value;
1771 } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS,
1772 xstats[i].name) == 0) {
1773 stats->rx_1523_to_max_packets = xstats[i].value;
1774 } else if (strcmp(XSTAT_TX_64_PACKETS, xstats[i].name) == 0) {
1775 stats->tx_1_to_64_packets = xstats[i].value;
1776 } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, xstats[i].name) == 0) {
1777 stats->tx_65_to_127_packets = xstats[i].value;
1778 } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, xstats[i].name) == 0) {
1779 stats->tx_128_to_255_packets = xstats[i].value;
1780 } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, xstats[i].name) == 0) {
1781 stats->tx_256_to_511_packets = xstats[i].value;
1782 } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS,
1783 xstats[i].name) == 0) {
1784 stats->tx_512_to_1023_packets = xstats[i].value;
1785 } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS,
1786 xstats[i].name) == 0) {
1787 stats->tx_1024_to_1522_packets = xstats[i].value;
1788 } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS,
1789 xstats[i].name) == 0) {
1790 stats->tx_1523_to_max_packets = xstats[i].value;
1791 } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, xstats[i].name) == 0) {
1792 stats->tx_multicast_packets = xstats[i].value;
1793 } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, xstats[i].name) == 0) {
1794 stats->rx_broadcast_packets = xstats[i].value;
1795 } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, xstats[i].name) == 0) {
1796 stats->tx_broadcast_packets = xstats[i].value;
1797 } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, xstats[i].name) == 0) {
1798 stats->rx_undersized_errors = xstats[i].value;
1799 } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, xstats[i].name) == 0) {
1800 stats->rx_fragmented_errors = xstats[i].value;
1801 } else if (strcmp(XSTAT_RX_JABBER_ERRORS, xstats[i].name) == 0) {
1802 stats->rx_jabber_errors = xstats[i].value;
1803 }
1804 }
1805 }
1806
1807 static int
1808 netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1809 {
1810 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1811 struct rte_eth_stats rte_stats;
1812 bool gg;
1813
1814 netdev_dpdk_get_carrier(netdev, &gg);
1815 ovs_mutex_lock(&dev->mutex);
1816
1817 struct rte_eth_xstats *rte_xstats;
1818 int rte_xstats_len, rte_xstats_ret;
1819
1820 if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
1821 VLOG_ERR("Can't get ETH statistics for port: %i.", dev->port_id);
1822 ovs_mutex_unlock(&dev->mutex);
1823 return EPROTO;
1824 }
1825
1826 rte_xstats_len = rte_eth_xstats_get(dev->port_id, NULL, 0);
1827 if (rte_xstats_len > 0) {
1828 rte_xstats = dpdk_rte_mzalloc(sizeof(*rte_xstats) * rte_xstats_len);
1829 memset(rte_xstats, 0xff, sizeof(*rte_xstats) * rte_xstats_len);
1830 rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
1831 rte_xstats_len);
1832 if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
1833 netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_ret);
1834 }
1835 rte_free(rte_xstats);
1836 } else {
1837 VLOG_WARN("Can't get XSTATS counters for port: %i.", dev->port_id);
1838 }
1839
1840 stats->rx_packets = rte_stats.ipackets;
1841 stats->tx_packets = rte_stats.opackets;
1842 stats->rx_bytes = rte_stats.ibytes;
1843 stats->tx_bytes = rte_stats.obytes;
1844 /* DPDK counts imissed as errors, but we count them here as dropped instead. */
1845 stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
1846 stats->tx_errors = rte_stats.oerrors;
1847 stats->multicast = rte_stats.imcasts;
1848
1849 rte_spinlock_lock(&dev->stats_lock);
1850 stats->tx_dropped = dev->stats.tx_dropped;
1851 rte_spinlock_unlock(&dev->stats_lock);
1852
1853 /* These are the available DPDK counters for packets not received due to
1854 * local resource constraints in DPDK and NIC respectively. */
1855 stats->rx_dropped = rte_stats.rx_nombuf + rte_stats.imissed;
1856 stats->rx_missed_errors = rte_stats.imissed;
1857
1858 ovs_mutex_unlock(&dev->mutex);
1859
1860 return 0;
1861 }
1862
1863 static int
1864 netdev_dpdk_get_features(const struct netdev *netdev,
1865 enum netdev_features *current,
1866 enum netdev_features *advertised OVS_UNUSED,
1867 enum netdev_features *supported OVS_UNUSED,
1868 enum netdev_features *peer OVS_UNUSED)
1869 {
1870 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1871 struct rte_eth_link link;
1872
1873 ovs_mutex_lock(&dev->mutex);
1874 link = dev->link;
1875 ovs_mutex_unlock(&dev->mutex);
1876
1877 if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
1878 if (link.link_speed == ETH_SPEED_NUM_10M) {
1879 *current = NETDEV_F_10MB_HD;
1880 }
1881 if (link.link_speed == ETH_SPEED_NUM_100M) {
1882 *current = NETDEV_F_100MB_HD;
1883 }
1884 if (link.link_speed == ETH_SPEED_NUM_1G) {
1885 *current = NETDEV_F_1GB_HD;
1886 }
1887 } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
1888 if (link.link_speed == ETH_SPEED_NUM_10M) {
1889 *current = NETDEV_F_10MB_FD;
1890 }
1891 if (link.link_speed == ETH_SPEED_NUM_100M) {
1892 *current = NETDEV_F_100MB_FD;
1893 }
1894 if (link.link_speed == ETH_SPEED_NUM_1G) {
1895 *current = NETDEV_F_1GB_FD;
1896 }
1897 if (link.link_speed == ETH_SPEED_NUM_10G) {
1898 *current = NETDEV_F_10GB_FD;
1899 }
1900 }
1901
1902 if (link.link_autoneg) {
1903 *current |= NETDEV_F_AUTONEG;
1904 }
1905
1906 return 0;
1907 }
1908
1909 static int
1910 netdev_dpdk_get_ifindex(const struct netdev *netdev)
1911 {
1912 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1913 int ifindex;
1914
1915 ovs_mutex_lock(&dev->mutex);
1916 ifindex = dev->port_id;
1917 ovs_mutex_unlock(&dev->mutex);
1918
1919 return ifindex;
1920 }
1921
1922 static int
1923 netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
1924 {
1925 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1926
1927 ovs_mutex_lock(&dev->mutex);
1928 check_link_status(dev);
1929 *carrier = dev->link.link_status;
1930
1931 ovs_mutex_unlock(&dev->mutex);
1932
1933 return 0;
1934 }
1935
1936 static int
1937 netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
1938 {
1939 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1940 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
1941
1942 ovs_mutex_lock(&dev->mutex);
1943
1944 if (is_vhost_running(virtio_dev)) {
1945 *carrier = 1;
1946 } else {
1947 *carrier = 0;
1948 }
1949
1950 ovs_mutex_unlock(&dev->mutex);
1951
1952 return 0;
1953 }
1954
1955 static long long int
1956 netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
1957 {
1958 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1959 long long int carrier_resets;
1960
1961 ovs_mutex_lock(&dev->mutex);
1962 carrier_resets = dev->link_reset_cnt;
1963 ovs_mutex_unlock(&dev->mutex);
1964
1965 return carrier_resets;
1966 }
1967
1968 static int
1969 netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
1970 long long int interval OVS_UNUSED)
1971 {
1972 return EOPNOTSUPP;
1973 }
1974
1975 static int
1976 netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
1977 enum netdev_flags off, enum netdev_flags on,
1978 enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
1979 {
1980 int err;
1981
1982 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1983 return EINVAL;
1984 }
1985
1986 *old_flagsp = dev->flags;
1987 dev->flags |= on;
1988 dev->flags &= ~off;
1989
1990 if (dev->flags == *old_flagsp) {
1991 return 0;
1992 }
1993
1994 if (dev->type == DPDK_DEV_ETH) {
1995 if (dev->flags & NETDEV_UP) {
1996 err = rte_eth_dev_start(dev->port_id);
1997 if (err) {
1998 return -err;
}
1999 }
2000
2001 if (dev->flags & NETDEV_PROMISC) {
2002 rte_eth_promiscuous_enable(dev->port_id);
2003 }
2004
2005 if (!(dev->flags & NETDEV_UP)) {
2006 rte_eth_dev_stop(dev->port_id);
2007 }
2008 }
2009
2010 return 0;
2011 }
2012
2013 static int
2014 netdev_dpdk_update_flags(struct netdev *netdev,
2015 enum netdev_flags off, enum netdev_flags on,
2016 enum netdev_flags *old_flagsp)
2017 {
2018 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2019 int error;
2020
2021 ovs_mutex_lock(&dev->mutex);
2022 error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
2023 ovs_mutex_unlock(&dev->mutex);
2024
2025 return error;
2026 }
2027
2028 static int
2029 netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
2030 {
2031 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2032 struct rte_eth_dev_info dev_info;
2033
2034 if (dev->port_id < 0) {
2035 return ENODEV;
}
2036
2037 ovs_mutex_lock(&dev->mutex);
2038 rte_eth_dev_info_get(dev->port_id, &dev_info);
2039 ovs_mutex_unlock(&dev->mutex);
2040
2043 smap_add_format(args, "port_no", "%d", dev->port_id);
2044 smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
2045 smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
2046 smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
2047 smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
2048 smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
2049 smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
2050 smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
2051 smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
2052 smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
2053 smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);
2054
2055 if (dev_info.pci_dev) {
2056 smap_add_format(args, "pci-vendor_id", "0x%x",
2057 dev_info.pci_dev->id.vendor_id);
2058 smap_add_format(args, "pci-device_id", "0x%x",
2059 dev_info.pci_dev->id.device_id);
2060 }
2061
2062 return 0;
2063 }
2064
2065 static void
2066 netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
2067 OVS_REQUIRES(dev->mutex)
2068 {
2069 enum netdev_flags old_flags;
2070
2071 if (admin_state) {
2072 netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
2073 } else {
2074 netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
2075 }
2076 }
2077
2078 static void
2079 netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
2080 const char *argv[], void *aux OVS_UNUSED)
2081 {
2082 bool up;
2083
2084 if (!strcasecmp(argv[argc - 1], "up")) {
2085 up = true;
2086 } else if (!strcasecmp(argv[argc - 1], "down")) {
2087 up = false;
2088 } else {
2089 unixctl_command_reply_error(conn, "Invalid Admin State");
2090 return;
2091 }
2092
2093 if (argc > 2) {
2094 struct netdev *netdev = netdev_from_name(argv[1]);
2095 if (netdev && is_dpdk_class(netdev->netdev_class)) {
2096 struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);
2097
2098 ovs_mutex_lock(&dpdk_dev->mutex);
2099 netdev_dpdk_set_admin_state__(dpdk_dev, up);
2100 ovs_mutex_unlock(&dpdk_dev->mutex);
2101
2102 netdev_close(netdev);
2103 } else {
2104 unixctl_command_reply_error(conn, "Not a DPDK Interface");
2105 netdev_close(netdev);
2106 return;
2107 }
2108 } else {
2109 struct netdev_dpdk *netdev;
2110
2111 ovs_mutex_lock(&dpdk_mutex);
2112 LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
2113 ovs_mutex_lock(&netdev->mutex);
2114 netdev_dpdk_set_admin_state__(netdev, up);
2115 ovs_mutex_unlock(&netdev->mutex);
2116 }
2117 ovs_mutex_unlock(&dpdk_mutex);
2118 }
2119 unixctl_command_reply(conn, "OK");
2120 }
2121
2122 /*
2123 * Set virtqueue flags so that we do not receive interrupts.
2124 */
2125 static void
2126 set_irq_status(struct virtio_net *virtio_dev)
2127 {
2128 uint32_t i;
2129 uint64_t idx;
2130
2131 for (i = 0; i < virtio_dev->virt_qp_nb; i++) {
2132 idx = i * VIRTIO_QNUM;
2133 rte_vhost_enable_guest_notification(virtio_dev, idx + VIRTIO_RXQ, 0);
2134 rte_vhost_enable_guest_notification(virtio_dev, idx + VIRTIO_TXQ, 0);
2135 }
2136 }
2137
2138 /*
2139 * Fixes mapping for vhost-user tx queues. Must be called after each
2140 * enabling/disabling of queues and real_n_txq modifications.
2141 */
2142 static void
2143 netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
2144 OVS_REQUIRES(dev->mutex)
2145 {
2146 int *enabled_queues, n_enabled = 0;
2147 int i, k, total_txqs = dev->real_n_txq;
2148
2149 enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);
2150
2151 for (i = 0; i < total_txqs; i++) {
2152 /* Enabled queues are always mapped to themselves. */
2153 if (dev->tx_q[i].map == i) {
2154 enabled_queues[n_enabled++] = i;
2155 }
2156 }
2157
2158 if (n_enabled == 0 && total_txqs != 0) {
2159 enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
2160 n_enabled = 1;
2161 }
2162
2163 k = 0;
2164 for (i = 0; i < total_txqs; i++) {
2165 if (dev->tx_q[i].map != i) {
2166 dev->tx_q[i].map = enabled_queues[k];
2167 k = (k + 1) % n_enabled;
2168 }
2169 }
2170
2171 VLOG_DBG("TX queue mapping for %s", dev->vhost_id);
2172 for (i = 0; i < total_txqs; i++) {
2173 VLOG_DBG("%2d --> %2d", i, dev->tx_q[i].map);
2174 }
2175
2176 rte_free(enabled_queues);
2177 }
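/* Worked example (illustrative values): with real_n_txq == 4 and guest
 * queues 0 and 2 enabled (tx_q[0].map == 0, tx_q[2].map == 2), the loop
 * above spreads the disabled queues round-robin over the enabled ones:
 *
 *      0 --> 0    (enabled, maps to itself)
 *      1 --> 0    (disabled, remapped to enabled queue 0)
 *      2 --> 2    (enabled, maps to itself)
 *      3 --> 2    (disabled, remapped to enabled queue 2)
 *
 * If no queue at all is enabled, every queue ends up mapped to
 * OVS_VHOST_QUEUE_DISABLED. */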
2178
2179 static int
2180 netdev_dpdk_vhost_set_queues(struct netdev_dpdk *dev, struct virtio_net *virtio_dev)
2181 OVS_REQUIRES(dev->mutex)
2182 {
2183 uint32_t qp_num;
2184
2185 qp_num = virtio_dev->virt_qp_nb;
2186 if (qp_num > dev->up.n_rxq) {
2187 VLOG_ERR("vHost Device '%s' %"PRIu64" can't be added - "
2188 "too many queues %d > %d", virtio_dev->ifname, virtio_dev->device_fh,
2189 qp_num, dev->up.n_rxq);
2190 return -1;
2191 }
2192
2193 dev->real_n_rxq = qp_num;
2194 dev->real_n_txq = qp_num;
2195 dev->txq_needs_locking = true;
2196 /* Enable TX queue 0 by default if it wasn't disabled. */
2197 if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
2198 dev->tx_q[0].map = 0;
2199 }
2200
2201 netdev_dpdk_remap_txqs(dev);
2202
2203 return 0;
2204 }
2205
2206 /*
2207 * A new virtio-net device is added to a vhost port.
2208 */
2209 static int
2210 new_device(struct virtio_net *virtio_dev)
2211 {
2212 struct netdev_dpdk *dev;
2213 bool exists = false;
2214
2215 ovs_mutex_lock(&dpdk_mutex);
2216 /* Add device to the vhost port with the same name as that passed down. */
2217 LIST_FOR_EACH(dev, list_node, &dpdk_list) {
2218 if (strncmp(virtio_dev->ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
2219 ovs_mutex_lock(&dev->mutex);
2220 if (netdev_dpdk_vhost_set_queues(dev, virtio_dev)) {
2221 ovs_mutex_unlock(&dev->mutex);
2222 ovs_mutex_unlock(&dpdk_mutex);
2223 return -1;
2224 }
2225 ovsrcu_set(&dev->virtio_dev, virtio_dev);
2226 exists = true;
2227 virtio_dev->flags |= VIRTIO_DEV_RUNNING;
2228 /* Disable notifications. */
2229 set_irq_status(virtio_dev);
2230 ovs_mutex_unlock(&dev->mutex);
2231 break;
2232 }
2233 }
2234 ovs_mutex_unlock(&dpdk_mutex);
2235
2236 if (!exists) {
2237 VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
2238 "found", virtio_dev->ifname, virtio_dev->device_fh);
2239
2240 return -1;
2241 }
2242
2243 VLOG_INFO("vHost Device '%s' %"PRIu64" has been added", virtio_dev->ifname,
2244 virtio_dev->device_fh);
2245 return 0;
2246 }
2247
2248 /* Clears mapping for all available queues of vhost interface. */
2249 static void
2250 netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
2251 OVS_REQUIRES(dev->mutex)
2252 {
2253 int i;
2254
2255 for (i = 0; i < dev->real_n_txq; i++) {
2256 dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
2257 }
2258 }
2259
2260 /*
2261 * Remove a virtio-net device from the specific vhost port. Clearing the
2262 * VIRTIO_DEV_RUNNING flag stops any more packets from being sent or
2263 * received to/from a VM, and the RCU synchronization below ensures all
2264 * currently queued packets have been sent/received before the device is
2265 * removed.
2266 */
2266 static void
2267 destroy_device(volatile struct virtio_net *virtio_dev)
2268 {
2269 struct netdev_dpdk *dev;
2270 bool exists = false;
2271
2272 ovs_mutex_lock(&dpdk_mutex);
2273 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
2274 if (netdev_dpdk_get_virtio(dev) == virtio_dev) {
2275
2276 ovs_mutex_lock(&dev->mutex);
2277 virtio_dev->flags &= ~VIRTIO_DEV_RUNNING;
2278 ovsrcu_set(&dev->virtio_dev, NULL);
2279 netdev_dpdk_txq_map_clear(dev);
2280 exists = true;
2281 ovs_mutex_unlock(&dev->mutex);
2282 break;
2283 }
2284 }
2285
2286 ovs_mutex_unlock(&dpdk_mutex);
2287
2288 if (exists) {
2289 /*
2290 * Wait for other threads to quiesce after setting the 'virtio_dev'
2291 * to NULL, before returning.
2292 */
2293 ovsrcu_synchronize();
2294 /*
2295 * As call to ovsrcu_synchronize() will end the quiescent state,
2296 * put thread back into quiescent state before returning.
2297 */
2298 ovsrcu_quiesce_start();
2299 VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed",
2300 virtio_dev->ifname, virtio_dev->device_fh);
2301 } else {
2302 VLOG_INFO("vHost Device '%s' %"PRIu64" not found", virtio_dev->ifname,
2303 virtio_dev->device_fh);
2304 }
2305 }
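/* Reader side of the RCU scheme above, as a minimal sketch (not part of this
 * file; 'example_vhost_reader' and its error handling are illustrative
 * only). Because the pointer is published with ovsrcu_set() and read with
 * ovsrcu_get(), a PMD thread sees either the old device or NULL, never freed
 * memory, provided it does not quiesce while the pointer is in use: */
#if 0
static int
example_vhost_reader(struct netdev_dpdk *dev)
{
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    if (!is_vhost_running(virtio_dev)) {
        return EAGAIN;          /* Device removed or not yet configured. */
    }
    /* ... use 'virtio_dev' here, without quiescing ... */
    return 0;
}
#endif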
2306
2307 static int
2308 vring_state_changed(struct virtio_net *virtio_dev, uint16_t queue_id,
2309 int enable)
2310 {
2311 struct netdev_dpdk *dev;
2312 bool exists = false;
2313 int qid = queue_id / VIRTIO_QNUM;
2314
2315 if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
2316 return 0;
2317 }
2318
2319 ovs_mutex_lock(&dpdk_mutex);
2320 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
2321 if (strncmp(virtio_dev->ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
2322 ovs_mutex_lock(&dev->mutex);
2323 if (enable) {
2324 dev->tx_q[qid].map = qid;
2325 } else {
2326 dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
2327 }
2328 netdev_dpdk_remap_txqs(dev);
2329 exists = true;
2330 ovs_mutex_unlock(&dev->mutex);
2331 break;
2332 }
2333 }
2334 ovs_mutex_unlock(&dpdk_mutex);
2335
2336 if (exists) {
2337 VLOG_INFO("State of queue %d (tx_qid %d) of vhost device '%s' %"
2338 PRIu64" changed to '%s'", queue_id, qid,
2339 virtio_dev->ifname, virtio_dev->device_fh,
2340 enable ? "enabled" : "disabled");
2341 } else {
2342 VLOG_INFO("vHost Device '%s' %"PRIu64" not found", virtio_dev->ifname,
2343 virtio_dev->device_fh);
2344 return -1;
2345 }
2346
2347 return 0;
2348 }
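/* Illustrative index arithmetic: vrings come in (rx, tx) pairs from the
 * guest's point of view, so queue_id == 5 gives 5 % VIRTIO_QNUM ==
 * VIRTIO_TXQ and is ignored above, while queue_id == 4 gives qid ==
 * 4 / VIRTIO_QNUM == 2, i.e. the guest RX vring of queue pair 2, whose
 * state drives the mapping of host TX queue 2. */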
2349
2350 struct virtio_net *
2351 netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
2352 {
2353 return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
2354 }
2355
2356 /*
2357 * These callbacks allow virtio-net devices to be added to vhost ports when
2358 * their configuration is fully complete.
2359 */
2360 static const struct virtio_net_device_ops virtio_net_device_ops =
2361 {
2362 .new_device = new_device,
2363 .destroy_device = destroy_device,
2364 .vring_state_changed = vring_state_changed
2365 };
2366
2367 static void *
2368 start_vhost_loop(void *dummy OVS_UNUSED)
2369 {
2370 pthread_detach(pthread_self());
2371 /* Put the vhost session thread into quiescent state. */
2372 ovsrcu_quiesce_start();
2373 rte_vhost_driver_session_start();
2374 return NULL;
2375 }
2376
2377 static int
2378 dpdk_vhost_class_init(void)
2379 {
2380 rte_vhost_driver_callback_register(&virtio_net_device_ops);
2381 rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4
2382 | 1ULL << VIRTIO_NET_F_HOST_TSO6
2383 | 1ULL << VIRTIO_NET_F_CSUM);
2384
2385 ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
2386 return 0;
2387 }
2388
2389 static int
2390 dpdk_vhost_cuse_class_init(void)
2391 {
2392 return 0;
2393 }
2394
2395 static int
2396 dpdk_vhost_user_class_init(void)
2397 {
2398 return 0;
2399 }
2400
2401 static void
2402 dpdk_common_init(void)
2403 {
2404 unixctl_command_register("netdev-dpdk/set-admin-state",
2405 "[netdev] up|down", 1, 2,
2406 netdev_dpdk_set_admin_state, NULL);
2407
2408 }
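/* Example invocations (shell, not C; the port name 'dpdk0' is
 * illustrative):
 *
 *     ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
 *     ovs-appctl netdev-dpdk/set-admin-state up
 *
 * With a port name, the state of that port alone is changed; without one,
 * every DPDK port is updated. */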
2409
2410 /* Client Rings */
2411
2412 static int
2413 dpdk_ring_create(const char dev_name[], unsigned int port_no,
2414 unsigned int *eth_port_id)
2415 {
2416 struct dpdk_ring *ivshmem;
2417 char ring_name[RTE_RING_NAMESIZE];
2418 int err;
2419
2420 ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
2421 if (ivshmem == NULL) {
2422 return ENOMEM;
2423 }
2424
2425 /* XXX: Add support for multiqueue ring. */
2426 err = snprintf(ring_name, sizeof(ring_name), "%s_tx", dev_name);
2427 if (err < 0) {
rte_free(ivshmem);
2428 return -err;
2429 }
2430
2431 /* Create single producer tx ring, netdev does explicit locking. */
2432 ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
2433 RING_F_SP_ENQ);
2434 if (ivshmem->cring_tx == NULL) {
2435 rte_free(ivshmem);
2436 return ENOMEM;
2437 }
2438
2439 err = snprintf(ring_name, sizeof(ring_name), "%s_rx", dev_name);
2440 if (err < 0) {
rte_free(ivshmem);
2441 return -err;
2442 }
2443
2444 /* Create single consumer rx ring, netdev does explicit locking. */
2445 ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
2446 RING_F_SC_DEQ);
2447 if (ivshmem->cring_rx == NULL) {
2448 rte_free(ivshmem);
2449 return ENOMEM;
2450 }
2451
2452 err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
2453 &ivshmem->cring_tx, 1, SOCKET0);
2454
2455 if (err < 0) {
2456 rte_free(ivshmem);
2457 return ENODEV;
2458 }
2459
2460 ivshmem->user_port_id = port_no;
2461 ivshmem->eth_port_id = rte_eth_dev_count() - 1;
2462 ovs_list_push_back(&dpdk_ring_list, &ivshmem->list_node);
2463
2464 *eth_port_id = ivshmem->eth_port_id;
2465 return 0;
2466 }
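/* A minimal client-side sketch (assumes a separate DPDK process; the port
 * name "dpdkr0" and the error handling are illustrative only). The rings
 * created above are discoverable by name, so a consumer would do roughly: */
#if 0
struct rte_ring *from_ovs = rte_ring_lookup("dpdkr0_tx"); /* OVS transmits. */
struct rte_ring *to_ovs = rte_ring_lookup("dpdkr0_rx");   /* OVS receives. */
struct rte_mbuf *pkt;

if (from_ovs && to_ovs && !rte_ring_dequeue(from_ovs, (void **) &pkt)) {
    /* ... process 'pkt', then hand it back to the datapath ... */
    rte_ring_enqueue(to_ovs, pkt);
}
#endif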
2467
2468 static int
2469 dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id) OVS_REQUIRES(dpdk_mutex)
2470 {
2471 struct dpdk_ring *ivshmem;
2472 unsigned int port_no;
2473 int err = 0;
2474
2475 /* Names always start with "dpdkr" */
2476 err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
2477 if (err) {
2478 return err;
2479 }
2480
2481 /* look through our list to find the device */
2482 LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
2483 if (ivshmem->user_port_id == port_no) {
2484 VLOG_INFO("Found dpdk ring device %s:", dev_name);
2485 *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
2486 return 0;
2487 }
2488 }
2489 /* Need to create the device rings */
2490 return dpdk_ring_create(dev_name, port_no, eth_port_id);
2491 }
2492
2493 static int
2494 netdev_dpdk_ring_send(struct netdev *netdev, int qid,
2495 struct dp_packet **pkts, int cnt, bool may_steal)
2496 {
2497 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2498 unsigned i;
2499
2500 /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that the
2501 * rss hash field is clear. This is because the same mbuf may be modified by
2502 * the consumer of the ring and returned to the datapath without the RSS
2503 * hash being recalculated. */
2504 for (i = 0; i < cnt; i++) {
2505 dp_packet_rss_invalidate(pkts[i]);
2506 }
2507
2508 netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
2509 return 0;
2510 }
2511
2512 static int
2513 netdev_dpdk_ring_construct(struct netdev *netdev)
2514 {
2515 unsigned int port_no = 0;
2516 int err = 0;
2517
2518 if (rte_eal_init_ret) {
2519 return rte_eal_init_ret;
2520 }
2521
2522 ovs_mutex_lock(&dpdk_mutex);
2523
2524 err = dpdk_ring_open(netdev->name, &port_no);
2525 if (err) {
2526 goto unlock_dpdk;
2527 }
2528
2529 err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
2530
2531 unlock_dpdk:
2532 ovs_mutex_unlock(&dpdk_mutex);
2533 return err;
2534 }
2535
2536 /* QoS Functions */
2537
2538 /*
2539 * Initialize QoS configuration operations.
2540 */
2541 static void
2542 qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
2543 {
2544 conf->ops = ops;
2545 }
2546
2547 /*
2548 * Search the existing QoS operations in 'qos_confs' and compare each set's
2549 * qos_name to 'name'. Return a pointer to the matching dpdk_qos_ops, or
2550 * NULL if there is no match.
2551 */
2552 static const struct dpdk_qos_ops *
2553 qos_lookup_name(const char *name)
2554 {
2555 const struct dpdk_qos_ops *const *opsp;
2556
2557 for (opsp = qos_confs; *opsp != NULL; opsp++) {
2558 const struct dpdk_qos_ops *ops = *opsp;
2559 if (!strcmp(name, ops->qos_name)) {
2560 return ops;
2561 }
2562 }
2563 return NULL;
2564 }
2565
2566 /*
2567 * Call qos_destruct to clean up items associated with the netdev's
2568 * qos_conf. Set the netdev's qos_conf to NULL.
2569 */
2570 static void
2571 qos_delete_conf(struct netdev *netdev)
2572 {
2573 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2574
2575 rte_spinlock_lock(&dev->qos_lock);
2576 if (dev->qos_conf) {
2577 if (dev->qos_conf->ops->qos_destruct) {
2578 dev->qos_conf->ops->qos_destruct(netdev, dev->qos_conf);
2579 }
2580 dev->qos_conf = NULL;
2581 }
2582 rte_spinlock_unlock(&dev->qos_lock);
2583 }
2584
2585 static int
2586 netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
2587 struct sset *types)
2588 {
2589 const struct dpdk_qos_ops *const *opsp;
2590
2591 for (opsp = qos_confs; *opsp != NULL; opsp++) {
2592 const struct dpdk_qos_ops *ops = *opsp;
2593 if (ops->qos_construct && ops->qos_name[0] != '\0') {
2594 sset_add(types, ops->qos_name);
2595 }
2596 }
2597 return 0;
2598 }
2599
2600 static int
2601 netdev_dpdk_get_qos(const struct netdev *netdev,
2602 const char **typep, struct smap *details)
2603 {
2604 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2605 int error = 0;
2606
2607 ovs_mutex_lock(&dev->mutex);
2608 if (dev->qos_conf) {
2609 *typep = dev->qos_conf->ops->qos_name;
2610 error = (dev->qos_conf->ops->qos_get
2611 ? dev->qos_conf->ops->qos_get(netdev, details) : 0);
2612 }
2613 ovs_mutex_unlock(&dev->mutex);
2614
2615 return error;
2616 }
2617
2618 static int
2619 netdev_dpdk_set_qos(struct netdev *netdev,
2620 const char *type, const struct smap *details)
2621 {
2622 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2623 const struct dpdk_qos_ops *new_ops = NULL;
2624 int error = 0;
2625
2626 /* If type is empty or unsupported then the current QoS configuration
2627 * for the dpdk-netdev can be destroyed. */
2628 new_ops = qos_lookup_name(type);
2629
2630 if (type[0] == '\0' || !new_ops || !new_ops->qos_construct) {
2631 qos_delete_conf(netdev);
2632 return EOPNOTSUPP;
2633 }
2634
2635 ovs_mutex_lock(&dev->mutex);
2636
2637 if (dev->qos_conf) {
2638 if (new_ops == dev->qos_conf->ops) {
2639 error = new_ops->qos_set ? new_ops->qos_set(netdev, details) : 0;
2640 } else {
2641 /* Delete existing QoS configuration. */
2642 qos_delete_conf(netdev);
2643 ovs_assert(dev->qos_conf == NULL);
2644
2645 /* Install new QoS configuration. */
2646 error = new_ops->qos_construct(netdev, details);
2647 ovs_assert((error == 0) == (dev->qos_conf != NULL));
2648 }
2649 } else {
2650 error = new_ops->qos_construct(netdev, details);
2651 ovs_assert((error == 0) == (dev->qos_conf != NULL));
2652 }
2653
2654 ovs_mutex_unlock(&dev->mutex);
2655 return error;
2656 }
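/* Example configuration (shell, not C; the port name 'dpdk0' and the rates
 * are illustrative):
 *
 *     ovs-vsctl set port dpdk0 qos=@ep -- --id=@ep create qos \
 *         type=egress-policer other-config:cir=46000000 \
 *         other-config:cbs=2048
 *
 * "egress-policer" selects egress_policer_ops below via qos_lookup_name(),
 * and "cir"/"cbs" are the keys read out of 'details' by its construct and
 * set callbacks. */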
2657
2658 /* egress-policer details */
2659
2660 struct egress_policer {
2661 struct qos_conf qos_conf;
2662 struct rte_meter_srtcm_params app_srtcm_params;
2663 struct rte_meter_srtcm egress_meter;
2664 };
2665
2666 static struct egress_policer *
2667 egress_policer_get__(const struct netdev *netdev)
2668 {
2669 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2670 return CONTAINER_OF(dev->qos_conf, struct egress_policer, qos_conf);
2671 }
2672
2673 static int
2674 egress_policer_qos_construct(struct netdev *netdev,
2675 const struct smap *details)
2676 {
2677 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2678 struct egress_policer *policer;
2679 const char *cir_s;
2680 const char *cbs_s;
2681 int err = 0;
2682
2683 rte_spinlock_lock(&dev->qos_lock);
2684 policer = xmalloc(sizeof *policer);
2685 qos_conf_init(&policer->qos_conf, &egress_policer_ops);
2686 dev->qos_conf = &policer->qos_conf;
2687 cir_s = smap_get(details, "cir");
2688 cbs_s = smap_get(details, "cbs");
2689 policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
2690 policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
2691 policer->app_srtcm_params.ebs = 0;
2692 err = rte_meter_srtcm_config(&policer->egress_meter,
2693 &policer->app_srtcm_params);
if (err) {
/* Keep the caller's invariant: 'qos_conf' stays set only on success. */
dev->qos_conf = NULL;
free(policer);
}
2694 rte_spinlock_unlock(&dev->qos_lock);
2695
2696 return err;
2697 }
2698
2699 static void
2700 egress_policer_qos_destruct(struct netdev *netdev OVS_UNUSED,
2701 struct qos_conf *conf)
2702 {
2703 struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
2704 qos_conf);
2705 free(policer);
2706 }
2707
2708 static int
2709 egress_policer_qos_get(const struct netdev *netdev, struct smap *details)
2710 {
2711 struct egress_policer *policer = egress_policer_get__(netdev);
2712 smap_add_format(details, "cir", "%llu",
2713 1ULL * policer->app_srtcm_params.cir);
2714 smap_add_format(details, "cbs", "%llu",
2715 1ULL * policer->app_srtcm_params.cbs);
2716 return 0;
2717 }
2718
2719 static int
2720 egress_policer_qos_set(struct netdev *netdev, const struct smap *details)
2721 {
2722 struct egress_policer *policer;
2723 const char *cir_s;
2724 const char *cbs_s;
2725 int err = 0;
2726
2727 policer = egress_policer_get__(netdev);
2728 cir_s = smap_get(details, "cir");
2729 cbs_s = smap_get(details, "cbs");
2730 policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
2731 policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
2732 policer->app_srtcm_params.ebs = 0;
2733 err = rte_meter_srtcm_config(&policer->egress_meter,
2734 &policer->app_srtcm_params);
2735
2736 return err;
2737 }
2738
2739 static inline bool
2740 egress_policer_pkt_handle__(struct rte_meter_srtcm *meter,
2741 struct rte_mbuf *pkt, uint64_t time)
2742 {
2743 uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);
2744
2745 return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
2746 e_RTE_METER_GREEN;
2747 }
2748
2749 static int
2750 egress_policer_run(struct netdev *netdev, struct rte_mbuf **pkts,
2751 int pkt_cnt)
2752 {
2753 int i = 0;
2754 int cnt = 0;
2755 struct egress_policer *policer = egress_policer_get__(netdev);
2756 struct rte_mbuf *pkt = NULL;
2757 uint64_t current_time = rte_rdtsc();
2758
2759 for (i = 0; i < pkt_cnt; i++) {
2760 pkt = pkts[i];
2761 /* Handle current packet */
2762 if (egress_policer_pkt_handle__(&policer->egress_meter, pkt,
2763 current_time)) {
2764 if (cnt != i) {
2765 pkts[cnt] = pkt;
2766 }
2767 cnt++;
2768 } else {
2769 rte_pktmbuf_free(pkt);
2770 }
2771 }
2772
2773 return cnt;
2774 }
2775
2776 static const struct dpdk_qos_ops egress_policer_ops = {
2777 "egress-policer", /* qos_name */
2778 egress_policer_qos_construct,
2779 egress_policer_qos_destruct,
2780 egress_policer_qos_get,
2781 egress_policer_qos_set,
2782 egress_policer_run
2783 };
2784
2785 #define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
2786 GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV) \
2787 { \
2788 NAME, \
2789 true, /* is_pmd */ \
2790 INIT, /* init */ \
2791 NULL, /* netdev_dpdk_run */ \
2792 NULL, /* netdev_dpdk_wait */ \
2793 \
2794 netdev_dpdk_alloc, \
2795 CONSTRUCT, \
2796 DESTRUCT, \
2797 netdev_dpdk_dealloc, \
2798 netdev_dpdk_get_config, \
2799 netdev_dpdk_set_config, \
2800 NULL, /* get_tunnel_config */ \
2801 NULL, /* build header */ \
2802 NULL, /* push header */ \
2803 NULL, /* pop header */ \
2804 netdev_dpdk_get_numa_id, /* get_numa_id */ \
2805 MULTIQ, /* set_multiq */ \
2806 \
2807 SEND, /* send */ \
2808 NULL, /* send_wait */ \
2809 \
2810 netdev_dpdk_set_etheraddr, \
2811 netdev_dpdk_get_etheraddr, \
2812 netdev_dpdk_get_mtu, \
2813 netdev_dpdk_set_mtu, \
2814 netdev_dpdk_get_ifindex, \
2815 GET_CARRIER, \
2816 netdev_dpdk_get_carrier_resets, \
2817 netdev_dpdk_set_miimon, \
2818 GET_STATS, \
2819 GET_FEATURES, \
2820 NULL, /* set_advertisements */ \
2821 \
2822 NULL, /* set_policing */ \
2823 netdev_dpdk_get_qos_types, \
2824 NULL, /* get_qos_capabilities */ \
2825 netdev_dpdk_get_qos, \
2826 netdev_dpdk_set_qos, \
2827 NULL, /* get_queue */ \
2828 NULL, /* set_queue */ \
2829 NULL, /* delete_queue */ \
2830 NULL, /* get_queue_stats */ \
2831 NULL, /* queue_dump_start */ \
2832 NULL, /* queue_dump_next */ \
2833 NULL, /* queue_dump_done */ \
2834 NULL, /* dump_queue_stats */ \
2835 \
2836 NULL, /* set_in4 */ \
2837 NULL, /* get_addr_list */ \
2838 NULL, /* add_router */ \
2839 NULL, /* get_next_hop */ \
2840 GET_STATUS, \
2841 NULL, /* arp_lookup */ \
2842 \
2843 netdev_dpdk_update_flags, \
2844 \
2845 netdev_dpdk_rxq_alloc, \
2846 netdev_dpdk_rxq_construct, \
2847 netdev_dpdk_rxq_destruct, \
2848 netdev_dpdk_rxq_dealloc, \
2849 RXQ_RECV, \
2850 NULL, /* rx_wait */ \
2851 NULL, /* rxq_drain */ \
2852 }
2853
2854 static int
2855 process_vhost_flags(char *flag, char *default_val, int size,
2856 const struct smap *ovs_other_config,
2857 char **new_val)
2858 {
2859 const char *val;
2860 int changed = 0;
2861
2862 val = smap_get(ovs_other_config, flag);
2863
2864 /* Depending on which version of vhost is in use, process the vhost-specific
2865 * flag if it is provided; otherwise fall back to the default value.
2866 */
2867 if (val && (strlen(val) <= size)) {
2868 changed = 1;
2869 *new_val = xstrdup(val);
2870 VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
2871 } else {
2872 VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
2873 *new_val = default_val;
2874 }
2875
2876 return changed;
2877 }
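/* For example (the value is illustrative):
 *
 *     ovs-vsctl set Open_vSwitch . other_config:vhost-sock-dir=sockets
 *
 * makes this return 1 with *new_val == "sockets", which dpdk_init__() below
 * expands to "<rundir>/sockets"; when the flag is absent it returns 0 and
 * hands back 'default_val' unchanged. */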
2878
2879 static char **
2880 grow_argv(char ***argv, size_t cur_siz, size_t grow_by)
2881 {
2882 return xrealloc(*argv, sizeof(char *) * (cur_siz + grow_by));
2883 }
2884
2885 static void
2886 dpdk_option_extend(char ***argv, int argc, const char *option,
2887 const char *value)
2888 {
2889 char **newargv = grow_argv(argv, argc, 2);
2890 *argv = newargv;
2891 newargv[argc] = xstrdup(option);
2892 newargv[argc+1] = xstrdup(value);
2893 }
2894
2895 static char **
2896 move_argv(char ***argv, size_t cur_size, char **src_argv, size_t src_argc)
2897 {
2898 char **newargv = grow_argv(argv, cur_size, src_argc);
2899 while (src_argc--) {
2900 newargv[cur_size+src_argc] = src_argv[src_argc];
2901 src_argv[src_argc] = NULL;
2902 }
2903 return newargv;
2904 }
2905
2906 static int
2907 extra_dpdk_args(const char *ovs_extra_config, char ***argv, int argc)
2908 {
2909 int ret = argc;
2910 char *release_tok = xstrdup(ovs_extra_config);
2911 char *tok = release_tok, *endptr = NULL;
2912
2913 for (tok = strtok_r(release_tok, " ", &endptr); tok != NULL;
2914 tok = strtok_r(NULL, " ", &endptr)) {
2915 char **newarg = grow_argv(argv, ret, 1);
2916 *argv = newarg;
2917 newarg[ret++] = xstrdup(tok);
2918 }
2919 free(release_tok);
2920 return ret;
2921 }
2922
2923 static bool
2924 argv_contains(char **argv_haystack, const size_t argc_haystack,
2925 const char *needle)
2926 {
2927 for (size_t i = 0; i < argc_haystack; ++i) {
2928 if (!strcmp(argv_haystack[i], needle)) {
2929 return true;
}
2930 }
2931 return false;
2932 }
2933
2934 static int
2935 construct_dpdk_options(const struct smap *ovs_other_config,
2936 char ***argv, const int initial_size,
2937 char **extra_args, const size_t extra_argc)
2938 {
2939 struct dpdk_options_map {
2940 const char *ovs_configuration;
2941 const char *dpdk_option;
2942 bool default_enabled;
2943 const char *default_value;
2944 } opts[] = {
2945 {"dpdk-lcore-mask", "-c", false, NULL},
2946 {"dpdk-hugepage-dir", "--huge-dir", false, NULL},
2947 };
2948
2949 int i, ret = initial_size;
2950
2951 /* First, construct the flat options (non-mutually-exclusive). */
2952 for (i = 0; i < ARRAY_SIZE(opts); ++i) {
2953 const char *lookup = smap_get(ovs_other_config,
2954 opts[i].ovs_configuration);
2955 if (!lookup && opts[i].default_enabled) {
2956 lookup = opts[i].default_value;
2957 }
2958
2959 if (lookup) {
2960 if (!argv_contains(extra_args, extra_argc, opts[i].dpdk_option)) {
2961 dpdk_option_extend(argv, ret, opts[i].dpdk_option, lookup);
2962 ret += 2;
2963 } else {
2964 VLOG_WARN("Ignoring database defined option '%s' due to "
2965 "dpdk_extras config", opts[i].dpdk_option);
2966 }
2967 }
2968 }
2969
2970 return ret;
2971 }
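/* For example (values illustrative), a database containing
 *
 *     other_config:dpdk-lcore-mask=0x2
 *     other_config:dpdk-hugepage-dir=/dev/hugepages
 *
 * contributes "-c 0x2 --huge-dir /dev/hugepages" to the EAL command line,
 * unless the same EAL option already appears in the dpdk-extra arguments. */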
2972
2973 #define MAX_DPDK_EXCL_OPTS 10
2974
2975 static int
2976 construct_dpdk_mutex_options(const struct smap *ovs_other_config,
2977 char ***argv, const int initial_size,
2978 char **extra_args, const size_t extra_argc)
2979 {
2980 struct dpdk_exclusive_options_map {
2981 const char *category;
2982 const char *ovs_dpdk_options[MAX_DPDK_EXCL_OPTS];
2983 const char *eal_dpdk_options[MAX_DPDK_EXCL_OPTS];
2984 const char *default_value;
2985 int default_option;
2986 } excl_opts[] = {
2987 {"memory type",
2988 {"dpdk-alloc-mem", "dpdk-socket-mem", NULL,},
2989 {"-m", "--socket-mem", NULL,},
2990 "1024,0", 1
2991 },
2992 };
2993
2994 int i, ret = initial_size;
2995 for (i = 0; i < ARRAY_SIZE(excl_opts); ++i) {
2996 int found_opts = 0, scan, found_pos = -1;
2997 const char *found_value;
2998 struct dpdk_exclusive_options_map *popt = &excl_opts[i];
2999
3000 for (scan = 0; scan < MAX_DPDK_EXCL_OPTS
3001 && popt->ovs_dpdk_options[scan]; ++scan) {
3002 const char *lookup = smap_get(ovs_other_config,
3003 popt->ovs_dpdk_options[scan]);
3004 if (lookup && strlen(lookup)) {
3005 found_opts++;
3006 found_pos = scan;
3007 found_value = lookup;
3008 }
3009 }
3010
3011 if (!found_opts) {
3012 if (popt->default_option) {
3013 found_pos = popt->default_option;
3014 found_value = popt->default_value;
3015 } else {
3016 continue;
3017 }
3018 }
3019
3020 if (found_opts > 1) {
3021 VLOG_ERR("Multiple options defined for %s. Please check your"
3022 " database settings and reconfigure if necessary.",
3023 popt->category);
3024 }
3025
3026 if (!argv_contains(extra_args, extra_argc,
3027 popt->eal_dpdk_options[found_pos])) {
3028 dpdk_option_extend(argv, ret, popt->eal_dpdk_options[found_pos],
3029 found_value);
3030 ret += 2;
3031 } else {
3032 VLOG_WARN("Ignoring database defined option '%s' due to "
3033 "dpdk_extras config", popt->eal_dpdk_options[found_pos]);
3034 }
3035 }
3036
3037 return ret;
3038 }
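/* For example, other_config:dpdk-socket-mem=1024,1024 becomes
 * "--socket-mem 1024,1024". If neither dpdk-socket-mem nor dpdk-alloc-mem
 * is set, the default "--socket-mem 1024,0" is used; if both are set, the
 * error above is logged and the last one scanned wins. */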
3039
3040 static int
3041 get_dpdk_args(const struct smap *ovs_other_config, char ***argv,
3042 int argc)
3043 {
3044 const char *extra_configuration;
3045 char **extra_args = NULL;
3046 int i;
3047 size_t extra_argc = 0;
3048
3049 extra_configuration = smap_get(ovs_other_config, "dpdk-extra");
3050 if (extra_configuration) {
3051 extra_argc = extra_dpdk_args(extra_configuration, &extra_args, 0);
3052 }
3053
3054 i = construct_dpdk_options(ovs_other_config, argv, argc, extra_args,
3055 extra_argc);
3056 i = construct_dpdk_mutex_options(ovs_other_config, argv, i, extra_args,
3057 extra_argc);
3058
3059 if (extra_configuration) {
3060 *argv = move_argv(argv, i, extra_args, extra_argc);
3061 }
3062
3063 return i + extra_argc;
3064 }
3065
3066 static char **dpdk_argv;
3067 static int dpdk_argc;
3068
3069 static void
3070 deferred_argv_release(void)
3071 {
3072 int result;
3073 for (result = 0; result < dpdk_argc; ++result) {
3074 free(dpdk_argv[result]);
3075 }
3076
3077 free(dpdk_argv);
3078 }
3079
3080 static void
3081 dpdk_init__(const struct smap *ovs_other_config)
3082 {
3083 char **argv = NULL;
3084 int result;
3085 int argc, argc_tmp;
3086 bool auto_determine = true;
3087 int err = 0;
3088 cpu_set_t cpuset;
3089 #ifndef VHOST_CUSE
3090 char *sock_dir_subcomponent;
3091 #endif
3092
3093 if (!smap_get_bool(ovs_other_config, "dpdk-init", false)) {
3094 VLOG_INFO("DPDK Disabled - to change this requires a restart.");
3095 return;
3096 }
3097
3098 VLOG_INFO("DPDK Enabled, initializing");
3099
3100 #ifdef VHOST_CUSE
3101 if (process_vhost_flags("cuse-dev-name", xstrdup("vhost-net"),
3102 PATH_MAX, ovs_other_config, &cuse_dev_name)) {
3103 #else
3104 if (process_vhost_flags("vhost-sock-dir", xstrdup(ovs_rundir()),
3105 NAME_MAX, ovs_other_config,
3106 &sock_dir_subcomponent)) {
3107 struct stat s;
3108 if (!strstr(sock_dir_subcomponent, "..")) {
3109 vhost_sock_dir = xasprintf("%s/%s", ovs_rundir(),
3110 sock_dir_subcomponent);
3111
3112 err = stat(vhost_sock_dir, &s);
3113 if (err) {
3114 VLOG_ERR("vhost-user sock directory '%s' does not exist.",
3115 vhost_sock_dir);
3116 }
3117 } else {
3118 vhost_sock_dir = xstrdup(ovs_rundir());
3119 VLOG_ERR("vhost-user sock directory request '%s/%s' has invalid "
3120 "characters '..' - using %s instead.",
3121 ovs_rundir(), sock_dir_subcomponent, ovs_rundir());
3122 }
3123 free(sock_dir_subcomponent);
3124 } else {
3125 vhost_sock_dir = sock_dir_subcomponent;
3126 #endif
3127 }
3128
3129 argv = grow_argv(&argv, 0, 1);
3130 argc = 1;
3131 argv[0] = xstrdup(ovs_get_program_name());
3132 argc_tmp = get_dpdk_args(ovs_other_config, &argv, argc);
3133
3134 while (argc_tmp != argc) {
3135 if (!strcmp("-c", argv[argc]) || !strcmp("-l", argv[argc])) {
3136 auto_determine = false;
3137 break;
3138 }
3139 argc++;
3140 }
3141 argc = argc_tmp;
3142
3143 /*
3144 * NOTE: This is an unsophisticated mechanism for determining the DPDK
3145 * lcore for the DPDK Master.
3146 */
3147 if (auto_determine) {
3148 int i;
3149 /* Get the main thread affinity */
3150 CPU_ZERO(&cpuset);
3151 err = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
3152 &cpuset);
3153 if (!err) {
3154 for (i = 0; i < CPU_SETSIZE; i++) {
3155 if (CPU_ISSET(i, &cpuset)) {
3156 argv = grow_argv(&argv, argc, 2);
3157 argv[argc++] = xstrdup("-c");
3158 argv[argc++] = xasprintf("0x%08llX", (1ULL<<i));
3159 i = CPU_SETSIZE;
3160 }
3161 }
3162 } else {
3163 VLOG_ERR("Thread getaffinity error %d. Using core 0x1", err);
3164 /* User did not set dpdk-lcore-mask and unable to get current
3165 * thread affinity - default to core 0x1 */
3166 argv = grow_argv(&argv, argc, 2);
3167 argv[argc++] = xstrdup("-c");
3168 argv[argc++] = xasprintf("0x%X", 1);
3169 }
3170 }
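/* For example, if the lowest-numbered CPU in the main thread's affinity
 * mask is CPU 5, the loop above appends "-c 0x00000020", pinning the EAL
 * master lcore to the core the daemon already runs on. */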
3171
3172 argv = grow_argv(&argv, argc, 1);
3173 argv[argc] = NULL;
3174
3175 optind = 1;
3176
3177 if (VLOG_IS_INFO_ENABLED()) {
3178 struct ds eal_args;
3179 int opt;
3180 ds_init(&eal_args);
3181 ds_put_cstr(&eal_args, "EAL ARGS:");
3182 for (opt = 0; opt < argc; ++opt) {
3183 ds_put_cstr(&eal_args, " ");
3184 ds_put_cstr(&eal_args, argv[opt]);
3185 }
3186 VLOG_INFO("%s", ds_cstr_ro(&eal_args));
3187 ds_destroy(&eal_args);
3188 }
3189
3190 /* Make sure things are initialized ... */
3191 result = rte_eal_init(argc, argv);
3192 if (result < 0) {
3193 ovs_abort(result, "Cannot init EAL");
3194 }
3195
3196 /* Set the main thread affinity back to its pre-rte_eal_init() value. */
3197 if (auto_determine && !err) {
3198 err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
3199 &cpuset);
3200 if (err) {
3201 VLOG_ERR("Thread setaffinity error %d", err);
3202 }
3203 }
3204
3205 dpdk_argv = argv;
3206 dpdk_argc = argc;
3207
3208 atexit(deferred_argv_release);
3209
3210 rte_memzone_dump(stdout);
3211 rte_eal_init_ret = 0;
3212
3213 /* We are called from the main thread here */
3214 RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
3215
3216 ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
3217
3218 #ifdef VHOST_CUSE
3219 /* Register CUSE device to handle IOCTLs.
3220 * Unless otherwise specified, cuse_dev_name is set to vhost-net.
3221 */
3222 err = rte_vhost_driver_register(cuse_dev_name);
3223
3224 if (err != 0) {
3225 VLOG_ERR("CUSE device setup failure.");
3226 return;
3227 }
3228 #endif
3229
3230 dpdk_vhost_class_init();
3231
3232 /* Finally, register the dpdk classes */
3233 netdev_dpdk_register();
3234 }
3235
3236 void
3237 dpdk_init(const struct smap *ovs_other_config)
3238 {
3239 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
3240
3241 if (ovs_other_config && ovsthread_once_start(&once)) {
3242 dpdk_init__(ovs_other_config);
3243 ovsthread_once_done(&once);
3244 }
3245 }
3246
3247 static const struct netdev_class dpdk_class =
3248 NETDEV_DPDK_CLASS(
3249 "dpdk",
3250 NULL,
3251 netdev_dpdk_construct,
3252 netdev_dpdk_destruct,
3253 netdev_dpdk_set_multiq,
3254 netdev_dpdk_eth_send,
3255 netdev_dpdk_get_carrier,
3256 netdev_dpdk_get_stats,
3257 netdev_dpdk_get_features,
3258 netdev_dpdk_get_status,
3259 netdev_dpdk_rxq_recv);
3260
3261 static const struct netdev_class dpdk_ring_class =
3262 NETDEV_DPDK_CLASS(
3263 "dpdkr",
3264 NULL,
3265 netdev_dpdk_ring_construct,
3266 netdev_dpdk_destruct,
3267 netdev_dpdk_set_multiq,
3268 netdev_dpdk_ring_send,
3269 netdev_dpdk_get_carrier,
3270 netdev_dpdk_get_stats,
3271 netdev_dpdk_get_features,
3272 netdev_dpdk_get_status,
3273 netdev_dpdk_rxq_recv);
3274
3275 static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
3276 NETDEV_DPDK_CLASS(
3277 "dpdkvhostcuse",
3278 dpdk_vhost_cuse_class_init,
3279 netdev_dpdk_vhost_cuse_construct,
3280 netdev_dpdk_vhost_destruct,
3281 netdev_dpdk_vhost_cuse_set_multiq,
3282 netdev_dpdk_vhost_send,
3283 netdev_dpdk_vhost_get_carrier,
3284 netdev_dpdk_vhost_get_stats,
3285 NULL,
3286 NULL,
3287 netdev_dpdk_vhost_rxq_recv);
3288
3289 static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
3290 NETDEV_DPDK_CLASS(
3291 "dpdkvhostuser",
3292 dpdk_vhost_user_class_init,
3293 netdev_dpdk_vhost_user_construct,
3294 netdev_dpdk_vhost_destruct,
3295 netdev_dpdk_vhost_set_multiq,
3296 netdev_dpdk_vhost_send,
3297 netdev_dpdk_vhost_get_carrier,
3298 netdev_dpdk_vhost_get_stats,
3299 NULL,
3300 NULL,
3301 netdev_dpdk_vhost_rxq_recv);
3302
3303 void
3304 netdev_dpdk_register(void)
3305 {
3306 dpdk_common_init();
3307 netdev_register_provider(&dpdk_class);
3308 netdev_register_provider(&dpdk_ring_class);
3309 #ifdef VHOST_CUSE
3310 netdev_register_provider(&dpdk_vhost_cuse_class);
3311 #else
3312 netdev_register_provider(&dpdk_vhost_user_class);
3313 #endif
3314 }
3315
3316 int
3317 pmd_thread_setaffinity_cpu(unsigned cpu)
3318 {
3319 cpu_set_t cpuset;
3320 int err;
3321
3322 CPU_ZERO(&cpuset);
3323 CPU_SET(cpu, &cpuset);
3324 err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
3325 if (err) {
3326 VLOG_ERR("Thread affinity error %d", err);
3327 return err;
3328 }
3329 /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
3330 ovs_assert(cpu != NON_PMD_CORE_ID);
3331 RTE_PER_LCORE(_lcore_id) = cpu;
3332
3333 return 0;
3334 }
3335
3336 static bool
3337 dpdk_thread_is_pmd(void)
3338 {
3339 return rte_lcore_id() != NON_PMD_CORE_ID;
3340 }