netdev-dpdk: add vhost-user get_status.
lib/netdev-dpdk.c
1 /*
2 * Copyright (c) 2014, 2015, 2016, 2017 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "netdev-dpdk.h"
19
20 #include <string.h>
21 #include <signal.h>
22 #include <stdlib.h>
23 #include <errno.h>
24 #include <unistd.h>
25 #include <linux/virtio_net.h>
26 #include <sys/socket.h>
27 #include <linux/if.h>
28
29 #include <rte_bus_pci.h>
30 #include <rte_config.h>
31 #include <rte_cycles.h>
32 #include <rte_errno.h>
33 #include <rte_eth_ring.h>
34 #include <rte_ethdev.h>
35 #include <rte_malloc.h>
36 #include <rte_mbuf.h>
37 #include <rte_meter.h>
38 #include <rte_pci.h>
39 #include <rte_vhost.h>
40 #include <rte_version.h>
41
42 #include "dirs.h"
43 #include "dp-packet.h"
44 #include "dpdk.h"
45 #include "dpif-netdev.h"
46 #include "fatal-signal.h"
47 #include "netdev-provider.h"
48 #include "netdev-vport.h"
49 #include "odp-util.h"
50 #include "openvswitch/dynamic-string.h"
51 #include "openvswitch/list.h"
52 #include "openvswitch/ofp-print.h"
53 #include "openvswitch/vlog.h"
54 #include "ovs-numa.h"
55 #include "ovs-thread.h"
56 #include "ovs-rcu.h"
57 #include "packets.h"
58 #include "openvswitch/shash.h"
59 #include "smap.h"
60 #include "sset.h"
61 #include "unaligned.h"
62 #include "timeval.h"
63 #include "unixctl.h"
64
65 enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
66
67 VLOG_DEFINE_THIS_MODULE(netdev_dpdk);
68 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
69
70 #define DPDK_PORT_WATCHDOG_INTERVAL 5
71
72 #define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
73 #define OVS_VPORT_DPDK "ovs_dpdk"
74
75 /*
76 * need to reserve tons of extra space in the mbufs so we can align the
77 * DMA addresses to 4KB.
78 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
79 * performance for standard Ethernet MTU.
80 */
81 #define ETHER_HDR_MAX_LEN (ETHER_HDR_LEN + ETHER_CRC_LEN \
82 + (2 * VLAN_HEADER_LEN))
83 #define MTU_TO_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
84 #define MTU_TO_MAX_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_MAX_LEN)
85 #define FRAME_LEN_TO_MTU(frame_len) ((frame_len) \
86 - ETHER_HDR_LEN - ETHER_CRC_LEN)
87 #define MBUF_SIZE(mtu) ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) \
88 + sizeof(struct dp_packet) \
89 + RTE_PKTMBUF_HEADROOM), \
90 RTE_CACHE_LINE_SIZE)
91 #define NETDEV_DPDK_MBUF_ALIGN 1024
92 #define NETDEV_DPDK_MAX_PKT_LEN 9728
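/*
 * Illustrative sketch (editor's note; assumes the typical RTE_PKTMBUF_HEADROOM
 * of 128 bytes): for the standard Ethernet MTU of 1500,
 *
 *     MTU_TO_FRAME_LEN(1500)     == 1500 + 14 + 4         == 1518
 *     MTU_TO_MAX_FRAME_LEN(1500) == 1500 + 14 + 4 + 2 * 4 == 1526
 *     MBUF_SIZE(1500)            == ROUND_UP(1526 + sizeof(struct dp_packet)
 *                                            + 128, RTE_CACHE_LINE_SIZE)
 *
 * so each mbuf holds a full frame plus the OVS packet metadata and headroom.
 */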
93
94 /* Min number of packets in the mempool. OVS tries to allocate a mempool with
95 * a roughly estimated number of mbufs: if this fails (because the system doesn't
96 * have enough hugepages) we keep halving the number until the allocation
97 * succeeds or we reach MIN_NB_MBUF */
98 #define MIN_NB_MBUF (4096 * 4)
99 #define MP_CACHE_SZ RTE_MEMPOOL_CACHE_MAX_SIZE
100
101 /*
102 * DPDK XSTATS Counter names definition
103 */
104 #define XSTAT_RX_64_PACKETS "rx_size_64_packets"
105 #define XSTAT_RX_65_TO_127_PACKETS "rx_size_65_to_127_packets"
106 #define XSTAT_RX_128_TO_255_PACKETS "rx_size_128_to_255_packets"
107 #define XSTAT_RX_256_TO_511_PACKETS "rx_size_256_to_511_packets"
108 #define XSTAT_RX_512_TO_1023_PACKETS "rx_size_512_to_1023_packets"
109 #define XSTAT_RX_1024_TO_1522_PACKETS "rx_size_1024_to_1522_packets"
110 #define XSTAT_RX_1523_TO_MAX_PACKETS "rx_size_1523_to_max_packets"
111
112 #define XSTAT_TX_64_PACKETS "tx_size_64_packets"
113 #define XSTAT_TX_65_TO_127_PACKETS "tx_size_65_to_127_packets"
114 #define XSTAT_TX_128_TO_255_PACKETS "tx_size_128_to_255_packets"
115 #define XSTAT_TX_256_TO_511_PACKETS "tx_size_256_to_511_packets"
116 #define XSTAT_TX_512_TO_1023_PACKETS "tx_size_512_to_1023_packets"
117 #define XSTAT_TX_1024_TO_1522_PACKETS "tx_size_1024_to_1522_packets"
118 #define XSTAT_TX_1523_TO_MAX_PACKETS "tx_size_1523_to_max_packets"
119
120 #define XSTAT_RX_MULTICAST_PACKETS "rx_multicast_packets"
121 #define XSTAT_TX_MULTICAST_PACKETS "tx_multicast_packets"
122 #define XSTAT_RX_BROADCAST_PACKETS "rx_broadcast_packets"
123 #define XSTAT_TX_BROADCAST_PACKETS "tx_broadcast_packets"
124 #define XSTAT_RX_UNDERSIZED_ERRORS "rx_undersized_errors"
125 #define XSTAT_RX_OVERSIZE_ERRORS "rx_oversize_errors"
126 #define XSTAT_RX_FRAGMENTED_ERRORS "rx_fragmented_errors"
127 #define XSTAT_RX_JABBER_ERRORS "rx_jabber_errors"
128
129 #define SOCKET0 0
130
131 /* Default size of Physical NIC RXQ */
132 #define NIC_PORT_DEFAULT_RXQ_SIZE 2048
133 /* Default size of Physical NIC TXQ */
134 #define NIC_PORT_DEFAULT_TXQ_SIZE 2048
135 /* Maximum size of Physical NIC Queues */
136 #define NIC_PORT_MAX_Q_SIZE 4096
137
138 #define OVS_VHOST_MAX_QUEUE_NUM 1024 /* Maximum number of vHost TX queues. */
139 #define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
140 #define OVS_VHOST_QUEUE_DISABLED (-2) /* Queue was disabled by guest and not
141 * yet mapped to another queue. */
142
143 #define DPDK_ETH_PORT_ID_INVALID RTE_MAX_ETHPORTS
144
145 /* DPDK library uses uint16_t for port_id. */
146 typedef uint16_t dpdk_port_t;
147
148 #define VHOST_ENQ_RETRY_NUM 8
149 #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
150
151 static const struct rte_eth_conf port_conf = {
152 .rxmode = {
153 .mq_mode = ETH_MQ_RX_RSS,
154 .split_hdr_size = 0,
155 .header_split = 0, /* Header Split disabled */
156 .hw_ip_checksum = 0, /* IP checksum offload disabled */
157 .hw_vlan_filter = 0, /* VLAN filtering disabled */
158 .jumbo_frame = 0, /* Jumbo Frame Support disabled */
159 .hw_strip_crc = 0,
160 },
161 .rx_adv_conf = {
162 .rss_conf = {
163 .rss_key = NULL,
164 .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
165 },
166 },
167 .txmode = {
168 .mq_mode = ETH_MQ_TX_NONE,
169 },
170 };
171
172 /*
173 * These callbacks allow virtio-net devices to be added to vhost ports when
174 * configuration has been fully completed.
175 */
176 static int new_device(int vid);
177 static void destroy_device(int vid);
178 static int vring_state_changed(int vid, uint16_t queue_id, int enable);
179 static const struct vhost_device_ops virtio_net_device_ops =
180 {
181 .new_device = new_device,
182 .destroy_device = destroy_device,
183 .vring_state_changed = vring_state_changed,
184 .features_changed = NULL
185 };
186
187 enum { DPDK_RING_SIZE = 256 };
188 BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
189 enum { DRAIN_TSC = 200000ULL };
190
191 enum dpdk_dev_type {
192 DPDK_DEV_ETH = 0,
193 DPDK_DEV_VHOST = 1,
194 };
195
196 /* Quality of Service */
197
198 /* An instance of a QoS configuration. Always associated with a particular
199 * network device.
200 *
201 * Each QoS implementation subclasses this with whatever additional data it
202 * needs.
203 */
204 struct qos_conf {
205 const struct dpdk_qos_ops *ops;
206 rte_spinlock_t lock;
207 };
208
209 /* A particular implementation of dpdk QoS operations.
210 *
211 * The functions below return 0 if successful or a positive errno value on
212 * failure, except where otherwise noted. All of them must be provided, except
213 * where otherwise noted.
214 */
215 struct dpdk_qos_ops {
216
217 /* Name of the QoS type */
218 const char *qos_name;
219
220 /* Called to construct a qos_conf object. The implementation should make
221 * the appropriate calls to configure QoS according to 'details'.
222 *
223 * The contents of 'details' should be documented as valid for 'ovs_name'
224 * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
225 * (which is built as ovs-vswitchd.conf.db(8)).
226 *
227 * This function must return 0 if and only if it sets '*conf' to an
228 * initialized 'struct qos_conf'.
229 *
230 * For all QoS implementations it should always be non-null.
231 */
232 int (*qos_construct)(const struct smap *details, struct qos_conf **conf);
233
234 /* Destroys the data structures allocated by the implementation as part of
235 * 'qos_conf'.
236 *
237 * For all QoS implementations it should always be non-null.
238 */
239 void (*qos_destruct)(struct qos_conf *conf);
240
241 /* Retrieves details of 'conf' configuration into 'details'.
242 *
243 * The contents of 'details' should be documented as valid for 'ovs_name'
244 * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
245 * (which is built as ovs-vswitchd.conf.db(8)).
246 */
247 int (*qos_get)(const struct qos_conf *conf, struct smap *details);
248
249 /* Returns true if 'conf' is already configured according to 'details'.
250 *
251 * The contents of 'details' should be documented as valid for 'ovs_name'
252 * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
253 * (which is built as ovs-vswitchd.conf.db(8)).
254 *
255 * For all QoS implementations it should always be non-null.
256 */
257 bool (*qos_is_equal)(const struct qos_conf *conf,
258 const struct smap *details);
259
260 /* Modify an array of rte_mbufs. The modification is specific to
261 * each qos implementation.
262 *
263 * The function should take an array of mbufs and an int representing
264 * the current number of mbufs present in the array.
265 *
266 * After the function has performed a qos modification to the array of
267 * mbufs it returns an int representing the number of mbufs now present in
268 * the array. This value can then be passed to the port send function
269 * along with the modified array for transmission.
270 *
271 * For all QoS implementations it should always be non-null.
272 */
273 int (*qos_run)(struct qos_conf *qos_conf, struct rte_mbuf **pkts,
274 int pkt_cnt, bool may_steal);
275 };
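/*
 * Hypothetical sketch (editor's illustration only; the real in-tree instance
 * is 'egress_policer_ops' below) of the simplest possible qos_run hook, a
 * pass-through that admits every packet unchanged:
 *
 *     static int
 *     noop_qos_run(struct qos_conf *conf OVS_UNUSED, struct rte_mbuf **pkts
 *                  OVS_UNUSED, int pkt_cnt, bool may_steal OVS_UNUSED)
 *     {
 *         return pkt_cnt;
 *     }
 *
 * A real implementation instead compacts 'pkts' down to the admitted packets,
 * frees the rest when 'may_steal' is true, and returns the reduced count.
 */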
276
277 /* dpdk_qos_ops for each type of user space QoS implementation */
278 static const struct dpdk_qos_ops egress_policer_ops;
279
280 /*
281 * Array of dpdk_qos_ops, contains pointer to all supported QoS
282 * operations.
283 */
284 static const struct dpdk_qos_ops *const qos_confs[] = {
285 &egress_policer_ops,
286 NULL
287 };
288
289 static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
290
291 /* Contains all 'struct dpdk_dev's. */
292 static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
293 = OVS_LIST_INITIALIZER(&dpdk_list);
294
295 static struct ovs_mutex dpdk_mp_mutex OVS_ACQ_AFTER(dpdk_mutex)
296 = OVS_MUTEX_INITIALIZER;
297
298 /* There should be one 'struct dpdk_tx_queue' created for
299 * each cpu core. */
300 struct dpdk_tx_queue {
301 rte_spinlock_t tx_lock; /* Protects the members and the NIC queue
302 * from concurrent access. It is used only
303 * if the queue is shared among different
304 * pmd threads (see 'concurrent_txq'). */
305 int map; /* Mapping of a configured vhost-user queue
306 * to the queue enabled by the guest. */
307 };
308
309 /* DPDK has no way to remove DPDK ring Ethernet devices,
310 so we have to keep them around once they've been created.
311 */
312
313 static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
314 = OVS_LIST_INITIALIZER(&dpdk_ring_list);
315
316 struct dpdk_ring {
317 /* For the client rings */
318 struct rte_ring *cring_tx;
319 struct rte_ring *cring_rx;
320 unsigned int user_port_id; /* User given port no, parsed from port name */
321 dpdk_port_t eth_port_id; /* ethernet device port id */
322 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
323 };
324
325 struct ingress_policer {
326 struct rte_meter_srtcm_params app_srtcm_params;
327 struct rte_meter_srtcm in_policer;
328 rte_spinlock_t policer_lock;
329 };
330
331 enum dpdk_hw_ol_features {
332 NETDEV_RX_CHECKSUM_OFFLOAD = 1 << 0,
333 };
334
335 /*
336 * In order to avoid confusion in variables names, following naming convention
337 * should be used, if possible:
338 *
339 * 'struct netdev' : 'netdev'
340 * 'struct netdev_dpdk' : 'dev'
341 * 'struct netdev_rxq' : 'rxq'
342 * 'struct netdev_rxq_dpdk' : 'rx'
343 *
344 * Example:
345 * struct netdev *netdev = netdev_from_name(name);
346 * struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
347 *
348 * Also, 'netdev' should be used instead of 'dev->up', where 'netdev' was
349 * already defined.
350 */
351
352 struct netdev_dpdk {
353 PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline0,
354 dpdk_port_t port_id;
355
356 /* If true, device was attached by rte_eth_dev_attach(). */
357 bool attached;
358 struct eth_addr hwaddr;
359 int mtu;
360 int socket_id;
361 int buf_size;
362 int max_packet_len;
363 enum dpdk_dev_type type;
364 enum netdev_flags flags;
365 char *devargs; /* Device arguments for dpdk ports */
366 struct dpdk_tx_queue *tx_q;
367 struct rte_eth_link link;
368 int link_reset_cnt;
369 /* 4 pad bytes here. */
370 );
371
372 PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline1,
373 struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);
374 struct rte_mempool *mp;
375
376 /* virtio identifier for vhost devices */
377 ovsrcu_index vid;
378
379 /* True if vHost device is 'up' and has been reconfigured at least once */
380 bool vhost_reconfigured;
381 /* 3 pad bytes here. */
382 );
383
384 PADDED_MEMBERS(CACHE_LINE_SIZE,
385 /* Identifier used to distinguish vhost devices from each other. */
386 char vhost_id[PATH_MAX];
387 );
388
389 PADDED_MEMBERS(CACHE_LINE_SIZE,
390 struct netdev up;
391 /* In dpdk_list. */
392 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
393
394 /* QoS configuration and lock for the device */
395 OVSRCU_TYPE(struct qos_conf *) qos_conf;
396
397 /* Ingress Policer */
398 OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
399 uint32_t policer_rate;
400 uint32_t policer_burst;
401 );
402
403 PADDED_MEMBERS(CACHE_LINE_SIZE,
404 struct netdev_stats stats;
405 /* Protects stats */
406 rte_spinlock_t stats_lock;
407 /* 44 pad bytes here. */
408 );
409
410 PADDED_MEMBERS(CACHE_LINE_SIZE,
411 /* The following properties cannot be changed when a device is running,
412 * so we remember the request and update them next time
413 * netdev_dpdk*_reconfigure() is called */
414 int requested_mtu;
415 int requested_n_txq;
416 int requested_n_rxq;
417 int requested_rxq_size;
418 int requested_txq_size;
419
420 /* Number of rx/tx descriptors for physical devices */
421 int rxq_size;
422 int txq_size;
423
424 /* Socket ID detected when vHost device is brought up */
425 int requested_socket_id;
426
427 /* Denotes whether vHost port is client/server mode */
428 uint64_t vhost_driver_flags;
429
430 /* DPDK-ETH Flow control */
431 struct rte_eth_fc_conf fc_conf;
432
433 /* DPDK-ETH hardware offload features,
434 * from the enum set 'dpdk_hw_ol_features' */
435 uint32_t hw_ol_features;
436 );
437
438 PADDED_MEMBERS(CACHE_LINE_SIZE,
439 /* Names of all XSTATS counters */
440 struct rte_eth_xstat_name *rte_xstats_names;
441 int rte_xstats_names_size;
442 int rte_xstats_ids_size;
443 uint64_t *rte_xstats_ids;
444 );
445 };
446
447 struct netdev_rxq_dpdk {
448 struct netdev_rxq up;
449 dpdk_port_t port_id;
450 };
451
452 static void netdev_dpdk_destruct(struct netdev *netdev);
453 static void netdev_dpdk_vhost_destruct(struct netdev *netdev);
454
455 int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);
456
457 struct ingress_policer *
458 netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);
459
460 static bool
461 is_dpdk_class(const struct netdev_class *class)
462 {
463 return class->destruct == netdev_dpdk_destruct
464 || class->destruct == netdev_dpdk_vhost_destruct;
465 }
466
467 /* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
468 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
469 * value, insufficient buffers are allocated to accommodate the packet in its
470 * entirety. Furthermore, certain drivers need to ensure that there is also
471 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
472 * frames). If the RX buffer is too small, then the driver enables scatter RX
473 * behaviour, which reduces performance. To prevent this, use a buffer size
474 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
475 */
476 static uint32_t
477 dpdk_buf_size(int mtu)
478 {
479 return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
480 NETDEV_DPDK_MBUF_ALIGN);
481 }
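/*
 * Worked example (editor's note; assumes RTE_PKTMBUF_HEADROOM == 128):
 * for mtu == 1500, MTU_TO_MAX_FRAME_LEN(1500) == 1526, so
 * dpdk_buf_size(1500) == ROUND_UP(1526 + 128, 1024) == 2048, a 1 KiB
 * multiple that keeps the NIC in non-scatter mode for standard frames.
 */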
482
483 /* Allocates an area of 'sz' bytes from DPDK. The memory is zero'ed.
484 *
485 * Unlike xmalloc(), this function can return NULL on failure. */
486 static void *
487 dpdk_rte_mzalloc(size_t sz)
488 {
489 return rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
490 }
491
492 void
493 free_dpdk_buf(struct dp_packet *p)
494 {
495 struct rte_mbuf *pkt = (struct rte_mbuf *) p;
496
497 rte_pktmbuf_free(pkt);
498 }
499
500 static void
501 ovs_rte_pktmbuf_init(struct rte_mempool *mp OVS_UNUSED,
502 void *opaque_arg OVS_UNUSED,
503 void *_p,
504 unsigned i OVS_UNUSED)
505 {
506 struct rte_mbuf *pkt = _p;
507
508 dp_packet_init_dpdk((struct dp_packet *) pkt, pkt->buf_len);
509 }
510
511 /* Returns a valid pointer when either of the following is true:
512 * - a new mempool was just created;
513 * - a matching mempool already exists. */
514 static struct rte_mempool *
515 dpdk_mp_create(struct netdev_dpdk *dev, int mtu)
516 {
517 char mp_name[RTE_MEMPOOL_NAMESIZE];
518 const char *netdev_name = netdev_get_name(&dev->up);
519 int socket_id = dev->requested_socket_id;
520 uint32_t n_mbufs;
521 uint32_t hash = hash_string(netdev_name, 0);
522 struct rte_mempool *mp = NULL;
523
524 /*
525 * XXX: rough estimation of number of mbufs required for this port:
526 * <packets required to fill the device rxqs>
527 * + <packets that could be stuck on other ports txqs>
528 * + <packets in the pmd threads>
529 * + <additional memory for corner cases>
530 */
531 n_mbufs = dev->requested_n_rxq * dev->requested_rxq_size
532 + dev->requested_n_txq * dev->requested_txq_size
533 + MIN(RTE_MAX_LCORE, dev->requested_n_rxq) * NETDEV_MAX_BURST
534 + MIN_NB_MBUF;
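/* Editor's illustration: with, say, 2 Rx and 2 Tx queues of the
 * default 2048 descriptors and NETDEV_MAX_BURST of 32, this estimate
 * works out to
 *     2 * 2048 + 2 * 2048 + 2 * 32 + 16384 = 24640 mbufs. */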
535
536 ovs_mutex_lock(&dpdk_mp_mutex);
537 do {
538 /* Full DPDK memory pool name must be unique and cannot be
539 * longer than RTE_MEMPOOL_NAMESIZE. */
540 int ret = snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
541 "ovs%08x%02d%05d%07u",
542 hash, socket_id, mtu, n_mbufs);
543 if (ret < 0 || ret >= RTE_MEMPOOL_NAMESIZE) {
544 VLOG_DBG("snprintf returned %d. "
545 "Failed to generate a mempool name for \"%s\". "
546 "Hash:0x%x, socket_id: %d, mtu:%d, mbufs:%u.",
547 ret, netdev_name, hash, socket_id, mtu, n_mbufs);
548 break;
549 }
550
551 VLOG_DBG("Port %s: Requesting a mempool of %u mbufs "
552 "on socket %d for %d Rx and %d Tx queues.",
553 netdev_name, n_mbufs, socket_id,
554 dev->requested_n_rxq, dev->requested_n_txq);
555
556 mp = rte_pktmbuf_pool_create(mp_name, n_mbufs, MP_CACHE_SZ,
557 sizeof (struct dp_packet) - sizeof (struct rte_mbuf),
558 MBUF_SIZE(mtu) - sizeof(struct dp_packet), socket_id);
559
560 if (mp) {
561 VLOG_DBG("Allocated \"%s\" mempool with %u mbufs",
562 mp_name, n_mbufs);
563 /* rte_pktmbuf_pool_create has done some initialization of the
564 * rte_mbuf part of each dp_packet. Some OvS specific fields
565 * of the packet still need to be initialized by
566 * ovs_rte_pktmbuf_init. */
567 rte_mempool_obj_iter(mp, ovs_rte_pktmbuf_init, NULL);
568 } else if (rte_errno == EEXIST) {
569 /* A mempool with the same name already exists. We just
570 * retrieve its pointer to be returned to the caller. */
571 mp = rte_mempool_lookup(mp_name);
572 /* As the mempool create returned EEXIST we can expect the
573 * lookup to have returned a valid pointer. If for some reason
574 * that's not the case we keep track of it. */
575 VLOG_DBG("A mempool with name \"%s\" already exists at %p.",
576 mp_name, mp);
577 } else {
578 VLOG_ERR("Failed mempool \"%s\" create request of %u mbufs",
579 mp_name, n_mbufs);
580 }
581 } while (!mp && rte_errno == ENOMEM && (n_mbufs /= 2) >= MIN_NB_MBUF);
582
583 ovs_mutex_unlock(&dpdk_mp_mutex);
584 return mp;
585 }
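/*
 * Editor's illustration of the halving loop above: a request for, say, 82176
 * mbufs that keeps failing with ENOMEM is retried with 41088 and then 20544
 * mbufs; the next halving (10272) falls below MIN_NB_MBUF (16384), so the
 * loop gives up and dpdk_mp_create() returns NULL.
 */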
586
587 /* Release an existing mempool. */
588 static void
589 dpdk_mp_free(struct rte_mempool *mp)
590 {
591 if (!mp) {
592 return;
593 }
594
595 ovs_mutex_lock(&dpdk_mp_mutex);
596 VLOG_DBG("Releasing \"%s\" mempool", mp->name);
597 rte_mempool_free(mp);
598 ovs_mutex_unlock(&dpdk_mp_mutex);
599 }
600
601 /* Tries to allocate a new mempool - or re-use an existing one where
602 * appropriate - on requested_socket_id with a size determined by
603 * requested_mtu and requested Rx/Tx queues.
604 * On success - or when re-using an existing mempool - the new configuration
605 * will be applied.
606 * On error, device will be left unchanged. */
607 static int
608 netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
609 OVS_REQUIRES(dev->mutex)
610 {
611 uint32_t buf_size = dpdk_buf_size(dev->requested_mtu);
612 struct rte_mempool *mp;
613 int ret = 0;
614
615 mp = dpdk_mp_create(dev, FRAME_LEN_TO_MTU(buf_size));
616 if (!mp) {
617 VLOG_ERR("Failed to create memory pool for netdev "
618 "%s, with MTU %d on socket %d: %s\n",
619 dev->up.name, dev->requested_mtu, dev->requested_socket_id,
620 rte_strerror(rte_errno));
621 ret = rte_errno;
622 } else {
623 /* If a new MTU was requested and its rounded value equals the one
624 * that is currently used, then the existing mempool is returned. */
625 if (dev->mp != mp) {
626 /* A new mempool was created, release the previous one. */
627 dpdk_mp_free(dev->mp);
628 } else {
629 ret = EEXIST;
630 }
631 dev->mp = mp;
632 dev->mtu = dev->requested_mtu;
633 dev->socket_id = dev->requested_socket_id;
634 dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
635 }
636
637 return ret;
638 }
639
640 static void
641 check_link_status(struct netdev_dpdk *dev)
642 {
643 struct rte_eth_link link;
644
645 rte_eth_link_get_nowait(dev->port_id, &link);
646
647 if (dev->link.link_status != link.link_status) {
648 netdev_change_seq_changed(&dev->up);
649
650 dev->link_reset_cnt++;
651 dev->link = link;
652 if (dev->link.link_status) {
653 VLOG_DBG_RL(&rl, "Port %"PRIu8" Link Up - speed %u Mbps - %s",
654 dev->port_id, (unsigned) dev->link.link_speed,
655 (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
656 ("full-duplex") : ("half-duplex"));
657 } else {
658 VLOG_DBG_RL(&rl, "Port %"PRIu8" Link Down", dev->port_id);
659 }
660 }
661 }
662
663 static void *
664 dpdk_watchdog(void *dummy OVS_UNUSED)
665 {
666 struct netdev_dpdk *dev;
667
668 pthread_detach(pthread_self());
669
670 for (;;) {
671 ovs_mutex_lock(&dpdk_mutex);
672 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
673 ovs_mutex_lock(&dev->mutex);
674 if (dev->type == DPDK_DEV_ETH) {
675 check_link_status(dev);
676 }
677 ovs_mutex_unlock(&dev->mutex);
678 }
679 ovs_mutex_unlock(&dpdk_mutex);
680 xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
681 }
682
683 return NULL;
684 }
685
686 static int
687 dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
688 {
689 int diag = 0;
690 int i;
691 struct rte_eth_conf conf = port_conf;
692
693 /* For some NICs (e.g. Niantic), scatter_rx mode needs to be explicitly
694 * enabled. */
695 if (dev->mtu > ETHER_MTU) {
696 conf.rxmode.enable_scatter = 1;
697 }
698
699 conf.rxmode.hw_ip_checksum = (dev->hw_ol_features &
700 NETDEV_RX_CHECKSUM_OFFLOAD) != 0;
701 /* A device may report more queues than it makes available (this has
702 * been observed for Intel xl710, which reserves some of them for
703 * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
704 * available. When this happens we can retry the configuration
705 * and request fewer queues. */
706 while (n_rxq && n_txq) {
707 if (diag) {
708 VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
709 }
710
711 diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &conf);
712 if (diag) {
713 VLOG_WARN("Interface %s eth_dev setup error %s\n",
714 dev->up.name, rte_strerror(-diag));
715 break;
716 }
717
718 diag = rte_eth_dev_set_mtu(dev->port_id, dev->mtu);
719 if (diag) {
720 VLOG_ERR("Interface %s MTU (%d) setup error: %s",
721 dev->up.name, dev->mtu, rte_strerror(-diag));
722 break;
723 }
724
725 for (i = 0; i < n_txq; i++) {
726 diag = rte_eth_tx_queue_setup(dev->port_id, i, dev->txq_size,
727 dev->socket_id, NULL);
728 if (diag) {
729 VLOG_INFO("Interface %s txq(%d) setup error: %s",
730 dev->up.name, i, rte_strerror(-diag));
731 break;
732 }
733 }
734
735 if (i != n_txq) {
736 /* Retry with fewer Tx queues. */
737 n_txq = i;
738 continue;
739 }
740
741 for (i = 0; i < n_rxq; i++) {
742 diag = rte_eth_rx_queue_setup(dev->port_id, i, dev->rxq_size,
743 dev->socket_id, NULL, dev->mp);
744 if (diag) {
745 VLOG_INFO("Interface %s rxq(%d) setup error: %s",
746 dev->up.name, i, rte_strerror(-diag));
747 break;
748 }
749 }
750
751 if (i != n_rxq) {
752 /* Retry with fewer Rx queues. */
753 n_rxq = i;
754 continue;
755 }
756
757 dev->up.n_rxq = n_rxq;
758 dev->up.n_txq = n_txq;
759
760 return 0;
761 }
762
763 return diag;
764 }
765
766 static void
767 dpdk_eth_flow_ctrl_setup(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex)
768 {
769 if (rte_eth_dev_flow_ctrl_set(dev->port_id, &dev->fc_conf)) {
770 VLOG_WARN("Failed to enable flow control on device %"PRIu8,
771 dev->port_id);
772 }
773 }
774
775 static int
776 dpdk_eth_dev_init(struct netdev_dpdk *dev)
777 OVS_REQUIRES(dev->mutex)
778 {
779 struct rte_pktmbuf_pool_private *mbp_priv;
780 struct rte_eth_dev_info info;
781 struct ether_addr eth_addr;
782 int diag;
783 int n_rxq, n_txq;
784 uint32_t rx_chksm_offload_capa = DEV_RX_OFFLOAD_UDP_CKSUM |
785 DEV_RX_OFFLOAD_TCP_CKSUM |
786 DEV_RX_OFFLOAD_IPV4_CKSUM;
787
788 rte_eth_dev_info_get(dev->port_id, &info);
789
790 if ((info.rx_offload_capa & rx_chksm_offload_capa) !=
791 rx_chksm_offload_capa) {
792 VLOG_WARN("Rx checksum offload is not supported on port %"PRIu8,
793 dev->port_id);
794 dev->hw_ol_features &= ~NETDEV_RX_CHECKSUM_OFFLOAD;
795 } else {
796 dev->hw_ol_features |= NETDEV_RX_CHECKSUM_OFFLOAD;
797 }
798
799 n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
800 n_txq = MIN(info.max_tx_queues, dev->up.n_txq);
801
802 diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
803 if (diag) {
804 VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
805 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
806 return -diag;
807 }
808
809 diag = rte_eth_dev_start(dev->port_id);
810 if (diag) {
811 VLOG_ERR("Interface %s start error: %s", dev->up.name,
812 rte_strerror(-diag));
813 return -diag;
814 }
815
816 rte_eth_promiscuous_enable(dev->port_id);
817 rte_eth_allmulticast_enable(dev->port_id);
818
819 memset(&eth_addr, 0x0, sizeof(eth_addr));
820 rte_eth_macaddr_get(dev->port_id, &eth_addr);
821 VLOG_INFO_RL(&rl, "Port %"PRIu8": "ETH_ADDR_FMT,
822 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));
823
824 memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
825 rte_eth_link_get_nowait(dev->port_id, &dev->link);
826
827 mbp_priv = rte_mempool_get_priv(dev->mp);
828 dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
829
830 /* Get the Flow control configuration for DPDK-ETH */
831 diag = rte_eth_dev_flow_ctrl_get(dev->port_id, &dev->fc_conf);
832 if (diag) {
833 VLOG_DBG("cannot get flow control parameters on port=%"PRIu8", err=%d",
834 dev->port_id, diag);
835 }
836
837 return 0;
838 }
839
840 static struct netdev_dpdk *
841 netdev_dpdk_cast(const struct netdev *netdev)
842 {
843 return CONTAINER_OF(netdev, struct netdev_dpdk, up);
844 }
845
846 static struct netdev *
847 netdev_dpdk_alloc(void)
848 {
849 struct netdev_dpdk *dev;
850
851 dev = dpdk_rte_mzalloc(sizeof *dev);
852 if (dev) {
853 return &dev->up;
854 }
855
856 return NULL;
857 }
858
859 static struct dpdk_tx_queue *
860 netdev_dpdk_alloc_txq(unsigned int n_txqs)
861 {
862 struct dpdk_tx_queue *txqs;
863 unsigned i;
864
865 txqs = dpdk_rte_mzalloc(n_txqs * sizeof *txqs);
866 if (txqs) {
867 for (i = 0; i < n_txqs; i++) {
868 /* Initialize map for vhost devices. */
869 txqs[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
870 rte_spinlock_init(&txqs[i].tx_lock);
871 }
872 }
873
874 return txqs;
875 }
876
877 static int
878 common_construct(struct netdev *netdev, dpdk_port_t port_no,
879 enum dpdk_dev_type type, int socket_id)
880 OVS_REQUIRES(dpdk_mutex)
881 {
882 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
883
884 ovs_mutex_init(&dev->mutex);
885
886 rte_spinlock_init(&dev->stats_lock);
887
888 /* If the 'sid' is negative, it means that the kernel failed
889 * to obtain the PCI NUMA info. In that situation, always
890 * use 'SOCKET0'. */
891 dev->socket_id = socket_id < 0 ? SOCKET0 : socket_id;
892 dev->requested_socket_id = dev->socket_id;
893 dev->port_id = port_no;
894 dev->type = type;
895 dev->flags = 0;
896 dev->requested_mtu = ETHER_MTU;
897 dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
898 ovsrcu_index_init(&dev->vid, -1);
899 dev->vhost_reconfigured = false;
900 dev->attached = false;
901
902 ovsrcu_init(&dev->qos_conf, NULL);
903
904 ovsrcu_init(&dev->ingress_policer, NULL);
905 dev->policer_rate = 0;
906 dev->policer_burst = 0;
907
908 netdev->n_rxq = 0;
909 netdev->n_txq = 0;
910 dev->requested_n_rxq = NR_QUEUE;
911 dev->requested_n_txq = NR_QUEUE;
912 dev->requested_rxq_size = NIC_PORT_DEFAULT_RXQ_SIZE;
913 dev->requested_txq_size = NIC_PORT_DEFAULT_TXQ_SIZE;
914
915 /* Initialize the flow control to NULL */
916 memset(&dev->fc_conf, 0, sizeof dev->fc_conf);
917
918 /* Initialize the hardware offload flags to 0. */
919 dev->hw_ol_features = 0;
920
921 dev->flags = NETDEV_UP | NETDEV_PROMISC;
922
923 ovs_list_push_back(&dpdk_list, &dev->list_node);
924
925 netdev_request_reconfigure(netdev);
926
927 dev->rte_xstats_names = NULL;
928 dev->rte_xstats_names_size = 0;
929
930 dev->rte_xstats_ids = NULL;
931 dev->rte_xstats_ids_size = 0;
932
933 return 0;
934 }
935
936 /* dev_name must be the prefix followed by a positive decimal number.
937 * (no leading + or - signs are allowed) */
938 static int
939 dpdk_dev_parse_name(const char dev_name[], const char prefix[],
940 unsigned int *port_no)
941 {
942 const char *cport;
943
944 if (strncmp(dev_name, prefix, strlen(prefix))) {
945 return ENODEV;
946 }
947
948 cport = dev_name + strlen(prefix);
949
950 if (str_to_uint(cport, 10, port_no)) {
951 return 0;
952 } else {
953 return ENODEV;
954 }
955 }
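/*
 * Usage sketch (editor's note): dpdk_dev_parse_name("dpdkr7", "dpdkr", &n)
 * returns 0 and sets n to 7, while "dpdkr+7", "dpdkr-7" and "eth0" all fail
 * with ENODEV.
 */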
956
957 static int
958 vhost_common_construct(struct netdev *netdev)
959 OVS_REQUIRES(dpdk_mutex)
960 {
961 int socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
962 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
963
964 dev->tx_q = netdev_dpdk_alloc_txq(OVS_VHOST_MAX_QUEUE_NUM);
965 if (!dev->tx_q) {
966 return ENOMEM;
967 }
968
969 return common_construct(netdev, DPDK_ETH_PORT_ID_INVALID,
970 DPDK_DEV_VHOST, socket_id);
971 }
972
973 static int
974 netdev_dpdk_vhost_construct(struct netdev *netdev)
975 {
976 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
977 const char *name = netdev->name;
978 int err;
979
980 /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
981 * the file system. '/' or '\' would traverse directories, so they're not
982 * acceptable in 'name'. */
983 if (strchr(name, '/') || strchr(name, '\\')) {
984 VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
985 "A valid name must not include '/' or '\\'",
986 name);
987 return EINVAL;
988 }
989
990 ovs_mutex_lock(&dpdk_mutex);
991 /* Take the name of the vhost-user port and append it to the location where
992 * the socket is to be created, then register the socket.
993 */
994 snprintf(dev->vhost_id, sizeof dev->vhost_id, "%s/%s",
995 dpdk_get_vhost_sock_dir(), name);
996
997 dev->vhost_driver_flags &= ~RTE_VHOST_USER_CLIENT;
998 err = rte_vhost_driver_register(dev->vhost_id, dev->vhost_driver_flags);
999 if (err) {
1000 VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
1001 dev->vhost_id);
1002 goto out;
1003 } else {
1004 fatal_signal_add_file_to_unlink(dev->vhost_id);
1005 VLOG_INFO("Socket %s created for vhost-user port %s\n",
1006 dev->vhost_id, name);
1007 }
1008
1009 err = rte_vhost_driver_callback_register(dev->vhost_id,
1010 &virtio_net_device_ops);
1011 if (err) {
1012 VLOG_ERR("rte_vhost_driver_callback_register failed for vhost user "
1013 "port: %s\n", name);
1014 goto out;
1015 }
1016
1017 err = rte_vhost_driver_disable_features(dev->vhost_id,
1018 1ULL << VIRTIO_NET_F_HOST_TSO4
1019 | 1ULL << VIRTIO_NET_F_HOST_TSO6
1020 | 1ULL << VIRTIO_NET_F_CSUM);
1021 if (err) {
1022 VLOG_ERR("rte_vhost_driver_disable_features failed for vhost user "
1023 "port: %s\n", name);
1024 goto out;
1025 }
1026
1027 err = rte_vhost_driver_start(dev->vhost_id);
1028 if (err) {
1029 VLOG_ERR("rte_vhost_driver_start failed for vhost user "
1030 "port: %s\n", name);
1031 goto out;
1032 }
1033
1034 err = vhost_common_construct(netdev);
1035 if (err) {
1036 VLOG_ERR("vhost_common_construct failed for vhost user "
1037 "port: %s\n", name);
1038 }
1039
1040 out:
1041 ovs_mutex_unlock(&dpdk_mutex);
1042 VLOG_WARN_ONCE("dpdkvhostuser ports are considered deprecated; "
1043 "please migrate to dpdkvhostuserclient ports.");
1044 return err;
1045 }
1046
1047 static int
1048 netdev_dpdk_vhost_client_construct(struct netdev *netdev)
1049 {
1050 int err;
1051
1052 ovs_mutex_lock(&dpdk_mutex);
1053 err = vhost_common_construct(netdev);
1054 if (err) {
1055 VLOG_ERR("vhost_common_construct failed for vhost user client "
1056 "port: %s\n", netdev->name);
1057 }
1058 ovs_mutex_unlock(&dpdk_mutex);
1059 return err;
1060 }
1061
1062 static int
1063 netdev_dpdk_construct(struct netdev *netdev)
1064 {
1065 int err;
1066
1067 ovs_mutex_lock(&dpdk_mutex);
1068 err = common_construct(netdev, DPDK_ETH_PORT_ID_INVALID,
1069 DPDK_DEV_ETH, SOCKET0);
1070 ovs_mutex_unlock(&dpdk_mutex);
1071 return err;
1072 }
1073
1074 static void
1075 common_destruct(struct netdev_dpdk *dev)
1076 OVS_REQUIRES(dpdk_mutex)
1077 OVS_EXCLUDED(dev->mutex)
1078 {
1079 rte_free(dev->tx_q);
1080 dpdk_mp_free(dev->mp);
1081
1082 ovs_list_remove(&dev->list_node);
1083 free(ovsrcu_get_protected(struct ingress_policer *,
1084 &dev->ingress_policer));
1085 ovs_mutex_destroy(&dev->mutex);
1086 }
1087
1088 static void
1089 netdev_dpdk_destruct(struct netdev *netdev)
1090 {
1091 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1092 char devname[RTE_ETH_NAME_MAX_LEN];
1093
1094 ovs_mutex_lock(&dpdk_mutex);
1095
1096 rte_eth_dev_stop(dev->port_id);
1097
1098 if (dev->attached) {
1099 rte_eth_dev_close(dev->port_id);
1100 if (rte_eth_dev_detach(dev->port_id, devname) < 0) {
1101 VLOG_ERR("Device '%s' can not be detached", dev->devargs);
1102 } else {
1103 VLOG_INFO("Device '%s' has been detached", devname);
1104 }
1105 }
1106
1107 free(dev->devargs);
1108 common_destruct(dev);
1109
1110 ovs_mutex_unlock(&dpdk_mutex);
1111 }
1112
1113 /* rte_vhost_driver_unregister() can call back destroy_device(), which will
1114 * try to acquire 'dpdk_mutex' and possibly 'dev->mutex'. To avoid a
1115 * deadlock, none of the mutexes must be held while calling this function. */
1116 static int
1117 dpdk_vhost_driver_unregister(struct netdev_dpdk *dev OVS_UNUSED,
1118 char *vhost_id)
1119 OVS_EXCLUDED(dpdk_mutex)
1120 OVS_EXCLUDED(dev->mutex)
1121 {
1122 return rte_vhost_driver_unregister(vhost_id);
1123 }
1124
1125 static void
1126 netdev_dpdk_vhost_destruct(struct netdev *netdev)
1127 {
1128 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1129 char *vhost_id;
1130
1131 ovs_mutex_lock(&dpdk_mutex);
1132
1133 /* Guest becomes an orphan if still attached. */
1134 if (netdev_dpdk_get_vid(dev) >= 0
1135 && !(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
1136 VLOG_ERR("Removing port '%s' while vhost device still attached.",
1137 netdev->name);
1138 VLOG_ERR("To restore connectivity after re-adding of port, VM on "
1139 "socket '%s' must be restarted.", dev->vhost_id);
1140 }
1141
1142 vhost_id = xstrdup(dev->vhost_id);
1143
1144 common_destruct(dev);
1145
1146 ovs_mutex_unlock(&dpdk_mutex);
1147
1148 if (!vhost_id[0]) {
1149 goto out;
1150 }
1151
1152 if (dpdk_vhost_driver_unregister(dev, vhost_id)) {
1153 VLOG_ERR("%s: Unable to unregister vhost driver for socket '%s'.\n",
1154 netdev->name, vhost_id);
1155 } else if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
1156 /* OVS server mode - remove this socket from list for deletion */
1157 fatal_signal_remove_file_to_unlink(vhost_id);
1158 }
1159 out:
1160 free(vhost_id);
1161 }
1162
1163 static void
1164 netdev_dpdk_dealloc(struct netdev *netdev)
1165 {
1166 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1167
1168 rte_free(dev);
1169 }
1170
1171 static void
1172 netdev_dpdk_clear_xstats(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex)
1173 {
1174 /* If statistics are already allocated, we have to
1175 * reconfigure, as port_id could have been changed. */
1176 if (dev->rte_xstats_names) {
1177 free(dev->rte_xstats_names);
1178 dev->rte_xstats_names = NULL;
1179 dev->rte_xstats_names_size = 0;
1180 }
1181 if (dev->rte_xstats_ids) {
1182 free(dev->rte_xstats_ids);
1183 dev->rte_xstats_ids = NULL;
1184 dev->rte_xstats_ids_size = 0;
1185 }
1186 }
1187
1188 static const char*
1189 netdev_dpdk_get_xstat_name(struct netdev_dpdk *dev, uint64_t id)
1190 {
1191 if (id >= dev->rte_xstats_names_size) {
1192 return "UNKNOWN";
1193 }
1194 return dev->rte_xstats_names[id].name;
1195 }
1196
1197 static bool
1198 netdev_dpdk_configure_xstats(struct netdev_dpdk *dev)
1199 OVS_REQUIRES(dev->mutex)
1200 {
1201 int rte_xstats_len;
1202 bool ret;
1203 struct rte_eth_xstat *rte_xstats;
1204 uint64_t id;
1205 int xstats_no;
1206 const char *name;
1207
1208 /* Retrieve all XSTATS names. If something goes wrong, or if
1209 * the number of counters is 0, the rte_xstats_names buffer
1210 * will be set to NULL, and no further xstats queries will be
1211 * performed (e.g. during netdev_dpdk_get_stats
1212 * execution). */
1213
1214 ret = false;
1215 rte_xstats = NULL;
1216
1217 if (dev->rte_xstats_names == NULL || dev->rte_xstats_ids == NULL) {
1218 dev->rte_xstats_names_size =
1219 rte_eth_xstats_get_names(dev->port_id, NULL, 0);
1220
1221 if (dev->rte_xstats_names_size < 0) {
1222 VLOG_WARN("Cannot get XSTATS for port: %"PRIu8, dev->port_id);
1223 dev->rte_xstats_names_size = 0;
1224 } else {
1225 /* Reserve memory for xstats names and values */
1226 dev->rte_xstats_names = xcalloc(dev->rte_xstats_names_size,
1227 sizeof *dev->rte_xstats_names);
1228
1229 if (dev->rte_xstats_names) {
1230 /* Retrieve xstats names. */
1231 rte_xstats_len =
1232 rte_eth_xstats_get_names(dev->port_id,
1233 dev->rte_xstats_names,
1234 dev->rte_xstats_names_size);
1235
1236 if (rte_xstats_len < 0) {
1237 VLOG_WARN("Cannot get XSTATS names for port: %"PRIu8,
1238 dev->port_id);
1239 goto out;
1240 } else if (rte_xstats_len != dev->rte_xstats_names_size) {
1241 VLOG_WARN("XSTATS size doesn't match for port: %"PRIu8,
1242 dev->port_id);
1243 goto out;
1244 }
1245
1246 dev->rte_xstats_ids = xcalloc(dev->rte_xstats_names_size,
1247 sizeof(uint64_t));
1248
1249 /* We have to calculate the number of counters. */
1250 rte_xstats = xmalloc(rte_xstats_len * sizeof *rte_xstats);
1251 memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len);
1252
1253 /* Retrieve xstats values. */
1254 if (rte_eth_xstats_get(dev->port_id, rte_xstats,
1255 rte_xstats_len) > 0) {
1256 dev->rte_xstats_ids_size = 0;
1257 xstats_no = 0;
1258 for (uint32_t i = 0; i < rte_xstats_len; i++) {
1259 id = rte_xstats[i].id;
1260 name = netdev_dpdk_get_xstat_name(dev, id);
1261 /* We need to filter out everything except
1262 * dropped, error and management counters */
1263 if (string_ends_with(name, "_errors") ||
1264 strstr(name, "_management_") ||
1265 string_ends_with(name, "_dropped")) {
1266
1267 dev->rte_xstats_ids[xstats_no] = id;
1268 xstats_no++;
1269 }
1270 }
1271 dev->rte_xstats_ids_size = xstats_no;
1272 ret = true;
1273 } else {
1274 VLOG_WARN("Can't get XSTATS IDs for port: %"PRIu8,
1275 dev->port_id);
1276 }
1277 }
1278 }
1279 } else {
1280 /* Already configured */
1281 ret = true;
1282 }
1283
1284 out:
1285 if (!ret) {
1286 netdev_dpdk_clear_xstats(dev);
1287 }
1288 return ret;
1289 }
1290
1291 static int
1292 netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
1293 {
1294 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1295
1296 ovs_mutex_lock(&dev->mutex);
1297
1298 smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq);
1299 smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
1300 smap_add_format(args, "requested_tx_queues", "%d", dev->requested_n_txq);
1301 smap_add_format(args, "configured_tx_queues", "%d", netdev->n_txq);
1302 smap_add_format(args, "mtu", "%d", dev->mtu);
1303
1304 if (dev->type == DPDK_DEV_ETH) {
1305 smap_add_format(args, "requested_rxq_descriptors", "%d",
1306 dev->requested_rxq_size);
1307 smap_add_format(args, "configured_rxq_descriptors", "%d",
1308 dev->rxq_size);
1309 smap_add_format(args, "requested_txq_descriptors", "%d",
1310 dev->requested_txq_size);
1311 smap_add_format(args, "configured_txq_descriptors", "%d",
1312 dev->txq_size);
1313 if (dev->hw_ol_features & NETDEV_RX_CHECKSUM_OFFLOAD) {
1314 smap_add(args, "rx_csum_offload", "true");
1315 } else {
1316 smap_add(args, "rx_csum_offload", "false");
1317 }
1318 }
1319 ovs_mutex_unlock(&dev->mutex);
1320
1321 return 0;
1322 }
1323
1324 static struct netdev_dpdk *
1325 netdev_dpdk_lookup_by_port_id(dpdk_port_t port_id)
1326 OVS_REQUIRES(dpdk_mutex)
1327 {
1328 struct netdev_dpdk *dev;
1329
1330 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
1331 if (dev->port_id == port_id) {
1332 return dev;
1333 }
1334 }
1335
1336 return NULL;
1337 }
1338
1339 static dpdk_port_t
1340 netdev_dpdk_process_devargs(struct netdev_dpdk *dev,
1341 const char *devargs, char **errp)
1342 {
1343 /* Get the name up to the first comma. */
1344 char *name = xmemdup0(devargs, strcspn(devargs, ","));
1345 dpdk_port_t new_port_id = DPDK_ETH_PORT_ID_INVALID;
1346
1347 if (rte_eth_dev_get_port_by_name(name, &new_port_id)
1348 || !rte_eth_dev_is_valid_port(new_port_id)) {
1349 /* Device not found in DPDK, attempt to attach it */
1350 if (!rte_eth_dev_attach(devargs, &new_port_id)) {
1351 /* Attach successful */
1352 dev->attached = true;
1353 VLOG_INFO("Device '%s' attached to DPDK", devargs);
1354 } else {
1355 /* Attach unsuccessful */
1356 new_port_id = DPDK_ETH_PORT_ID_INVALID;
1357 VLOG_WARN_BUF(errp, "Error attaching device '%s' to DPDK",
1358 devargs);
1359 }
1360 }
1361
1362 free(name);
1363 return new_port_id;
1364 }
1365
1366 static void
1367 dpdk_set_rxq_config(struct netdev_dpdk *dev, const struct smap *args)
1368 OVS_REQUIRES(dev->mutex)
1369 {
1370 int new_n_rxq;
1371
1372 new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1);
1373 if (new_n_rxq != dev->requested_n_rxq) {
1374 dev->requested_n_rxq = new_n_rxq;
1375 netdev_request_reconfigure(&dev->up);
1376 }
1377 }
1378
1379 static void
1380 dpdk_process_queue_size(struct netdev *netdev, const struct smap *args,
1381 const char *flag, int default_size, int *new_size)
1382 {
1383 int queue_size = smap_get_int(args, flag, default_size);
1384
1385 if (queue_size <= 0 || queue_size > NIC_PORT_MAX_Q_SIZE
1386 || !is_pow2(queue_size)) {
1387 queue_size = default_size;
1388 }
1389
1390 if (queue_size != *new_size) {
1391 *new_size = queue_size;
1392 netdev_request_reconfigure(netdev);
1393 }
1394 }
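/*
 * Editor's examples: with default_size == 2048, a requested value of 1000
 * (not a power of two) or 8192 (above NIC_PORT_MAX_Q_SIZE) falls back to
 * 2048, while 512 is accepted as-is. *new_size is only updated, and a
 * reconfiguration only requested, when the validated value actually differs.
 */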
1395
1396 static int
1397 netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args,
1398 char **errp)
1399 {
1400 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1401 bool rx_fc_en, tx_fc_en, autoneg;
1402 enum rte_eth_fc_mode fc_mode;
1403 static const enum rte_eth_fc_mode fc_mode_set[2][2] = {
1404 {RTE_FC_NONE, RTE_FC_TX_PAUSE},
1405 {RTE_FC_RX_PAUSE, RTE_FC_FULL }
1406 };
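/* Editor's note: the table above is indexed as
 * fc_mode_set[tx_fc_en][rx_fc_en]: both options false selects
 * RTE_FC_NONE and both true selects RTE_FC_FULL. */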
1407 const char *new_devargs;
1408 int err = 0;
1409
1410 ovs_mutex_lock(&dpdk_mutex);
1411 ovs_mutex_lock(&dev->mutex);
1412
1413 dpdk_set_rxq_config(dev, args);
1414
1415 dpdk_process_queue_size(netdev, args, "n_rxq_desc",
1416 NIC_PORT_DEFAULT_RXQ_SIZE,
1417 &dev->requested_rxq_size);
1418 dpdk_process_queue_size(netdev, args, "n_txq_desc",
1419 NIC_PORT_DEFAULT_TXQ_SIZE,
1420 &dev->requested_txq_size);
1421
1422 new_devargs = smap_get(args, "dpdk-devargs");
1423
1424 if (dev->devargs && new_devargs && strcmp(new_devargs, dev->devargs)) {
1425 /* The user requested a new device. If we return error, the caller
1426 * will delete this netdev and try to recreate it. */
1427 err = EAGAIN;
1428 goto out;
1429 }
1430
1431 /* dpdk-devargs is required for device configuration */
1432 if (new_devargs && new_devargs[0]) {
1433 /* Don't process dpdk-devargs if value is unchanged and port id
1434 * is valid */
1435 if (!(dev->devargs && !strcmp(dev->devargs, new_devargs)
1436 && rte_eth_dev_is_valid_port(dev->port_id))) {
1437 dpdk_port_t new_port_id = netdev_dpdk_process_devargs(dev,
1438 new_devargs,
1439 errp);
1440 if (!rte_eth_dev_is_valid_port(new_port_id)) {
1441 err = EINVAL;
1442 } else if (new_port_id == dev->port_id) {
1443 /* Already configured, do not reconfigure again */
1444 err = 0;
1445 } else {
1446 struct netdev_dpdk *dup_dev;
1447
1448 dup_dev = netdev_dpdk_lookup_by_port_id(new_port_id);
1449 if (dup_dev) {
1450 VLOG_WARN_BUF(errp, "'%s' is trying to use device '%s' "
1451 "which is already in use by '%s'",
1452 netdev_get_name(netdev), new_devargs,
1453 netdev_get_name(&dup_dev->up));
1454 err = EADDRINUSE;
1455 } else {
1456 int sid = rte_eth_dev_socket_id(new_port_id);
1457
1458 dev->requested_socket_id = sid < 0 ? SOCKET0 : sid;
1459 dev->devargs = xstrdup(new_devargs);
1460 dev->port_id = new_port_id;
1461 netdev_request_reconfigure(&dev->up);
1462 netdev_dpdk_clear_xstats(dev);
1463 err = 0;
1464 }
1465 }
1466 }
1467 } else {
1468 VLOG_WARN_BUF(errp, "'%s' is missing 'options:dpdk-devargs'. "
1469 "The old 'dpdk<port_id>' names are not supported",
1470 netdev_get_name(netdev));
1471 err = EINVAL;
1472 }
1473
1474 if (err) {
1475 goto out;
1476 }
1477
1478 rx_fc_en = smap_get_bool(args, "rx-flow-ctrl", false);
1479 tx_fc_en = smap_get_bool(args, "tx-flow-ctrl", false);
1480 autoneg = smap_get_bool(args, "flow-ctrl-autoneg", false);
1481
1482 fc_mode = fc_mode_set[tx_fc_en][rx_fc_en];
1483 if (dev->fc_conf.mode != fc_mode || autoneg != dev->fc_conf.autoneg) {
1484 dev->fc_conf.mode = fc_mode;
1485 dev->fc_conf.autoneg = autoneg;
1486 dpdk_eth_flow_ctrl_setup(dev);
1487 }
1488
1489 out:
1490 ovs_mutex_unlock(&dev->mutex);
1491 ovs_mutex_unlock(&dpdk_mutex);
1492
1493 return err;
1494 }
1495
1496 static int
1497 netdev_dpdk_ring_set_config(struct netdev *netdev, const struct smap *args,
1498 char **errp OVS_UNUSED)
1499 {
1500 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1501
1502 ovs_mutex_lock(&dev->mutex);
1503 dpdk_set_rxq_config(dev, args);
1504 ovs_mutex_unlock(&dev->mutex);
1505
1506 return 0;
1507 }
1508
1509 static int
1510 netdev_dpdk_vhost_client_set_config(struct netdev *netdev,
1511 const struct smap *args,
1512 char **errp OVS_UNUSED)
1513 {
1514 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1515 const char *path;
1516
1517 ovs_mutex_lock(&dev->mutex);
1518 if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
1519 path = smap_get(args, "vhost-server-path");
1520 if (path && strcmp(path, dev->vhost_id)) {
1521 strcpy(dev->vhost_id, path);
1522 netdev_request_reconfigure(netdev);
1523 }
1524 }
1525 ovs_mutex_unlock(&dev->mutex);
1526
1527 return 0;
1528 }
1529
1530 static int
1531 netdev_dpdk_get_numa_id(const struct netdev *netdev)
1532 {
1533 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1534
1535 return dev->socket_id;
1536 }
1537
1538 /* Sets the number of tx queues for the dpdk interface. */
1539 static int
1540 netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
1541 {
1542 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1543
1544 ovs_mutex_lock(&dev->mutex);
1545
1546 if (dev->requested_n_txq == n_txq) {
1547 goto out;
1548 }
1549
1550 dev->requested_n_txq = n_txq;
1551 netdev_request_reconfigure(netdev);
1552
1553 out:
1554 ovs_mutex_unlock(&dev->mutex);
1555 return 0;
1556 }
1557
1558 static struct netdev_rxq *
1559 netdev_dpdk_rxq_alloc(void)
1560 {
1561 struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);
1562
1563 if (rx) {
1564 return &rx->up;
1565 }
1566
1567 return NULL;
1568 }
1569
1570 static struct netdev_rxq_dpdk *
1571 netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
1572 {
1573 return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
1574 }
1575
1576 static int
1577 netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
1578 {
1579 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
1580 struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
1581
1582 ovs_mutex_lock(&dev->mutex);
1583 rx->port_id = dev->port_id;
1584 ovs_mutex_unlock(&dev->mutex);
1585
1586 return 0;
1587 }
1588
1589 static void
1590 netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)
1591 {
1592 }
1593
1594 static void
1595 netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
1596 {
1597 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
1598
1599 rte_free(rx);
1600 }
1601
1602 /* Tries to transmit 'pkts' to txq 'qid' of device 'dev'. Takes ownership of
1603 * 'pkts', even in case of failure.
1604 *
1605 * Returns the number of packets that weren't transmitted. */
1606 static inline int
1607 netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid,
1608 struct rte_mbuf **pkts, int cnt)
1609 {
1610 uint32_t nb_tx = 0;
1611
1612 while (nb_tx != cnt) {
1613 uint32_t ret;
1614
1615 ret = rte_eth_tx_burst(dev->port_id, qid, pkts + nb_tx, cnt - nb_tx);
1616 if (!ret) {
1617 break;
1618 }
1619
1620 nb_tx += ret;
1621 }
1622
1623 if (OVS_UNLIKELY(nb_tx != cnt)) {
1624 /* Free buffers, which we couldn't transmit, one at a time (each
1625 * packet could come from a different mempool) */
1626 int i;
1627
1628 for (i = nb_tx; i < cnt; i++) {
1629 rte_pktmbuf_free(pkts[i]);
1630 }
1631 }
1632
1633 return cnt - nb_tx;
1634 }
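/*
 * Usage sketch (editor's note), mirroring how the send path accounts for
 * drops, since this function frees any mbufs it could not hand to the NIC:
 *
 *     int dropped = netdev_dpdk_eth_tx_burst(dev, qid, pkts, cnt);
 *     if (OVS_UNLIKELY(dropped)) {
 *         rte_spinlock_lock(&dev->stats_lock);
 *         dev->stats.tx_dropped += dropped;
 *         rte_spinlock_unlock(&dev->stats_lock);
 *     }
 */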
1635
1636 static inline bool
1637 netdev_dpdk_policer_pkt_handle(struct rte_meter_srtcm *meter,
1638 struct rte_mbuf *pkt, uint64_t time)
1639 {
1640 uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);
1641
1642 return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
1643 e_RTE_METER_GREEN;
1644 }
1645
1646 static int
1647 netdev_dpdk_policer_run(struct rte_meter_srtcm *meter,
1648 struct rte_mbuf **pkts, int pkt_cnt,
1649 bool may_steal)
1650 {
1651 int i = 0;
1652 int cnt = 0;
1653 struct rte_mbuf *pkt = NULL;
1654 uint64_t current_time = rte_rdtsc();
1655
1656 for (i = 0; i < pkt_cnt; i++) {
1657 pkt = pkts[i];
1658 /* Handle current packet */
1659 if (netdev_dpdk_policer_pkt_handle(meter, pkt, current_time)) {
1660 if (cnt != i) {
1661 pkts[cnt] = pkt;
1662 }
1663 cnt++;
1664 } else {
1665 if (may_steal) {
1666 rte_pktmbuf_free(pkt);
1667 }
1668 }
1669 }
1670
1671 return cnt;
1672 }
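/*
 * Editor's sketch of configuring the srTCM meter consumed above (the
 * parameter values are illustrative only, not OVS defaults):
 *
 *     struct rte_meter_srtcm_params params = {
 *         .cir = 125000,   // committed information rate, bytes/sec
 *         .cbs = 2048,     // committed burst size, bytes
 *         .ebs = 2048,     // excess burst size, bytes
 *     };
 *     struct rte_meter_srtcm meter;
 *     int err = rte_meter_srtcm_config(&meter, &params);
 *
 * Only packets metered GREEN by rte_meter_srtcm_color_blind_check() are
 * kept; the rest are dropped (and freed when 'may_steal' is true).
 */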
1673
1674 static int
1675 ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts,
1676 int pkt_cnt, bool may_steal)
1677 {
1678 int cnt = 0;
1679
1680 rte_spinlock_lock(&policer->policer_lock);
1681 cnt = netdev_dpdk_policer_run(&policer->in_policer, pkts,
1682 pkt_cnt, may_steal);
1683 rte_spinlock_unlock(&policer->policer_lock);
1684
1685 return cnt;
1686 }
1687
1688 static bool
1689 is_vhost_running(struct netdev_dpdk *dev)
1690 {
1691 return (netdev_dpdk_get_vid(dev) >= 0 && dev->vhost_reconfigured);
1692 }
1693
1694 static inline void
1695 netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
1696 unsigned int packet_size)
1697 {
1698 /* Hard-coded search for the size bucket. */
1699 if (packet_size < 256) {
1700 if (packet_size >= 128) {
1701 stats->rx_128_to_255_packets++;
1702 } else if (packet_size <= 64) {
1703 stats->rx_1_to_64_packets++;
1704 } else {
1705 stats->rx_65_to_127_packets++;
1706 }
1707 } else {
1708 if (packet_size >= 1523) {
1709 stats->rx_1523_to_max_packets++;
1710 } else if (packet_size >= 1024) {
1711 stats->rx_1024_to_1522_packets++;
1712 } else if (packet_size < 512) {
1713 stats->rx_256_to_511_packets++;
1714 } else {
1715 stats->rx_512_to_1023_packets++;
1716 }
1717 }
1718 }
1719
1720 static inline void
1721 netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
1722 struct dp_packet **packets, int count,
1723 int dropped)
1724 {
1725 int i;
1726 unsigned int packet_size;
1727 struct dp_packet *packet;
1728
1729 stats->rx_packets += count;
1730 stats->rx_dropped += dropped;
1731 for (i = 0; i < count; i++) {
1732 packet = packets[i];
1733 packet_size = dp_packet_size(packet);
1734
1735 if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
1736 /* This only protects the following multicast counting from
1737 * too short packets, but it does not stop the packet from
1738 * further processing. */
1739 stats->rx_errors++;
1740 stats->rx_length_errors++;
1741 continue;
1742 }
1743
1744 netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);
1745
1746 struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
1747 if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
1748 stats->multicast++;
1749 }
1750
1751 stats->rx_bytes += packet_size;
1752 }
1753 }
1754
1755 /*
1756 * The receive path for the vhost port is the TX path out from guest.
1757 */
1758 static int
1759 netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
1760 struct dp_packet_batch *batch)
1761 {
1762 struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
1763 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
1764 uint16_t nb_rx = 0;
1765 uint16_t dropped = 0;
1766 int qid = rxq->queue_id;
1767 int vid = netdev_dpdk_get_vid(dev);
1768
1769 if (OVS_UNLIKELY(vid < 0 || !dev->vhost_reconfigured
1770 || !(dev->flags & NETDEV_UP))) {
1771 return EAGAIN;
1772 }
1773
1774 nb_rx = rte_vhost_dequeue_burst(vid, qid * VIRTIO_QNUM + VIRTIO_TXQ,
1775 dev->mp,
1776 (struct rte_mbuf **) batch->packets,
1777 NETDEV_MAX_BURST);
1778 if (!nb_rx) {
1779 return EAGAIN;
1780 }
1781
1782 if (policer) {
1783 dropped = nb_rx;
1784 nb_rx = ingress_policer_run(policer,
1785 (struct rte_mbuf **) batch->packets,
1786 nb_rx, true);
1787 dropped -= nb_rx;
1788 }
1789
1790 rte_spinlock_lock(&dev->stats_lock);
1791 netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
1792 nb_rx, dropped);
1793 rte_spinlock_unlock(&dev->stats_lock);
1794
1795 batch->count = nb_rx;
1796 dp_packet_batch_init_packet_fields(batch);
1797
1798 return 0;
1799 }
1800
1801 static int
1802 netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch)
1803 {
1804 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
1805 struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
1806 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
1807 int nb_rx;
1808 int dropped = 0;
1809
1810 if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
1811 return EAGAIN;
1812 }
1813
1814 nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
1815 (struct rte_mbuf **) batch->packets,
1816 NETDEV_MAX_BURST);
1817 if (!nb_rx) {
1818 return EAGAIN;
1819 }
1820
1821 if (policer) {
1822 dropped = nb_rx;
1823 nb_rx = ingress_policer_run(policer,
1824 (struct rte_mbuf **) batch->packets,
1825 nb_rx, true);
1826 dropped -= nb_rx;
1827 }
1828
1829 /* Update stats to reflect dropped packets */
1830 if (OVS_UNLIKELY(dropped)) {
1831 rte_spinlock_lock(&dev->stats_lock);
1832 dev->stats.rx_dropped += dropped;
1833 rte_spinlock_unlock(&dev->stats_lock);
1834 }
1835
1836 batch->count = nb_rx;
1837 dp_packet_batch_init_packet_fields(batch);
1838
1839 return 0;
1840 }
1841
1842 static inline int
1843 netdev_dpdk_qos_run(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
1844 int cnt, bool may_steal)
1845 {
1846 struct qos_conf *qos_conf = ovsrcu_get(struct qos_conf *, &dev->qos_conf);
1847
1848 if (qos_conf) {
1849 rte_spinlock_lock(&qos_conf->lock);
1850 cnt = qos_conf->ops->qos_run(qos_conf, pkts, cnt, may_steal);
1851 rte_spinlock_unlock(&qos_conf->lock);
1852 }
1853
1854 return cnt;
1855 }
1856
1857 static int
1858 netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
1859 int pkt_cnt)
1860 {
1861 int i = 0;
1862 int cnt = 0;
1863 struct rte_mbuf *pkt;
1864
1865 for (i = 0; i < pkt_cnt; i++) {
1866 pkt = pkts[i];
1867 if (OVS_UNLIKELY(pkt->pkt_len > dev->max_packet_len)) {
1868 VLOG_WARN_RL(&rl, "%s: Too big size %" PRIu32 " max_packet_len %d",
1869 dev->up.name, pkt->pkt_len, dev->max_packet_len);
1870 rte_pktmbuf_free(pkt);
1871 continue;
1872 }
1873
1874 if (OVS_UNLIKELY(i != cnt)) {
1875 pkts[cnt] = pkt;
1876 }
1877 cnt++;
1878 }
1879
1880 return cnt;
1881 }
1882
1883 static inline void
1884 netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
1885 struct dp_packet **packets,
1886 int attempted,
1887 int dropped)
1888 {
1889 int i;
1890 int sent = attempted - dropped;
1891
1892 stats->tx_packets += sent;
1893 stats->tx_dropped += dropped;
1894
1895 for (i = 0; i < sent; i++) {
1896 stats->tx_bytes += dp_packet_size(packets[i]);
1897 }
1898 }
1899
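/* Common vhost transmit path. Filters out oversized packets, applies QoS,
 * then enqueues the remainder to the guest with rte_vhost_enqueue_burst(),
 * retrying a bounded number of times while the virtqueue is full. Every
 * packet in 'pkts' is freed before returning, either by the filter/policer
 * or by the cleanup loop at the end. */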
1900 static void
1901 __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
1902 struct dp_packet **pkts, int cnt)
1903 {
1904 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1905 struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
1906 unsigned int total_pkts = cnt;
1907 unsigned int dropped = 0;
1908 int i, retries = 0;
1909 int vid = netdev_dpdk_get_vid(dev);
1910
1911 qid = dev->tx_q[qid % netdev->n_txq].map;
1912
1913 if (OVS_UNLIKELY(vid < 0 || !dev->vhost_reconfigured || qid < 0
1914 || !(dev->flags & NETDEV_UP))) {
1915 rte_spinlock_lock(&dev->stats_lock);
1916 dev->stats.tx_dropped += cnt;
1917 rte_spinlock_unlock(&dev->stats_lock);
1918 goto out;
1919 }
1920
1921 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
1922
1923 cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
1924 /* Check if QoS has been configured for the netdev. */
1925 cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
1926 dropped = total_pkts - cnt;
1927
1928 do {
1929 int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
1930 unsigned int tx_pkts;
1931
1932 tx_pkts = rte_vhost_enqueue_burst(vid, vhost_qid, cur_pkts, cnt);
1933 if (OVS_LIKELY(tx_pkts)) {
1934 /* Packets have been sent. */
1935 cnt -= tx_pkts;
1936 /* Prepare for possible retry. */
1937 cur_pkts = &cur_pkts[tx_pkts];
1938 } else {
1939 /* No packets sent - do not retry. */
1940 break;
1941 }
1942 } while (cnt && (retries++ <= VHOST_ENQ_RETRY_NUM));
1943
1944 rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
1945
1946 rte_spinlock_lock(&dev->stats_lock);
1947 netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
1948 cnt + dropped);
1949 rte_spinlock_unlock(&dev->stats_lock);
1950
1951 out:
1952 for (i = 0; i < total_pkts - dropped; i++) {
1953 dp_packet_delete(pkts[i]);
1954 }
1955 }
1956
1957 /* Tx function. Copies the packets into DPDK mbufs and transmits them. */
1958 static void
1959 dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
1960 OVS_NO_THREAD_SAFETY_ANALYSIS
1961 {
1962 const size_t batch_cnt = dp_packet_batch_size(batch);
1963 #if !defined(__CHECKER__) && !defined(_WIN32)
1964 const size_t PKT_ARRAY_SIZE = batch_cnt;
1965 #else
1966 /* Sparse or MSVC doesn't like variable length array. */
1967 enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
1968 #endif
1969 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1970 struct rte_mbuf *pkts[PKT_ARRAY_SIZE];
1971 uint32_t cnt = batch_cnt;
1972 uint32_t dropped = 0;
1973
1974 if (dev->type != DPDK_DEV_VHOST) {
1975 /* Check if QoS has been configured for this netdev. */
1976 cnt = netdev_dpdk_qos_run(dev, (struct rte_mbuf **) batch->packets,
1977 batch_cnt, false);
1978 dropped += batch_cnt - cnt;
1979 }
1980
1981 uint32_t txcnt = 0;
1982
1983 for (uint32_t i = 0; i < cnt; i++) {
1984 struct dp_packet *packet = batch->packets[i];
1985 uint32_t size = dp_packet_size(packet);
1986
1987 if (OVS_UNLIKELY(size > dev->max_packet_len)) {
1988 VLOG_WARN_RL(&rl, "packet size %u exceeds max_packet_len %d",
1989 size, dev->max_packet_len);
1990
1991 dropped++;
1992 continue;
1993 }
1994
1995 pkts[txcnt] = rte_pktmbuf_alloc(dev->mp);
1996 if (OVS_UNLIKELY(!pkts[txcnt])) {
1997 dropped += cnt - i;
1998 break;
1999 }
2000
2001 /* We have to do a copy for now */
2002 memcpy(rte_pktmbuf_mtod(pkts[txcnt], void *),
2003 dp_packet_data(packet), size);
2004 dp_packet_set_size((struct dp_packet *)pkts[txcnt], size);
2005
2006 txcnt++;
2007 }
2008
2009 if (OVS_LIKELY(txcnt)) {
2010 if (dev->type == DPDK_DEV_VHOST) {
2011 __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) pkts,
2012 txcnt);
2013 } else {
2014 dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, txcnt);
2015 }
2016 }
2017
2018 if (OVS_UNLIKELY(dropped)) {
2019 rte_spinlock_lock(&dev->stats_lock);
2020 dev->stats.tx_dropped += dropped;
2021 rte_spinlock_unlock(&dev->stats_lock);
2022 }
2023 }
2024
2025 static int
2026 netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
2027 struct dp_packet_batch *batch,
2028 bool concurrent_txq OVS_UNUSED)
2029 {
2030
2031 if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
2032 dpdk_do_tx_copy(netdev, qid, batch);
2033 dp_packet_delete_batch(batch, true);
2034 } else {
2035 __netdev_dpdk_vhost_send(netdev, qid, batch->packets, batch->count);
2036 }
2037 return 0;
2038 }
2039
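/* Common transmit path for physical and ring ports. The zero-copy path is
 * taken when the batch already consists of DPDK mbufs (checked via the first
 * packet's source); otherwise the packets are copied into mbufs by
 * dpdk_do_tx_copy(). Takes the per-queue tx lock when 'concurrent_txq'
 * indicates the queue may be shared between threads. */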
2040 static inline void
2041 netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
2042 struct dp_packet_batch *batch,
2043 bool concurrent_txq)
2044 {
2045 if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
2046 dp_packet_delete_batch(batch, true);
2047 return;
2048 }
2049
2050 if (OVS_UNLIKELY(concurrent_txq)) {
2051 qid = qid % dev->up.n_txq;
2052 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
2053 }
2054
2055 if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
2056 struct netdev *netdev = &dev->up;
2057
2058 dpdk_do_tx_copy(netdev, qid, batch);
2059 dp_packet_delete_batch(batch, true);
2060 } else {
2061 int tx_cnt, dropped;
2062 int batch_cnt = dp_packet_batch_size(batch);
2063 struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets;
2064
2065 tx_cnt = netdev_dpdk_filter_packet_len(dev, pkts, batch_cnt);
2066 tx_cnt = netdev_dpdk_qos_run(dev, pkts, tx_cnt, true);
2067 dropped = batch_cnt - tx_cnt;
2068
2069 dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, tx_cnt);
2070
2071 if (OVS_UNLIKELY(dropped)) {
2072 rte_spinlock_lock(&dev->stats_lock);
2073 dev->stats.tx_dropped += dropped;
2074 rte_spinlock_unlock(&dev->stats_lock);
2075 }
2076 }
2077
2078 if (OVS_UNLIKELY(concurrent_txq)) {
2079 rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
2080 }
2081 }
2082
2083 static int
2084 netdev_dpdk_eth_send(struct netdev *netdev, int qid,
2085 struct dp_packet_batch *batch, bool concurrent_txq)
2086 {
2087 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2088
2089 netdev_dpdk_send__(dev, qid, batch, concurrent_txq);
2090 return 0;
2091 }
2092
2093 static int
2094 netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
2095 {
2096 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2097
2098 ovs_mutex_lock(&dev->mutex);
2099 if (!eth_addr_equals(dev->hwaddr, mac)) {
2100 dev->hwaddr = mac;
2101 netdev_change_seq_changed(netdev);
2102 }
2103 ovs_mutex_unlock(&dev->mutex);
2104
2105 return 0;
2106 }
2107
2108 static int
2109 netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
2110 {
2111 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2112
2113 ovs_mutex_lock(&dev->mutex);
2114 *mac = dev->hwaddr;
2115 ovs_mutex_unlock(&dev->mutex);
2116
2117 return 0;
2118 }
2119
2120 static int
2121 netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
2122 {
2123 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2124
2125 ovs_mutex_lock(&dev->mutex);
2126 *mtup = dev->mtu;
2127 ovs_mutex_unlock(&dev->mutex);
2128
2129 return 0;
2130 }
2131
2132 static int
2133 netdev_dpdk_set_mtu(struct netdev *netdev, int mtu)
2134 {
2135 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2136
2137 if (MTU_TO_FRAME_LEN(mtu) > NETDEV_DPDK_MAX_PKT_LEN
2138 || mtu < ETHER_MIN_MTU) {
2139 VLOG_WARN("%s: unsupported MTU %d", dev->up.name, mtu);
2140 return EINVAL;
2141 }
2142
2143 ovs_mutex_lock(&dev->mutex);
2144 if (dev->requested_mtu != mtu) {
2145 dev->requested_mtu = mtu;
2146 netdev_request_reconfigure(netdev);
2147 }
2148 ovs_mutex_unlock(&dev->mutex);
2149
2150 return 0;
2151 }
2152
2153 static int
2154 netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);
2155
2156 static int
2157 netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
2158 struct netdev_stats *stats)
2159 {
2160 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2161
2162 ovs_mutex_lock(&dev->mutex);
2163
2164 rte_spinlock_lock(&dev->stats_lock);
2165 /* Supported Stats */
2166 stats->rx_packets = dev->stats.rx_packets;
2167 stats->tx_packets = dev->stats.tx_packets;
2168 stats->rx_dropped = dev->stats.rx_dropped;
2169 stats->tx_dropped = dev->stats.tx_dropped;
2170 stats->multicast = dev->stats.multicast;
2171 stats->rx_bytes = dev->stats.rx_bytes;
2172 stats->tx_bytes = dev->stats.tx_bytes;
2173 stats->rx_errors = dev->stats.rx_errors;
2174 stats->rx_length_errors = dev->stats.rx_length_errors;
2175
2176 stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
2177 stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
2178 stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
2179 stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
2180 stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
2181 stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
2182 stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;
2183
2184 rte_spinlock_unlock(&dev->stats_lock);
2185
2186 ovs_mutex_unlock(&dev->mutex);
2187
2188 return 0;
2189 }
2190
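/* Matches DPDK extended statistics by name and copies their values into the
 * corresponding fields of 'stats'. Unrecognized counters are ignored. */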
2191 static void
2192 netdev_dpdk_convert_xstats(struct netdev_stats *stats,
2193 const struct rte_eth_xstat *xstats,
2194 const struct rte_eth_xstat_name *names,
2195 const unsigned int size)
2196 {
2197 for (unsigned int i = 0; i < size; i++) {
2198 if (strcmp(XSTAT_RX_64_PACKETS, names[i].name) == 0) {
2199 stats->rx_1_to_64_packets = xstats[i].value;
2200 } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, names[i].name) == 0) {
2201 stats->rx_65_to_127_packets = xstats[i].value;
2202 } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, names[i].name) == 0) {
2203 stats->rx_128_to_255_packets = xstats[i].value;
2204 } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, names[i].name) == 0) {
2205 stats->rx_256_to_511_packets = xstats[i].value;
2206 } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS, names[i].name) == 0) {
2207 stats->rx_512_to_1023_packets = xstats[i].value;
2208 } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS, names[i].name) == 0) {
2209 stats->rx_1024_to_1522_packets = xstats[i].value;
2210 } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
2211 stats->rx_1523_to_max_packets = xstats[i].value;
2212 } else if (strcmp(XSTAT_TX_64_PACKETS, names[i].name) == 0) {
2213 stats->tx_1_to_64_packets = xstats[i].value;
2214 } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, names[i].name) == 0) {
2215 stats->tx_65_to_127_packets = xstats[i].value;
2216 } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, names[i].name) == 0) {
2217 stats->tx_128_to_255_packets = xstats[i].value;
2218 } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, names[i].name) == 0) {
2219 stats->tx_256_to_511_packets = xstats[i].value;
2220 } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS, names[i].name) == 0) {
2221 stats->tx_512_to_1023_packets = xstats[i].value;
2222 } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS, names[i].name) == 0) {
2223 stats->tx_1024_to_1522_packets = xstats[i].value;
2224 } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
2225 stats->tx_1523_to_max_packets = xstats[i].value;
2226 } else if (strcmp(XSTAT_RX_MULTICAST_PACKETS, names[i].name) == 0) {
2227 stats->multicast = xstats[i].value;
2228 } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, names[i].name) == 0) {
2229 stats->tx_multicast_packets = xstats[i].value;
2230 } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, names[i].name) == 0) {
2231 stats->rx_broadcast_packets = xstats[i].value;
2232 } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, names[i].name) == 0) {
2233 stats->tx_broadcast_packets = xstats[i].value;
2234 } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, names[i].name) == 0) {
2235 stats->rx_undersized_errors = xstats[i].value;
2236 } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, names[i].name) == 0) {
2237 stats->rx_fragmented_errors = xstats[i].value;
2238 } else if (strcmp(XSTAT_RX_JABBER_ERRORS, names[i].name) == 0) {
2239 stats->rx_jabber_errors = xstats[i].value;
2240 }
2241 }
2242 }
2243
2244 static int
2245 netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
2246 {
2247 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2248 struct rte_eth_stats rte_stats;
2249 bool gg;
2250
2251 netdev_dpdk_get_carrier(netdev, &gg);
2252 ovs_mutex_lock(&dev->mutex);
2253
2254 struct rte_eth_xstat *rte_xstats = NULL;
2255 struct rte_eth_xstat_name *rte_xstats_names = NULL;
2256 int rte_xstats_len, rte_xstats_new_len, rte_xstats_ret;
2257
2258 if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
2259 VLOG_ERR("Can't get ETH statistics for port: %"PRIu8, dev->port_id);
2260 ovs_mutex_unlock(&dev->mutex);
2261 return EPROTO;
2262 }
2263
2264 /* Get length of statistics */
2265 rte_xstats_len = rte_eth_xstats_get_names(dev->port_id, NULL, 0);
2266 if (rte_xstats_len < 0) {
2267 VLOG_WARN("Cannot get XSTATS values for port: %"PRIu8, dev->port_id);
2268 goto out;
2269 }
2270 /* Reserve memory for xstats names and values */
2271 rte_xstats_names = xcalloc(rte_xstats_len, sizeof *rte_xstats_names);
2272 rte_xstats = xcalloc(rte_xstats_len, sizeof *rte_xstats);
2273
2274 /* Retrieve xstats names. */
2275 rte_xstats_new_len = rte_eth_xstats_get_names(dev->port_id,
2276 rte_xstats_names,
2277 rte_xstats_len);
2278 if (rte_xstats_new_len != rte_xstats_len) {
2279 VLOG_WARN("Cannot get XSTATS names for port: %"PRIu8, dev->port_id);
2280 goto out;
2281 }
2282 /* Retrieve xstats values. */
2283 memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len);
2284 rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
2285 rte_xstats_len);
2286 if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
2287 netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_names,
2288 rte_xstats_len);
2289 } else {
2290 VLOG_WARN("Cannot get XSTATS values for port: %"PRIu8, dev->port_id);
2291 }
2292
2293 out:
2294 free(rte_xstats);
2295 free(rte_xstats_names);
2296
2297 stats->rx_packets = rte_stats.ipackets;
2298 stats->tx_packets = rte_stats.opackets;
2299 stats->rx_bytes = rte_stats.ibytes;
2300 stats->tx_bytes = rte_stats.obytes;
2301 stats->rx_errors = rte_stats.ierrors;
2302 stats->tx_errors = rte_stats.oerrors;
2303
2304 rte_spinlock_lock(&dev->stats_lock);
2305 stats->tx_dropped = dev->stats.tx_dropped;
2306 stats->rx_dropped = dev->stats.rx_dropped;
2307 rte_spinlock_unlock(&dev->stats_lock);
2308
2309 /* These are the available DPDK counters for packets not received due to
2310 * local resource constraints in DPDK and NIC respectively. */
2311 stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed;
2312 stats->rx_missed_errors = rte_stats.imissed;
2313
2314 ovs_mutex_unlock(&dev->mutex);
2315
2316 return 0;
2317 }
2318
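/* Implements 'get_custom_stats': exposes every extended statistic offered by
 * the DPDK port, using the id list cached by netdev_dpdk_configure_xstats().
 * On failure the cache is cleared so that it is rebuilt on the next call. */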
2319 static int
2320 netdev_dpdk_get_custom_stats(const struct netdev *netdev,
2321 struct netdev_custom_stats *custom_stats)
2322 {
2323
2324 uint32_t i;
2325 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2326 int rte_xstats_ret;
2327
2328 ovs_mutex_lock(&dev->mutex);
2329
2330 if (netdev_dpdk_configure_xstats(dev)) {
2331 uint64_t *values = xcalloc(dev->rte_xstats_ids_size,
2332 sizeof(uint64_t));
2333
2334 rte_xstats_ret =
2335 rte_eth_xstats_get_by_id(dev->port_id, dev->rte_xstats_ids,
2336 values, dev->rte_xstats_ids_size);
2337
2338 if (rte_xstats_ret > 0 &&
2339 rte_xstats_ret <= dev->rte_xstats_ids_size) {
2340
2341 custom_stats->size = rte_xstats_ret;
2342 custom_stats->counters =
2343 (struct netdev_custom_counter *) xcalloc(rte_xstats_ret,
2344 sizeof(struct netdev_custom_counter));
2345
2346 for (i = 0; i < rte_xstats_ret; i++) {
2347 ovs_strlcpy(custom_stats->counters[i].name,
2348 netdev_dpdk_get_xstat_name(dev,
2349 dev->rte_xstats_ids[i]),
2350 NETDEV_CUSTOM_STATS_NAME_SIZE);
2351 custom_stats->counters[i].value = values[i];
2352 }
2353 } else {
2354 VLOG_WARN("Cannot get XSTATS values for port: %"PRIu8,
2355 dev->port_id);
2356 custom_stats->counters = NULL;
2357 custom_stats->size = 0;
2358 /* Let's clear statistics cache, so it will be
2359 * reconfigured */
2360 netdev_dpdk_clear_xstats(dev);
2361 }
2362 }
2363
2364 ovs_mutex_unlock(&dev->mutex);
2365
2366 return 0;
2367 }
2368
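/* Reports the current link speed and duplex of a physical port as netdev
 * feature flags; advertised, supported and peer features are not tracked. */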
2369 static int
2370 netdev_dpdk_get_features(const struct netdev *netdev,
2371 enum netdev_features *current,
2372 enum netdev_features *advertised,
2373 enum netdev_features *supported,
2374 enum netdev_features *peer)
2375 {
2376 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2377 struct rte_eth_link link;
2378
2379 ovs_mutex_lock(&dev->mutex);
2380 link = dev->link;
2381 ovs_mutex_unlock(&dev->mutex);
2382
2383 if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
2384 if (link.link_speed == ETH_SPEED_NUM_10M) {
2385 *current = NETDEV_F_10MB_HD;
2386 }
2387 if (link.link_speed == ETH_SPEED_NUM_100M) {
2388 *current = NETDEV_F_100MB_HD;
2389 }
2390 if (link.link_speed == ETH_SPEED_NUM_1G) {
2391 *current = NETDEV_F_1GB_HD;
2392 }
2393 } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
2394 if (link.link_speed == ETH_SPEED_NUM_10M) {
2395 *current = NETDEV_F_10MB_FD;
2396 }
2397 if (link.link_speed == ETH_SPEED_NUM_100M) {
2398 *current = NETDEV_F_100MB_FD;
2399 }
2400 if (link.link_speed == ETH_SPEED_NUM_1G) {
2401 *current = NETDEV_F_1GB_FD;
2402 }
2403 if (link.link_speed == ETH_SPEED_NUM_10G) {
2404 *current = NETDEV_F_10GB_FD;
2405 }
2406 }
2407
2408 if (link.link_autoneg) {
2409 *current |= NETDEV_F_AUTONEG;
2410 }
2411
2412 *advertised = *supported = *peer = 0;
2413
2414 return 0;
2415 }
2416
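/* Allocates an ingress policer and configures its single-rate three-color
 * meter from 'rate' and 'burst' (both in kbits). Returns NULL if the
 * rte_meter configuration is rejected. */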
2417 static struct ingress_policer *
2418 netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst)
2419 {
2420 struct ingress_policer *policer = NULL;
2421 uint64_t rate_bytes;
2422 uint64_t burst_bytes;
2423 int err = 0;
2424
2425 policer = xmalloc(sizeof *policer);
2426 rte_spinlock_init(&policer->policer_lock);
2427
2428 /* rte_meter requires bytes so convert kbits rate and burst to bytes. */
2429 rate_bytes = rate * 1000ULL / 8;
2430 burst_bytes = burst * 1000ULL / 8;
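/* For example, a rate of 10000 kbit/s becomes 1,250,000 bytes/s. */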
2431
2432 policer->app_srtcm_params.cir = rate_bytes;
2433 policer->app_srtcm_params.cbs = burst_bytes;
2434 policer->app_srtcm_params.ebs = 0;
2435 err = rte_meter_srtcm_config(&policer->in_policer,
2436 &policer->app_srtcm_params);
2437 if (err) {
2438 VLOG_ERR("Could not create rte meter for ingress policer");
2439 free(policer);
2440 return NULL;
2441 }
2442
2443 return policer;
2444 }
2445
2446 static int
2447 netdev_dpdk_set_policing(struct netdev* netdev, uint32_t policer_rate,
2448 uint32_t policer_burst)
2449 {
2450 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2451 struct ingress_policer *policer;
2452
2453 /* Force to 0 if no rate specified,
2454 * default to 8000 kbits if burst is 0,
2455 * else stick with user-specified value.
2456 */
2457 policer_burst = (!policer_rate ? 0
2458 : !policer_burst ? 8000
2459 : policer_burst);
2460
2461 ovs_mutex_lock(&dev->mutex);
2462
2463 policer = ovsrcu_get_protected(struct ingress_policer *,
2464 &dev->ingress_policer);
2465
2466 if (dev->policer_rate == policer_rate &&
2467 dev->policer_burst == policer_burst) {
2468 /* Assume that settings haven't changed since we last set them. */
2469 ovs_mutex_unlock(&dev->mutex);
2470 return 0;
2471 }
2472
2473 /* Destroy any existing ingress policer for the device. */
2474 if (policer) {
2475 ovsrcu_postpone(free, policer);
2476 }
2477
2478 if (policer_rate != 0) {
2479 policer = netdev_dpdk_policer_construct(policer_rate, policer_burst);
2480 } else {
2481 policer = NULL;
2482 }
2483 ovsrcu_set(&dev->ingress_policer, policer);
2484 dev->policer_rate = policer_rate;
2485 dev->policer_burst = policer_burst;
2486 ovs_mutex_unlock(&dev->mutex);
2487
2488 return 0;
2489 }
2490
2491 static int
2492 netdev_dpdk_get_ifindex(const struct netdev *netdev)
2493 {
2494 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2495
2496 ovs_mutex_lock(&dev->mutex);
2497 /* Calculate hash from the netdev name. Ensure that ifindex is a 24-bit
2498 * positive integer to meet RFC 2863 recommendations.
2499 */
2500 int ifindex = hash_string(netdev->name, 0) % 0xfffffe + 1;
2501 ovs_mutex_unlock(&dev->mutex);
2502
2503 return ifindex;
2504 }
2505
2506 static int
2507 netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
2508 {
2509 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2510
2511 ovs_mutex_lock(&dev->mutex);
2512 check_link_status(dev);
2513 *carrier = dev->link.link_status;
2514
2515 ovs_mutex_unlock(&dev->mutex);
2516
2517 return 0;
2518 }
2519
2520 static int
2521 netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
2522 {
2523 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2524
2525 ovs_mutex_lock(&dev->mutex);
2526
2527 if (is_vhost_running(dev)) {
2528 *carrier = 1;
2529 } else {
2530 *carrier = 0;
2531 }
2532
2533 ovs_mutex_unlock(&dev->mutex);
2534
2535 return 0;
2536 }
2537
2538 static long long int
2539 netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
2540 {
2541 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2542 long long int carrier_resets;
2543
2544 ovs_mutex_lock(&dev->mutex);
2545 carrier_resets = dev->link_reset_cnt;
2546 ovs_mutex_unlock(&dev->mutex);
2547
2548 return carrier_resets;
2549 }
2550
2551 static int
2552 netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
2553 long long int interval OVS_UNUSED)
2554 {
2555 return EOPNOTSUPP;
2556 }
2557
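/* Applies NETDEV_UP / NETDEV_PROMISC flag changes to the device; any other
 * flag is rejected with EINVAL. Caller must hold 'dev->mutex'. */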
2558 static int
2559 netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
2560 enum netdev_flags off, enum netdev_flags on,
2561 enum netdev_flags *old_flagsp)
2562 OVS_REQUIRES(dev->mutex)
2563 {
2564 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
2565 return EINVAL;
2566 }
2567
2568 *old_flagsp = dev->flags;
2569 dev->flags |= on;
2570 dev->flags &= ~off;
2571
2572 if (dev->flags == *old_flagsp) {
2573 return 0;
2574 }
2575
2576 if (dev->type == DPDK_DEV_ETH) {
2577 if (dev->flags & NETDEV_PROMISC) {
2578 rte_eth_promiscuous_enable(dev->port_id);
2579 }
2580
2581 netdev_change_seq_changed(&dev->up);
2582 } else {
2583 /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is
2584 * running then change netdev's change_seq to trigger link state
2585 * update. */
2586
2587 if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off)))
2588 && is_vhost_running(dev)) {
2589 netdev_change_seq_changed(&dev->up);
2590
2591 /* Clear statistics if device is getting up. */
2592 if (NETDEV_UP & on) {
2593 rte_spinlock_lock(&dev->stats_lock);
2594 memset(&dev->stats, 0, sizeof dev->stats);
2595 rte_spinlock_unlock(&dev->stats_lock);
2596 }
2597 }
2598 }
2599
2600 return 0;
2601 }
2602
2603 static int
2604 netdev_dpdk_update_flags(struct netdev *netdev,
2605 enum netdev_flags off, enum netdev_flags on,
2606 enum netdev_flags *old_flagsp)
2607 {
2608 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2609 int error;
2610
2611 ovs_mutex_lock(&dev->mutex);
2612 error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
2613 ovs_mutex_unlock(&dev->mutex);
2614
2615 return error;
2616 }
2617
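/* Implements 'get_status' for vhost-user ports: reports client/server mode,
 * connection state and, once connected, the socket path, negotiated virtio
 * features, MTU, NUMA node and the size of each vring. */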
2618 static int
2619 netdev_dpdk_vhost_user_get_status(const struct netdev *netdev,
2620 struct smap *args)
2621 {
2622 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2623
2624 ovs_mutex_lock(&dev->mutex);
2625
2626 bool client_mode = dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT;
2627 smap_add_format(args, "mode", "%s", client_mode ? "client" : "server");
2628
2629 int vid = netdev_dpdk_get_vid(dev);
2630 if (vid < 0) {
2631 smap_add(args, "status", "disconnected");
2632 ovs_mutex_unlock(&dev->mutex);
2633 return 0;
2634 } else {
2635 smap_add(args, "status", "connected");
2636 }
2637
2638 char socket_name[PATH_MAX];
2639 if (!rte_vhost_get_ifname(vid, socket_name, PATH_MAX)) {
2640 smap_add_format(args, "socket", "%s", socket_name);
2641 }
2642
2643 uint64_t features;
2644 if (!rte_vhost_get_negotiated_features(vid, &features)) {
2645 smap_add_format(args, "features", "0x%016"PRIx64, features);
2646 }
2647
2648 uint16_t mtu;
2649 if (!rte_vhost_get_mtu(vid, &mtu)) {
2650 smap_add_format(args, "mtu", "%d", mtu);
2651 }
2652
2653 int numa = rte_vhost_get_numa_node(vid);
2654 if (numa >= 0) {
2655 smap_add_format(args, "numa", "%d", numa);
2656 }
2657
2658 uint16_t vring_num = rte_vhost_get_vring_num(vid);
2659 if (vring_num) {
2660 smap_add_format(args, "num_of_vrings", "%d", vring_num);
2661 }
2662
2663 for (int i = 0; i < vring_num; i++) {
2664 struct rte_vhost_vring vring;
2665 char vhost_vring[16];
2666
2667 rte_vhost_get_vhost_vring(vid, i, &vring);
2668 snprintf(vhost_vring, 16, "vring_%d_size", i);
2669 smap_add_format(args, vhost_vring, "%d", vring.size);
2670 }
2671
2672 ovs_mutex_unlock(&dev->mutex);
2673 return 0;
2674 }
2675
2676 static int
2677 netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
2678 {
2679 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2680 struct rte_eth_dev_info dev_info;
2681
2682 if (!rte_eth_dev_is_valid_port(dev->port_id)) {
2683 return ENODEV;
2684 }
2685
2686 ovs_mutex_lock(&dev->mutex);
2687 rte_eth_dev_info_get(dev->port_id, &dev_info);
2688 ovs_mutex_unlock(&dev->mutex);
2689
2690 smap_add_format(args, "port_no", "%d", dev->port_id);
2691 smap_add_format(args, "numa_id", "%d",
2692 rte_eth_dev_socket_id(dev->port_id));
2693 smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
2694 smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
2695 smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
2696 smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
2697 smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
2698 smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
2699 smap_add_format(args, "max_hash_mac_addrs", "%u",
2700 dev_info.max_hash_mac_addrs);
2701 smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
2702 smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);
2703
2704 /* Querying the DPDK library for iftype may be done in future, pending
2705 * support; cf. RFC 3635 Section 3.2.4. */
2706 enum { IF_TYPE_ETHERNETCSMACD = 6 };
2707
2708 smap_add_format(args, "if_type", "%"PRIu32, IF_TYPE_ETHERNETCSMACD);
2709 smap_add_format(args, "if_descr", "%s %s", rte_version(),
2710 dev_info.driver_name);
2711
2712 if (dev_info.pci_dev) {
2713 smap_add_format(args, "pci-vendor_id", "0x%x",
2714 dev_info.pci_dev->id.vendor_id);
2715 smap_add_format(args, "pci-device_id", "0x%x",
2716 dev_info.pci_dev->id.device_id);
2717 }
2718
2719 return 0;
2720 }
2721
2722 static void
2723 netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
2724 OVS_REQUIRES(dev->mutex)
2725 {
2726 enum netdev_flags old_flags;
2727
2728 if (admin_state) {
2729 netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
2730 } else {
2731 netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
2732 }
2733 }
2734
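/* unixctl handler for "netdev-dpdk/set-admin-state". With a netdev argument
 * it changes that port only, otherwise it applies to every DPDK port.
 * Example invocation (the port name "dpdk0" is illustrative):
 *
 *     ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
 */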
2735 static void
2736 netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
2737 const char *argv[], void *aux OVS_UNUSED)
2738 {
2739 bool up;
2740
2741 if (!strcasecmp(argv[argc - 1], "up")) {
2742 up = true;
2743 } else if (!strcasecmp(argv[argc - 1], "down")) {
2744 up = false;
2745 } else {
2746 unixctl_command_reply_error(conn, "Invalid Admin State");
2747 return;
2748 }
2749
2750 if (argc > 2) {
2751 struct netdev *netdev = netdev_from_name(argv[1]);
2752
2753 if (netdev && is_dpdk_class(netdev->netdev_class)) {
2754 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2755
2756 ovs_mutex_lock(&dev->mutex);
2757 netdev_dpdk_set_admin_state__(dev, up);
2758 ovs_mutex_unlock(&dev->mutex);
2759
2760 netdev_close(netdev);
2761 } else {
2762 unixctl_command_reply_error(conn, "Not a DPDK Interface");
2763 netdev_close(netdev);
2764 return;
2765 }
2766 } else {
2767 struct netdev_dpdk *dev;
2768
2769 ovs_mutex_lock(&dpdk_mutex);
2770 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
2771 ovs_mutex_lock(&dev->mutex);
2772 netdev_dpdk_set_admin_state__(dev, up);
2773 ovs_mutex_unlock(&dev->mutex);
2774 }
2775 ovs_mutex_unlock(&dpdk_mutex);
2776 }
2777 unixctl_command_reply(conn, "OK");
2778 }
2779
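/* unixctl handler for "netdev-dpdk/detach". Detaches the device with the
 * given PCI address from DPDK, provided no interface is still using it.
 * Example (the address below is illustrative):
 *
 *     ovs-appctl netdev-dpdk/detach 0000:01:00.0
 */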
2780 static void
2781 netdev_dpdk_detach(struct unixctl_conn *conn, int argc OVS_UNUSED,
2782 const char *argv[], void *aux OVS_UNUSED)
2783 {
2784 int ret;
2785 char *response;
2786 dpdk_port_t port_id;
2787 char devname[RTE_ETH_NAME_MAX_LEN];
2788 struct netdev_dpdk *dev;
2789
2790 ovs_mutex_lock(&dpdk_mutex);
2791
2792 if (rte_eth_dev_get_port_by_name(argv[1], &port_id)) {
2793 response = xasprintf("Device '%s' not found in DPDK", argv[1]);
2794 goto error;
2795 }
2796
2797 dev = netdev_dpdk_lookup_by_port_id(port_id);
2798 if (dev) {
2799 response = xasprintf("Device '%s' is being used by interface '%s'. "
2800 "Remove it before detaching",
2801 argv[1], netdev_get_name(&dev->up));
2802 goto error;
2803 }
2804
2805 rte_eth_dev_close(port_id);
2806
2807 ret = rte_eth_dev_detach(port_id, devname);
2808 if (ret < 0) {
2809 response = xasprintf("Device '%s' can not be detached", argv[1]);
2810 goto error;
2811 }
2812
2813 response = xasprintf("Device '%s' has been detached", argv[1]);
2814
2815 ovs_mutex_unlock(&dpdk_mutex);
2816 unixctl_command_reply(conn, response);
2817 free(response);
2818 return;
2819
2820 error:
2821 ovs_mutex_unlock(&dpdk_mutex);
2822 unixctl_command_reply_error(conn, response);
2823 free(response);
2824 }
2825
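/* unixctl handler for "netdev-dpdk/get-mempool-info". Dumps the mempool of
 * the named DPDK port, or lists every mempool when no argument is given.
 * Example (port name illustrative):
 *
 *     ovs-appctl netdev-dpdk/get-mempool-info dpdk0
 */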
2826 static void
2827 netdev_dpdk_get_mempool_info(struct unixctl_conn *conn,
2828 int argc, const char *argv[],
2829 void *aux OVS_UNUSED)
2830 {
2831 size_t size;
2832 FILE *stream;
2833 char *response = NULL;
2834 struct netdev *netdev = NULL;
2835
2836 if (argc == 2) {
2837 netdev = netdev_from_name(argv[1]);
2838 if (!netdev || !is_dpdk_class(netdev->netdev_class)) {
2839 unixctl_command_reply_error(conn, "Not a DPDK Interface");
2840 goto out;
2841 }
2842 }
2843
2844 stream = open_memstream(&response, &size);
2845 if (!stream) {
2846 response = xasprintf("Unable to open memstream: %s.",
2847 ovs_strerror(errno));
2848 unixctl_command_reply_error(conn, response);
2849 goto out;
2850 }
2851
2852 if (netdev) {
2853 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2854
2855 ovs_mutex_lock(&dev->mutex);
2856 ovs_mutex_lock(&dpdk_mp_mutex);
2857
2858 rte_mempool_dump(stream, dev->mp);
2859
2860 ovs_mutex_unlock(&dpdk_mp_mutex);
2861 ovs_mutex_unlock(&dev->mutex);
2862 } else {
2863 ovs_mutex_lock(&dpdk_mp_mutex);
2864 rte_mempool_list_dump(stream);
2865 ovs_mutex_unlock(&dpdk_mp_mutex);
2866 }
2867
2868 fclose(stream);
2869
2870 unixctl_command_reply(conn, response);
2871 out:
2872 free(response);
2873 netdev_close(netdev);
2874 }
2875
2876 /*
2877 * Set virtqueue flags so that we do not receive interrupts.
2878 */
2879 static void
2880 set_irq_status(int vid)
2881 {
2882 uint32_t i;
2883
2884 for (i = 0; i < rte_vhost_get_vring_num(vid); i++) {
2885 rte_vhost_enable_guest_notification(vid, i, 0);
2886 }
2887 }
2888
2889 /*
2890 * Fixes mapping for vhost-user tx queues. Must be called after each
2891 * enabling/disabling of queues and n_txq modifications.
2892 */
2893 static void
2894 netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
2895 OVS_REQUIRES(dev->mutex)
2896 {
2897 int *enabled_queues, n_enabled = 0;
2898 int i, k, total_txqs = dev->up.n_txq;
2899
2900 enabled_queues = xcalloc(total_txqs, sizeof *enabled_queues);
2901
2902 for (i = 0; i < total_txqs; i++) {
2903 /* Enabled queues are always mapped to themselves. */
2904 if (dev->tx_q[i].map == i) {
2905 enabled_queues[n_enabled++] = i;
2906 }
2907 }
2908
2909 if (n_enabled == 0 && total_txqs != 0) {
2910 enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
2911 n_enabled = 1;
2912 }
2913
2914 k = 0;
2915 for (i = 0; i < total_txqs; i++) {
2916 if (dev->tx_q[i].map != i) {
2917 dev->tx_q[i].map = enabled_queues[k];
2918 k = (k + 1) % n_enabled;
2919 }
2920 }
2921
2922 VLOG_DBG("TX queue mapping for %s", dev->vhost_id);
2923 for (i = 0; i < total_txqs; i++) {
2924 VLOG_DBG("%2d --> %2d", i, dev->tx_q[i].map);
2925 }
2926
2927 free(enabled_queues);
2928 }
2929
2930 /*
2931 * A new virtio-net device is added to a vhost port.
2932 */
2933 static int
2934 new_device(int vid)
2935 {
2936 struct netdev_dpdk *dev;
2937 bool exists = false;
2938 int newnode = 0;
2939 char ifname[IF_NAME_SZ];
2940
2941 rte_vhost_get_ifname(vid, ifname, sizeof ifname);
2942
2943 ovs_mutex_lock(&dpdk_mutex);
2944 /* Add device to the vhost port with the same name as that passed down. */
2945 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
2946 ovs_mutex_lock(&dev->mutex);
2947 if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
2948 uint32_t qp_num = rte_vhost_get_vring_num(vid) / VIRTIO_QNUM;
2949
2950 /* Get NUMA information */
2951 newnode = rte_vhost_get_numa_node(vid);
2952 if (newnode == -1) {
2953 #ifdef VHOST_NUMA
2954 VLOG_INFO("Error getting NUMA info for vHost Device '%s'",
2955 ifname);
2956 #endif
2957 newnode = dev->socket_id;
2958 }
2959
2960 if (dev->requested_n_txq != qp_num
2961 || dev->requested_n_rxq != qp_num
2962 || dev->requested_socket_id != newnode) {
2963 dev->requested_socket_id = newnode;
2964 dev->requested_n_rxq = qp_num;
2965 dev->requested_n_txq = qp_num;
2966 netdev_request_reconfigure(&dev->up);
2967 } else {
2968 /* Reconfiguration not required. */
2969 dev->vhost_reconfigured = true;
2970 }
2971
2972 ovsrcu_index_set(&dev->vid, vid);
2973 exists = true;
2974
2975 /* Disable notifications. */
2976 set_irq_status(vid);
2977 netdev_change_seq_changed(&dev->up);
2978 ovs_mutex_unlock(&dev->mutex);
2979 break;
2980 }
2981 ovs_mutex_unlock(&dev->mutex);
2982 }
2983 ovs_mutex_unlock(&dpdk_mutex);
2984
2985 if (!exists) {
2986 VLOG_INFO("vHost Device '%s' can't be added - name not found", ifname);
2987
2988 return -1;
2989 }
2990
2991 VLOG_INFO("vHost Device '%s' has been added on numa node %i",
2992 ifname, newnode);
2993
2994 return 0;
2995 }
2996
2997 /* Clears mapping for all available queues of vhost interface. */
2998 static void
2999 netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
3000 OVS_REQUIRES(dev->mutex)
3001 {
3002 int i;
3003
3004 for (i = 0; i < dev->up.n_txq; i++) {
3005 dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
3006 }
3007 }
3008
3009 /*
3010 * Remove a virtio-net device from the specific vhost port. Use dev->remove
3011 * flag to stop any more packets from being sent or received to/from a VM and
3012 * ensure all currently queued packets have been sent/received before removing
3013 * the device.
3014 */
3015 static void
3016 destroy_device(int vid)
3017 {
3018 struct netdev_dpdk *dev;
3019 bool exists = false;
3020 char ifname[IF_NAME_SZ];
3021
3022 rte_vhost_get_ifname(vid, ifname, sizeof ifname);
3023
3024 ovs_mutex_lock(&dpdk_mutex);
3025 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
3026 if (netdev_dpdk_get_vid(dev) == vid) {
3027
3028 ovs_mutex_lock(&dev->mutex);
3029 dev->vhost_reconfigured = false;
3030 ovsrcu_index_set(&dev->vid, -1);
3031 netdev_dpdk_txq_map_clear(dev);
3032
3033 netdev_change_seq_changed(&dev->up);
3034 ovs_mutex_unlock(&dev->mutex);
3035 exists = true;
3036 break;
3037 }
3038 }
3039
3040 ovs_mutex_unlock(&dpdk_mutex);
3041
3042 if (exists) {
3043 /*
3044 * Wait for other threads to quiesce after setting the 'virtio_dev'
3045 * to NULL, before returning.
3046 */
3047 ovsrcu_synchronize();
3048 /*
3049 * As call to ovsrcu_synchronize() will end the quiescent state,
3050 * put thread back into quiescent state before returning.
3051 */
3052 ovsrcu_quiesce_start();
3053 VLOG_INFO("vHost Device '%s' has been removed", ifname);
3054 } else {
3055 VLOG_INFO("vHost Device '%s' not found", ifname);
3056 }
3057 }
3058
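/* vhost callback invoked when the guest enables or disables a virtqueue.
 * Only guest RX rings (the host's TX direction) affect the tx queue mapping;
 * state changes of guest TX rings are ignored. */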
3059 static int
3060 vring_state_changed(int vid, uint16_t queue_id, int enable)
3061 {
3062 struct netdev_dpdk *dev;
3063 bool exists = false;
3064 int qid = queue_id / VIRTIO_QNUM;
3065 char ifname[IF_NAME_SZ];
3066
3067 rte_vhost_get_ifname(vid, ifname, sizeof ifname);
3068
3069 if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
3070 return 0;
3071 }
3072
3073 ovs_mutex_lock(&dpdk_mutex);
3074 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
3075 ovs_mutex_lock(&dev->mutex);
3076 if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
3077 if (enable) {
3078 dev->tx_q[qid].map = qid;
3079 } else {
3080 dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
3081 }
3082 netdev_dpdk_remap_txqs(dev);
3083 exists = true;
3084 ovs_mutex_unlock(&dev->mutex);
3085 break;
3086 }
3087 ovs_mutex_unlock(&dev->mutex);
3088 }
3089 ovs_mutex_unlock(&dpdk_mutex);
3090
3091 if (exists) {
3092 VLOG_INFO("State of queue %d (tx_qid %d) of vhost device '%s' "
3093 "changed to '%s'", queue_id, qid, ifname,
3094 (enable == 1) ? "enabled" : "disabled");
3095 } else {
3096 VLOG_INFO("vHost Device '%s' not found", ifname);
3097 return -1;
3098 }
3099
3100 return 0;
3101 }
3102
3103 int
3104 netdev_dpdk_get_vid(const struct netdev_dpdk *dev)
3105 {
3106 return ovsrcu_index_get(&dev->vid);
3107 }
3108
3109 struct ingress_policer *
3110 netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev)
3111 {
3112 return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer);
3113 }
3114
3115 static int
3116 netdev_dpdk_class_init(void)
3117 {
3118 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
3119
3120 /* This function can be called for different classes. The initialization
3121 * needs to be done only once. */
3122 if (ovsthread_once_start(&once)) {
3123 ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
3124 unixctl_command_register("netdev-dpdk/set-admin-state",
3125 "[netdev] up|down", 1, 2,
3126 netdev_dpdk_set_admin_state, NULL);
3127
3128 unixctl_command_register("netdev-dpdk/detach",
3129 "pci address of device", 1, 1,
3130 netdev_dpdk_detach, NULL);
3131
3132 unixctl_command_register("netdev-dpdk/get-mempool-info",
3133 "[netdev]", 0, 1,
3134 netdev_dpdk_get_mempool_info, NULL);
3135
3136 ovsthread_once_done(&once);
3137 }
3138
3139 return 0;
3140 }
3141
3142
3143 /* Client Rings */
3144
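/* Creates the single-producer tx and single-consumer rx rte_rings for a
 * 'dpdkr' port and binds them to a new ethdev with rte_eth_from_rings(). */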
3145 static int
3146 dpdk_ring_create(const char dev_name[], unsigned int port_no,
3147 dpdk_port_t *eth_port_id)
3148 {
3149 struct dpdk_ring *ring_pair;
3150 char *ring_name;
3151 int port_id;
3152
3153 ring_pair = dpdk_rte_mzalloc(sizeof *ring_pair);
3154 if (!ring_pair) {
3155 return ENOMEM;
3156 }
3157
3158 /* XXX: Add support for multiqueue ring. */
3159 ring_name = xasprintf("%s_tx", dev_name);
3160
3161 /* Create single producer tx ring, netdev does explicit locking. */
3162 ring_pair->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
3163 RING_F_SP_ENQ);
3164 free(ring_name);
3165 if (ring_pair->cring_tx == NULL) {
3166 rte_free(ring_pair);
3167 return ENOMEM;
3168 }
3169
3170 ring_name = xasprintf("%s_rx", dev_name);
3171
3172 /* Create single consumer rx ring, netdev does explicit locking. */
3173 ring_pair->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
3174 RING_F_SC_DEQ);
3175 free(ring_name);
3176 if (ring_pair->cring_rx == NULL) {
3177 rte_free(ring_pair);
3178 return ENOMEM;
3179 }
3180
3181 port_id = rte_eth_from_rings(dev_name, &ring_pair->cring_rx, 1,
3182 &ring_pair->cring_tx, 1, SOCKET0);
3183
3184 if (port_id < 0) {
3185 rte_free(ring_pair);
3186 return ENODEV;
3187 }
3188
3189 ring_pair->user_port_id = port_no;
3190 ring_pair->eth_port_id = port_id;
3191 *eth_port_id = port_id;
3192
3193 ovs_list_push_back(&dpdk_ring_list, &ring_pair->list_node);
3194
3195 return 0;
3196 }
3197
3198 static int
3199 dpdk_ring_open(const char dev_name[], dpdk_port_t *eth_port_id)
3200 OVS_REQUIRES(dpdk_mutex)
3201 {
3202 struct dpdk_ring *ring_pair;
3203 unsigned int port_no;
3204 int err = 0;
3205
3206 /* Names always start with "dpdkr" */
3207 err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
3208 if (err) {
3209 return err;
3210 }
3211
3212 /* Look through our list to find the device */
3213 LIST_FOR_EACH (ring_pair, list_node, &dpdk_ring_list) {
3214 if (ring_pair->user_port_id == port_no) {
3215 VLOG_INFO("Found dpdk ring device %s:", dev_name);
3216 /* Really all that is needed */
3217 *eth_port_id = ring_pair->eth_port_id;
3218 return 0;
3219 }
3220 }
3221 /* Need to create the device rings */
3222 return dpdk_ring_create(dev_name, port_no, eth_port_id);
3223 }
3224
3225 static int
3226 netdev_dpdk_ring_send(struct netdev *netdev, int qid,
3227 struct dp_packet_batch *batch, bool concurrent_txq)
3228 {
3229 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
3230 struct dp_packet *packet;
3231
3232 /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
3233 * the rss hash field is clear. This is because the same mbuf may be
3234 * modified by the consumer of the ring and returned to the datapath
3235 * without recalculating the RSS hash. */
3236 DP_PACKET_BATCH_FOR_EACH (packet, batch) {
3237 dp_packet_mbuf_rss_flag_reset(packet);
3238 }
3239
3240 netdev_dpdk_send__(dev, qid, batch, concurrent_txq);
3241 return 0;
3242 }
3243
3244 static int
3245 netdev_dpdk_ring_construct(struct netdev *netdev)
3246 {
3247 dpdk_port_t port_no = 0;
3248 int err = 0;
3249
3250 ovs_mutex_lock(&dpdk_mutex);
3251
3252 err = dpdk_ring_open(netdev->name, &port_no);
3253 if (err) {
3254 goto unlock_dpdk;
3255 }
3256
3257 err = common_construct(netdev, port_no, DPDK_DEV_ETH,
3258 rte_eth_dev_socket_id(port_no));
3259 unlock_dpdk:
3260 ovs_mutex_unlock(&dpdk_mutex);
3261 return err;
3262 }
3263
3264 /* QoS Functions */
3265
3266 /*
3267 * Initialize QoS configuration operations.
3268 */
3269 static void
3270 qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
3271 {
3272 conf->ops = ops;
3273 rte_spinlock_init(&conf->lock);
3274 }
3275
3276 /*
3277 * Search the existing QoS operations in 'qos_confs' and compare each set
3278 * of operations' qos_name to 'name'. Return a pointer to the matching
3279 * dpdk_qos_ops, otherwise return NULL.
3280 */
3281 static const struct dpdk_qos_ops *
3282 qos_lookup_name(const char *name)
3283 {
3284 const struct dpdk_qos_ops *const *opsp;
3285
3286 for (opsp = qos_confs; *opsp != NULL; opsp++) {
3287 const struct dpdk_qos_ops *ops = *opsp;
3288 if (!strcmp(name, ops->qos_name)) {
3289 return ops;
3290 }
3291 }
3292 return NULL;
3293 }
3294
3295 static int
3296 netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
3297 struct sset *types)
3298 {
3299 const struct dpdk_qos_ops *const *opsp;
3300
3301 for (opsp = qos_confs; *opsp != NULL; opsp++) {
3302 const struct dpdk_qos_ops *ops = *opsp;
3303 if (ops->qos_construct && ops->qos_name[0] != '\0') {
3304 sset_add(types, ops->qos_name);
3305 }
3306 }
3307 return 0;
3308 }
3309
3310 static int
3311 netdev_dpdk_get_qos(const struct netdev *netdev,
3312 const char **typep, struct smap *details)
3313 {
3314 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
3315 struct qos_conf *qos_conf;
3316 int error = 0;
3317
3318 ovs_mutex_lock(&dev->mutex);
3319 qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
3320 if (qos_conf) {
3321 *typep = qos_conf->ops->qos_name;
3322 error = (qos_conf->ops->qos_get
3323 ? qos_conf->ops->qos_get(qos_conf, details) : 0);
3324 } else {
3325 /* No QoS configuration set, return an empty string */
3326 *typep = "";
3327 }
3328 ovs_mutex_unlock(&dev->mutex);
3329
3330 return error;
3331 }
3332
3333 static int
3334 netdev_dpdk_set_qos(struct netdev *netdev, const char *type,
3335 const struct smap *details)
3336 {
3337 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
3338 const struct dpdk_qos_ops *new_ops = NULL;
3339 struct qos_conf *qos_conf, *new_qos_conf = NULL;
3340 int error = 0;
3341
3342 ovs_mutex_lock(&dev->mutex);
3343
3344 qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
3345
3346 new_ops = qos_lookup_name(type);
3347
3348 if (!new_ops || !new_ops->qos_construct) {
3349 new_qos_conf = NULL;
3350 if (type && type[0]) {
3351 error = EOPNOTSUPP;
3352 }
3353 } else if (qos_conf && qos_conf->ops == new_ops
3354 && qos_conf->ops->qos_is_equal(qos_conf, details)) {
3355 new_qos_conf = qos_conf;
3356 } else {
3357 error = new_ops->qos_construct(details, &new_qos_conf);
3358 }
3359
3360 if (error) {
3361 VLOG_ERR("Failed to set QoS type %s on port %s: %s",
3362 type, netdev->name, rte_strerror(error));
3363 }
3364
3365 if (new_qos_conf != qos_conf) {
3366 ovsrcu_set(&dev->qos_conf, new_qos_conf);
3367 if (qos_conf) {
3368 ovsrcu_postpone(qos_conf->ops->qos_destruct, qos_conf);
3369 }
3370 }
3371
3372 ovs_mutex_unlock(&dev->mutex);
3373
3374 return error;
3375 }
3376
3377 /* egress-policer details */
3378
3379 struct egress_policer {
3380 struct qos_conf qos_conf;
3381 struct rte_meter_srtcm_params app_srtcm_params;
3382 struct rte_meter_srtcm egress_meter;
3383 };
3384
3385 static void
3386 egress_policer_details_to_param(const struct smap *details,
3387 struct rte_meter_srtcm_params *params)
3388 {
3389 memset(params, 0, sizeof *params);
3390 params->cir = smap_get_ullong(details, "cir", 0);
3391 params->cbs = smap_get_ullong(details, "cbs", 0);
3392 params->ebs = 0;
3393 }
3394
3395 static int
3396 egress_policer_qos_construct(const struct smap *details,
3397 struct qos_conf **conf)
3398 {
3399 struct egress_policer *policer;
3400 int err = 0;
3401
3402 policer = xmalloc(sizeof *policer);
3403 qos_conf_init(&policer->qos_conf, &egress_policer_ops);
3404 egress_policer_details_to_param(details, &policer->app_srtcm_params);
3405 err = rte_meter_srtcm_config(&policer->egress_meter,
3406 &policer->app_srtcm_params);
3407 if (!err) {
3408 *conf = &policer->qos_conf;
3409 } else {
3410 free(policer);
3411 *conf = NULL;
3412 err = -err;
3413 }
3414
3415 return err;
3416 }
3417
3418 static void
3419 egress_policer_qos_destruct(struct qos_conf *conf)
3420 {
3421 struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
3422 qos_conf);
3423 free(policer);
3424 }
3425
3426 static int
3427 egress_policer_qos_get(const struct qos_conf *conf, struct smap *details)
3428 {
3429 struct egress_policer *policer =
3430 CONTAINER_OF(conf, struct egress_policer, qos_conf);
3431
3432 smap_add_format(details, "cir", "%"PRIu64, policer->app_srtcm_params.cir);
3433 smap_add_format(details, "cbs", "%"PRIu64, policer->app_srtcm_params.cbs);
3434
3435 return 0;
3436 }
3437
3438 static bool
3439 egress_policer_qos_is_equal(const struct qos_conf *conf,
3440 const struct smap *details)
3441 {
3442 struct egress_policer *policer =
3443 CONTAINER_OF(conf, struct egress_policer, qos_conf);
3444 struct rte_meter_srtcm_params params;
3445
3446 egress_policer_details_to_param(details, &params);
3447
3448 return !memcmp(&params, &policer->app_srtcm_params, sizeof params);
3449 }
3450
3451 static int
3452 egress_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts, int pkt_cnt,
3453 bool may_steal)
3454 {
3455 int cnt = 0;
3456 struct egress_policer *policer =
3457 CONTAINER_OF(conf, struct egress_policer, qos_conf);
3458
3459 cnt = netdev_dpdk_policer_run(&policer->egress_meter, pkts,
3460 pkt_cnt, may_steal);
3461
3462 return cnt;
3463 }
3464
3465 static const struct dpdk_qos_ops egress_policer_ops = {
3466 "egress-policer", /* qos_name */
3467 egress_policer_qos_construct,
3468 egress_policer_qos_destruct,
3469 egress_policer_qos_get,
3470 egress_policer_qos_is_equal,
3471 egress_policer_run
3472 };
3473
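/* Implements 'reconfigure' for physical ports: stops the port, applies any
 * requested queue count, queue size, MTU or NUMA socket change, then
 * reinitializes the device and its tx queue array. */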
3474 static int
3475 netdev_dpdk_reconfigure(struct netdev *netdev)
3476 {
3477 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
3478 int err = 0;
3479
3480 ovs_mutex_lock(&dev->mutex);
3481
3482 if (netdev->n_txq == dev->requested_n_txq
3483 && netdev->n_rxq == dev->requested_n_rxq
3484 && dev->mtu == dev->requested_mtu
3485 && dev->rxq_size == dev->requested_rxq_size
3486 && dev->txq_size == dev->requested_txq_size
3487 && dev->socket_id == dev->requested_socket_id) {
3488 /* Reconfiguration is unnecessary */
3489
3490 goto out;
3491 }
3492
3493 rte_eth_dev_stop(dev->port_id);
3494
3495 err = netdev_dpdk_mempool_configure(dev);
3496 if (err && err != EEXIST) {
3497 goto out;
3498 }
3499
3500 netdev->n_txq = dev->requested_n_txq;
3501 netdev->n_rxq = dev->requested_n_rxq;
3502
3503 dev->rxq_size = dev->requested_rxq_size;
3504 dev->txq_size = dev->requested_txq_size;
3505
3506 rte_free(dev->tx_q);
3507 err = dpdk_eth_dev_init(dev);
3508 dev->tx_q = netdev_dpdk_alloc_txq(netdev->n_txq);
3509 if (!dev->tx_q) {
3510 err = ENOMEM;
3511 }
3512
3513 netdev_change_seq_changed(netdev);
3514
3515 out:
3516 ovs_mutex_unlock(&dev->mutex);
3517 return err;
3518 }
3519
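/* Applies the requested queue counts to a vhost port, refreshes the tx queue
 * mapping and the mempool, and marks the port reconfigured once a guest is
 * attached. Caller must hold 'dev->mutex'. */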
3520 static int
3521 dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev)
3522 OVS_REQUIRES(dev->mutex)
3523 {
3524 dev->up.n_txq = dev->requested_n_txq;
3525 dev->up.n_rxq = dev->requested_n_rxq;
3526 int err;
3527
3528 /* Enable TX queue 0 by default if it wasn't disabled. */
3529 if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
3530 dev->tx_q[0].map = 0;
3531 }
3532
3533 netdev_dpdk_remap_txqs(dev);
3534
3535 err = netdev_dpdk_mempool_configure(dev);
3536 if (!err) {
3537 /* A new mempool was created. */
3538 netdev_change_seq_changed(&dev->up);
3539 } else if (err != EEXIST) {
3540 return err;
3541 }
3542 if (netdev_dpdk_get_vid(dev) >= 0) {
3543 if (dev->vhost_reconfigured == false) {
3544 dev->vhost_reconfigured = true;
3545 /* Carrier status may need updating. */
3546 netdev_change_seq_changed(&dev->up);
3547 }
3548 }
3549
3550 return 0;
3551 }
3552
3553 static int
3554 netdev_dpdk_vhost_reconfigure(struct netdev *netdev)
3555 {
3556 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
3557 int err;
3558
3559 ovs_mutex_lock(&dev->mutex);
3560 err = dpdk_vhost_reconfigure_helper(dev);
3561 ovs_mutex_unlock(&dev->mutex);
3562
3563 return err;
3564 }
3565
3566 static int
3567 netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev)
3568 {
3569 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
3570 int err;
3571 uint64_t vhost_flags = 0;
3572
3573 ovs_mutex_lock(&dev->mutex);
3574
3575 /* Configure vHost client mode if requested and if the following criteria
3576 * are met:
3577 * 1. Device hasn't been registered yet.
3578 * 2. A path has been specified.
3579 */
3580 if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)
3581 && strlen(dev->vhost_id)) {
3582 /* Register client-mode device. */
3583 vhost_flags |= RTE_VHOST_USER_CLIENT;
3584
3585 /* Enable IOMMU support, if explicitly requested. */
3586 if (dpdk_vhost_iommu_enabled()) {
3587 vhost_flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
3588 }
3589 err = rte_vhost_driver_register(dev->vhost_id, vhost_flags);
3590 if (err) {
3591 VLOG_ERR("vhost-user device setup failure for device %s",
3592 dev->vhost_id);
3593 goto unlock;
3594 } else {
3595 /* Configuration successful */
3596 dev->vhost_driver_flags |= vhost_flags;
3597 VLOG_INFO("vHost User device '%s' created in 'client' mode, "
3598 "using client socket '%s'",
3599 dev->up.name, dev->vhost_id);
3600 }
3601
3602 err = rte_vhost_driver_callback_register(dev->vhost_id,
3603 &virtio_net_device_ops);
3604 if (err) {
3605 VLOG_ERR("rte_vhost_driver_callback_register failed for "
3606 "vhost user client port: %s\n", dev->up.name);
3607 goto unlock;
3608 }
3609
3610 err = rte_vhost_driver_disable_features(dev->vhost_id,
3611 1ULL << VIRTIO_NET_F_HOST_TSO4
3612 | 1ULL << VIRTIO_NET_F_HOST_TSO6
3613 | 1ULL << VIRTIO_NET_F_CSUM);
3614 if (err) {
3615 VLOG_ERR("rte_vhost_driver_disable_features failed for vhost user "
3616 "client port: %s\n", dev->up.name);
3617 goto unlock;
3618 }
3619
3620 err = rte_vhost_driver_start(dev->vhost_id);
3621 if (err) {
3622 VLOG_ERR("rte_vhost_driver_start failed for vhost user "
3623 "client port: %s\n", dev->up.name);
3624 goto unlock;
3625 }
3626 }
3627
3628 err = dpdk_vhost_reconfigure_helper(dev);
3629
3630 unlock:
3631 ovs_mutex_unlock(&dev->mutex);
3632
3633 return err;
3634 }
3635
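/* Builds a 'struct netdev_class' initializer from the hooks that differ
 * between the DPDK port types; everything else is shared. See the four
 * class definitions below for example instantiations. */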
3636 #define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, \
3637 SET_CONFIG, SET_TX_MULTIQ, SEND, \
3638 GET_CARRIER, GET_STATS, \
3639 GET_CUSTOM_STATS, \
3640 GET_FEATURES, GET_STATUS, \
3641 RECONFIGURE, RXQ_RECV) \
3642 { \
3643 NAME, \
3644 true, /* is_pmd */ \
3645 INIT, /* init */ \
3646 NULL, /* netdev_dpdk_run */ \
3647 NULL, /* netdev_dpdk_wait */ \
3648 \
3649 netdev_dpdk_alloc, \
3650 CONSTRUCT, \
3651 DESTRUCT, \
3652 netdev_dpdk_dealloc, \
3653 netdev_dpdk_get_config, \
3654 SET_CONFIG, \
3655 NULL, /* get_tunnel_config */ \
3656 NULL, /* build header */ \
3657 NULL, /* push header */ \
3658 NULL, /* pop header */ \
3659 netdev_dpdk_get_numa_id, /* get_numa_id */ \
3660 SET_TX_MULTIQ, \
3661 \
3662 SEND, /* send */ \
3663 NULL, /* send_wait */ \
3664 \
3665 netdev_dpdk_set_etheraddr, \
3666 netdev_dpdk_get_etheraddr, \
3667 netdev_dpdk_get_mtu, \
3668 netdev_dpdk_set_mtu, \
3669 netdev_dpdk_get_ifindex, \
3670 GET_CARRIER, \
3671 netdev_dpdk_get_carrier_resets, \
3672 netdev_dpdk_set_miimon, \
3673 GET_STATS, \
3674 GET_CUSTOM_STATS, \
3675 GET_FEATURES, \
3676 NULL, /* set_advertisements */ \
3677 NULL, /* get_pt_mode */ \
3678 \
3679 netdev_dpdk_set_policing, \
3680 netdev_dpdk_get_qos_types, \
3681 NULL, /* get_qos_capabilities */ \
3682 netdev_dpdk_get_qos, \
3683 netdev_dpdk_set_qos, \
3684 NULL, /* get_queue */ \
3685 NULL, /* set_queue */ \
3686 NULL, /* delete_queue */ \
3687 NULL, /* get_queue_stats */ \
3688 NULL, /* queue_dump_start */ \
3689 NULL, /* queue_dump_next */ \
3690 NULL, /* queue_dump_done */ \
3691 NULL, /* dump_queue_stats */ \
3692 \
3693 NULL, /* set_in4 */ \
3694 NULL, /* get_addr_list */ \
3695 NULL, /* add_router */ \
3696 NULL, /* get_next_hop */ \
3697 GET_STATUS, \
3698 NULL, /* arp_lookup */ \
3699 \
3700 netdev_dpdk_update_flags, \
3701 RECONFIGURE, \
3702 \
3703 netdev_dpdk_rxq_alloc, \
3704 netdev_dpdk_rxq_construct, \
3705 netdev_dpdk_rxq_destruct, \
3706 netdev_dpdk_rxq_dealloc, \
3707 RXQ_RECV, \
3708 NULL, /* rx_wait */ \
3709 NULL, /* rxq_drain */ \
3710 NO_OFFLOAD_API \
3711 }
3712
3713 static const struct netdev_class dpdk_class =
3714 NETDEV_DPDK_CLASS(
3715 "dpdk",
3716 netdev_dpdk_class_init,
3717 netdev_dpdk_construct,
3718 netdev_dpdk_destruct,
3719 netdev_dpdk_set_config,
3720 netdev_dpdk_set_tx_multiq,
3721 netdev_dpdk_eth_send,
3722 netdev_dpdk_get_carrier,
3723 netdev_dpdk_get_stats,
3724 netdev_dpdk_get_custom_stats,
3725 netdev_dpdk_get_features,
3726 netdev_dpdk_get_status,
3727 netdev_dpdk_reconfigure,
3728 netdev_dpdk_rxq_recv);
3729
3730 static const struct netdev_class dpdk_ring_class =
3731 NETDEV_DPDK_CLASS(
3732 "dpdkr",
3733 netdev_dpdk_class_init,
3734 netdev_dpdk_ring_construct,
3735 netdev_dpdk_destruct,
3736 netdev_dpdk_ring_set_config,
3737 netdev_dpdk_set_tx_multiq,
3738 netdev_dpdk_ring_send,
3739 netdev_dpdk_get_carrier,
3740 netdev_dpdk_get_stats,
3741 netdev_dpdk_get_custom_stats,
3742 netdev_dpdk_get_features,
3743 netdev_dpdk_get_status,
3744 netdev_dpdk_reconfigure,
3745 netdev_dpdk_rxq_recv);
3746
3747 static const struct netdev_class dpdk_vhost_class =
3748 NETDEV_DPDK_CLASS(
3749 "dpdkvhostuser",
3750 NULL,
3751 netdev_dpdk_vhost_construct,
3752 netdev_dpdk_vhost_destruct,
3753 NULL,
3754 NULL,
3755 netdev_dpdk_vhost_send,
3756 netdev_dpdk_vhost_get_carrier,
3757 netdev_dpdk_vhost_get_stats,
3758 NULL,
3759 NULL,
3760 netdev_dpdk_vhost_user_get_status,
3761 netdev_dpdk_vhost_reconfigure,
3762 netdev_dpdk_vhost_rxq_recv);
3763 static const struct netdev_class dpdk_vhost_client_class =
3764 NETDEV_DPDK_CLASS(
3765 "dpdkvhostuserclient",
3766 NULL,
3767 netdev_dpdk_vhost_client_construct,
3768 netdev_dpdk_vhost_destruct,
3769 netdev_dpdk_vhost_client_set_config,
3770 NULL,
3771 netdev_dpdk_vhost_send,
3772 netdev_dpdk_vhost_get_carrier,
3773 netdev_dpdk_vhost_get_stats,
3774 NULL,
3775 NULL,
3776 netdev_dpdk_vhost_user_get_status,
3777 netdev_dpdk_vhost_client_reconfigure,
3778 netdev_dpdk_vhost_rxq_recv);
3779
3780 void
3781 netdev_dpdk_register(void)
3782 {
3783 netdev_register_provider(&dpdk_class);
3784 netdev_register_provider(&dpdk_ring_class);
3785 netdev_register_provider(&dpdk_vhost_class);
3786 netdev_register_provider(&dpdk_vhost_client_class);
3787 }