/*
 * Copyright (c) 2014, 2015, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "netdev-dpdk.h"

#include <string.h>
#include <signal.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_eth_ring.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_meter.h>
#include <rte_pci.h>
#include <rte_vhost.h>

#include "dirs.h"
#include "dp-packet.h"
#include "dpdk.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/vlog.h"
#include "ovs-numa.h"
#include "ovs-thread.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "openvswitch/shash.h"
#include "smap.h"
#include "sset.h"
#include "unaligned.h"
#include "timeval.h"
#include "unixctl.h"

enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

VLOG_DEFINE_THIS_MODULE(netdev_dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

/*
 * We need to reserve extra space in the mbufs so that the DMA addresses can
 * be aligned to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and a drop in
 * performance for the standard Ethernet MTU.
 */
#define ETHER_HDR_MAX_LEN           (ETHER_HDR_LEN + ETHER_CRC_LEN \
                                     + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu)       ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len)                    \
                                     - ETHER_HDR_LEN - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)              ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) \
                                             + sizeof(struct dp_packet) \
                                             + RTE_PKTMBUF_HEADROOM),   \
                                             RTE_CACHE_LINE_SIZE)
#define NETDEV_DPDK_MBUF_ALIGN      1024
#define NETDEV_DPDK_MAX_PKT_LEN     9728

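/*
 * Worked example (illustrative, not part of the original source): for the
 * standard Ethernet MTU of 1500, assuming the usual ETHER_HDR_LEN of 14,
 * ETHER_CRC_LEN of 4 and VLAN_HEADER_LEN of 4 bytes:
 *
 *     MTU_TO_FRAME_LEN(1500)     = 1500 + 14 + 4    = 1518
 *     ETHER_HDR_MAX_LEN          = 14 + 4 + (2 * 4) = 26
 *     MTU_TO_MAX_FRAME_LEN(1500) = 1500 + 26        = 1526
 */
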
/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF */

#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF) == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);

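/*
 * Illustrative note (added for clarity): with the values above,
 * MAX_NB_MBUF is 262144 and MIN_NB_MBUF is 16384, so the allocation loop
 * in dpdk_mp_create() tries 262144, 131072, 65536, 32768 and finally
 * 16384 mbufs before giving up.
 */
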
/*
 * DPDK XSTATS Counter names definition
 */
#define XSTAT_RX_64_PACKETS              "rx_size_64_packets"
#define XSTAT_RX_65_TO_127_PACKETS       "rx_size_65_to_127_packets"
#define XSTAT_RX_128_TO_255_PACKETS      "rx_size_128_to_255_packets"
#define XSTAT_RX_256_TO_511_PACKETS      "rx_size_256_to_511_packets"
#define XSTAT_RX_512_TO_1023_PACKETS     "rx_size_512_to_1023_packets"
#define XSTAT_RX_1024_TO_1522_PACKETS    "rx_size_1024_to_1522_packets"
#define XSTAT_RX_1523_TO_MAX_PACKETS     "rx_size_1523_to_max_packets"

#define XSTAT_TX_64_PACKETS              "tx_size_64_packets"
#define XSTAT_TX_65_TO_127_PACKETS       "tx_size_65_to_127_packets"
#define XSTAT_TX_128_TO_255_PACKETS      "tx_size_128_to_255_packets"
#define XSTAT_TX_256_TO_511_PACKETS      "tx_size_256_to_511_packets"
#define XSTAT_TX_512_TO_1023_PACKETS     "tx_size_512_to_1023_packets"
#define XSTAT_TX_1024_TO_1522_PACKETS    "tx_size_1024_to_1522_packets"
#define XSTAT_TX_1523_TO_MAX_PACKETS     "tx_size_1523_to_max_packets"

#define XSTAT_RX_MULTICAST_PACKETS       "rx_multicast_packets"
#define XSTAT_TX_MULTICAST_PACKETS       "tx_multicast_packets"
#define XSTAT_RX_BROADCAST_PACKETS       "rx_broadcast_packets"
#define XSTAT_TX_BROADCAST_PACKETS       "tx_broadcast_packets"
#define XSTAT_RX_UNDERSIZED_ERRORS       "rx_undersized_errors"
#define XSTAT_RX_OVERSIZE_ERRORS         "rx_oversize_errors"
#define XSTAT_RX_FRAGMENTED_ERRORS       "rx_fragmented_errors"
#define XSTAT_RX_JABBER_ERRORS           "rx_jabber_errors"

#define SOCKET0 0

/* Default size of Physical NIC RXQ */
#define NIC_PORT_DEFAULT_RXQ_SIZE 2048
/* Default size of Physical NIC TXQ */
#define NIC_PORT_DEFAULT_TXQ_SIZE 2048
/* Maximum size of Physical NIC Queues */
#define NIC_PORT_MAX_Q_SIZE 4096

#define OVS_VHOST_MAX_QUEUE_NUM 1024     /* Maximum number of vHost TX queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and not
                                          * yet mapped to another queue. */

#define DPDK_ETH_PORT_ID_INVALID    RTE_MAX_ETHPORTS

/* DPDK library uses uint8_t for port_id. */
typedef uint8_t dpdk_port_t;

#define VHOST_ENQ_RETRY_NUM 8
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)

static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .split_hdr_size = 0,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
        .hw_strip_crc   = 0,
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_key = NULL,
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static int new_device(int vid);
static void destroy_device(int vid);
static int vring_state_changed(int vid, uint16_t queue_id, int enable);
static const struct vhost_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed,
    .features_changed = NULL
};

enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST = 1,
};

/* Quality of Service */

/* An instance of a QoS configuration. Always associated with a particular
 * network device.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs.
 */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
    rte_spinlock_t lock;
};

/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted. All of them must be provided, except
 * where otherwise noted.
 */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct a qos_conf object. The implementation should make
     * the appropriate calls to configure QoS according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets '*conf' to an
     * initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_construct)(const struct smap *details, struct qos_conf **conf);

    /* Destroys the data structures allocated by the implementation as part of
     * 'qos_conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    void (*qos_destruct)(struct qos_conf *conf);

    /* Retrieves details of 'conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     */
    int (*qos_get)(const struct qos_conf *conf, struct smap *details);

    /* Returns true if 'conf' is already configured according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * For all QoS implementations it should always be non-null.
     */
    bool (*qos_is_equal)(const struct qos_conf *conf,
                         const struct smap *details);

    /* Modify an array of rte_mbufs. The modification is specific to
     * each qos implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a qos modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_run)(struct qos_conf *qos_conf, struct rte_mbuf **pkts,
                   int pkt_cnt);
};

/* dpdk_qos_ops for each type of user space QoS implementation */
static const struct dpdk_qos_ops egress_policer_ops;

/*
 * Array of dpdk_qos_ops, contains pointers to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    NULL
};

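/* Minimal sketch (illustrative only, not part of OVS) of a pass-through QoS
 * implementation conforming to 'struct dpdk_qos_ops' above.  The "noop"
 * names are hypothetical; the real in-tree example is 'egress_policer_ops'
 * defined later in this file. */
#if 0
static int
noop_qos_construct(const struct smap *details OVS_UNUSED,
                   struct qos_conf **conf)
{
    struct qos_conf *c = xmalloc(sizeof *c);

    c->ops = &noop_qos_ops;     /* hypothetical ops table naming these
                                 * callbacks */
    rte_spinlock_init(&c->lock);
    *conf = c;
    return 0;
}

static void
noop_qos_destruct(struct qos_conf *conf)
{
    free(conf);
}

static int
noop_qos_run(struct qos_conf *conf OVS_UNUSED,
             struct rte_mbuf **pkts OVS_UNUSED, int pkt_cnt)
{
    /* Pass every packet through unmodified. */
    return pkt_cnt;
}
#endif
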
static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_mutex dpdk_mp_mutex OVS_ACQ_AFTER(dpdk_mutex)
    = OVS_MUTEX_INITIALIZER;

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mp_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
};

/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    rte_spinlock_t tx_lock;  /* Protects the members and the NIC queue
                              * from concurrent access.  It is used only
                              * if the queue is shared among different
                              * pmd threads (see 'concurrent_txq'). */
    int map;                 /* Mapping of a configured vhost-user queue
                              * to the queue enabled by the guest. */
};

/* dpdk has no way to remove dpdk ring ethernet devices,
 * so we have to keep them around once they've been created. */

static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);

struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name */
    dpdk_port_t eth_port_id; /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct ingress_policer {
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm in_policer;
    rte_spinlock_t policer_lock;
};

enum dpdk_hw_ol_features {
    NETDEV_RX_CHECKSUM_OFFLOAD = 1 << 0,
};

struct netdev_dpdk {
    struct netdev up;
    dpdk_port_t port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects stats */
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* virtio identifier for vhost devices */
    ovsrcu_index vid;

    /* True if vHost device is 'up' and has been reconfigured at least once */
    bool vhost_reconfigured;

    /* Identifier used to distinguish vhost devices from each other. */
    char vhost_id[PATH_MAX];

    /* Device arguments for dpdk ports */
    char *devargs;

    /* If true, device was attached by rte_eth_dev_attach(). */
    bool attached;

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

    /* QoS configuration and lock for the device */
    OVSRCU_TYPE(struct qos_conf *) qos_conf;

    /* The following properties cannot be changed when a device is running,
     * so we remember the request and update them next time
     * netdev_dpdk*_reconfigure() is called */
    int requested_mtu;
    int requested_n_txq;
    int requested_n_rxq;
    int requested_rxq_size;
    int requested_txq_size;

    /* Number of rx/tx descriptors for physical devices */
    int rxq_size;
    int txq_size;

    /* Socket ID detected when vHost device is brought up */
    int requested_socket_id;

    /* Denotes whether vHost port is client/server mode */
    uint64_t vhost_driver_flags;

    /* Ingress Policer */
    OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
    uint32_t policer_rate;
    uint32_t policer_burst;

    /* DPDK-ETH Flow control */
    struct rte_eth_fc_conf fc_conf;

    /* DPDK-ETH hardware offload features,
     * from the enum set 'dpdk_hw_ol_features' */
    uint32_t hw_ol_features;
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    dpdk_port_t port_id;
};

static void netdev_dpdk_destruct(struct netdev *netdev);
static void netdev_dpdk_vhost_destruct(struct netdev *netdev);

int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->destruct == netdev_dpdk_destruct
           || class->destruct == netdev_dpdk_vhost_destruct;
}

/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                    NETDEV_DPDK_MBUF_ALIGN);
}

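/*
 * Worked example (illustrative): assuming DPDK's default
 * RTE_PKTMBUF_HEADROOM of 128 bytes, dpdk_buf_size(1500) is
 * ROUND_UP(1526 + 128, 1024) = ROUND_UP(1654, 1024) = 2048.
 */
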
/* Allocates an area of 'sz' bytes from DPDK.  The memory is zero'ed.
 *
 * Unlike xmalloc(), this function can return NULL on failure. */
static void *
dpdk_rte_mzalloc(size_t sz)
{
    return rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
}

void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}

static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp OVS_UNUSED,
                     void *opaque_arg OVS_UNUSED,
                     void *_p,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *pkt = _p;

    dp_packet_init_dpdk((struct dp_packet *) pkt, pkt->buf_len);
}

static struct dpdk_mp *
dpdk_mp_create(int socket_id, int mtu)
{
    struct dpdk_mp *dmp;
    unsigned mp_size;
    char *mp_name;

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    if (!dmp) {
        return NULL;
    }
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;
    /* XXX: this is a really rough method of provisioning memory.
     * It's impossible to determine what the exact memory requirements are
     * when the number of ports and rxqs that utilize a particular mempool can
     * change dynamically at runtime.  For now, use this rough heuristic.
     */
    if (mtu >= ETHER_MTU) {
        mp_size = MAX_NB_MBUF;
    } else {
        mp_size = MIN_NB_MBUF;
    }

    do {
        mp_name = xasprintf("ovs_mp_%d_%d_%u", dmp->mtu, dmp->socket_id,
                            mp_size);

        dmp->mp = rte_pktmbuf_pool_create(mp_name, mp_size,
                                          MP_CACHE_SZ,
                                          sizeof (struct dp_packet)
                                          - sizeof (struct rte_mbuf),
                                          MBUF_SIZE(mtu)
                                          - sizeof(struct dp_packet),
                                          socket_id);
        if (dmp->mp) {
            VLOG_DBG("Allocated \"%s\" mempool with %u mbufs",
                     mp_name, mp_size);
        }
        free(mp_name);
        if (dmp->mp) {
            /* rte_pktmbuf_pool_create has done some initialization of the
             * rte_mbuf part of each dp_packet, while ovs_rte_pktmbuf_init
             * initializes some OVS specific fields of dp_packet.
             */
            rte_mempool_obj_iter(dmp->mp, ovs_rte_pktmbuf_init, NULL);
            return dmp;
        }
    } while (rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);

    rte_free(dmp);
    return NULL;
}

static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu)
{
    struct dpdk_mp *dmp;

    ovs_mutex_lock(&dpdk_mp_mutex);
    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            goto out;
        }
    }

    dmp = dpdk_mp_create(socket_id, mtu);
    if (dmp) {
        ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
    }

out:
    ovs_mutex_unlock(&dpdk_mp_mutex);

    return dmp;
}

static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    ovs_mutex_lock(&dpdk_mp_mutex);
    ovs_assert(dmp->refcount);

    if (!--dmp->refcount) {
        ovs_list_remove(&dmp->list_node);
        rte_mempool_free(dmp->mp);
        rte_free(dmp);
    }
    ovs_mutex_unlock(&dpdk_mp_mutex);
}

/* Tries to allocate a new mempool on requested_socket_id with an
 * mbuf size corresponding to requested_mtu.
 * On success the new configuration will be applied.
 * On error, the device will be left unchanged. */
static int
netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    uint32_t buf_size = dpdk_buf_size(dev->requested_mtu);
    struct dpdk_mp *mp;

    mp = dpdk_mp_get(dev->requested_socket_id, FRAME_LEN_TO_MTU(buf_size));
    if (!mp) {
        VLOG_ERR("Failed to create memory pool for netdev "
                 "%s, with MTU %d on socket %d: %s\n",
                 dev->up.name, dev->requested_mtu, dev->requested_socket_id,
                 rte_strerror(rte_errno));
        return rte_errno;
    } else {
        dpdk_mp_put(dev->dpdk_mp);
        dev->dpdk_mp = mp;
        dev->mtu = dev->requested_mtu;
        dev->socket_id = dev->requested_socket_id;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    }

    return 0;
}

static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %"PRIu8" Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %"PRIu8" Link Down", dev->port_id);
        }
    }
}

static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            if (dev->type == DPDK_DEV_ETH) {
                check_link_status(dev);
            }
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}

static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;
    struct rte_eth_conf conf = port_conf;

    /* For some NICs (e.g. Niantic), scatter_rx mode needs to be explicitly
     * enabled. */
    if (dev->mtu > ETHER_MTU) {
        conf.rxmode.enable_scatter = 1;
    }

    conf.rxmode.hw_ip_checksum = (dev->hw_ol_features &
                                  NETDEV_RX_CHECKSUM_OFFLOAD) != 0;
    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV):  rte_eth_*_queue_setup will fail if a queue is not
     * available.  When this happens we can retry the configuration
     * and request fewer queues */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &conf);
        if (diag) {
            VLOG_WARN("Interface %s eth_dev setup error %s\n",
                      dev->up.name, rte_strerror(-diag));
            break;
        }

        diag = rte_eth_dev_set_mtu(dev->port_id, dev->mtu);
        if (diag) {
            VLOG_ERR("Interface %s MTU (%d) setup error: %s",
                     dev->up.name, dev->mtu, rte_strerror(-diag));
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, dev->txq_size,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with fewer tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, dev->rxq_size,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with fewer rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->up.n_txq = n_txq;

        return 0;
    }

    return diag;
}

static void
dpdk_eth_flow_ctrl_setup(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex)
{
    if (rte_eth_dev_flow_ctrl_set(dev->port_id, &dev->fc_conf)) {
        VLOG_WARN("Failed to enable flow control on device %"PRIu8,
                  dev->port_id);
    }
}

static int
dpdk_eth_dev_init(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;
    uint32_t rx_chksm_offload_capa = DEV_RX_OFFLOAD_UDP_CKSUM |
                                     DEV_RX_OFFLOAD_TCP_CKSUM |
                                     DEV_RX_OFFLOAD_IPV4_CKSUM;

    rte_eth_dev_info_get(dev->port_id, &info);

    if ((info.rx_offload_capa & rx_chksm_offload_capa) !=
            rx_chksm_offload_capa) {
        VLOG_WARN("Rx checksum offload is not supported on port %"PRIu8,
                  dev->port_id);
        dev->hw_ol_features &= ~NETDEV_RX_CHECKSUM_OFFLOAD;
    } else {
        dev->hw_ol_features |= NETDEV_RX_CHECKSUM_OFFLOAD;
    }

    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %"PRIu8": "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    /* Get the Flow control configuration for DPDK-ETH */
    diag = rte_eth_dev_flow_ctrl_get(dev->port_id, &dev->fc_conf);
    if (diag) {
        VLOG_DBG("cannot get flow control parameters on port=%"PRIu8", err=%d",
                 dev->port_id, diag);
    }

    return 0;
}

static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *dev;

    dev = dpdk_rte_mzalloc(sizeof *dev);
    if (dev) {
        return &dev->up;
    }

    return NULL;
}

static struct dpdk_tx_queue *
netdev_dpdk_alloc_txq(unsigned int n_txqs)
{
    struct dpdk_tx_queue *txqs;
    unsigned i;

    txqs = dpdk_rte_mzalloc(n_txqs * sizeof *txqs);
    if (txqs) {
        for (i = 0; i < n_txqs; i++) {
            /* Initialize map for vhost devices. */
            txqs[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
            rte_spinlock_init(&txqs[i].tx_lock);
        }
    }

    return txqs;
}

static int
common_construct(struct netdev *netdev, dpdk_port_t port_no,
                 enum dpdk_dev_type type, int socket_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_init(&dev->mutex);

    rte_spinlock_init(&dev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    dev->socket_id = socket_id < 0 ? SOCKET0 : socket_id;
    dev->requested_socket_id = dev->socket_id;
    dev->port_id = port_no;
    dev->type = type;
    dev->flags = 0;
    dev->requested_mtu = ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    ovsrcu_index_init(&dev->vid, -1);
    dev->vhost_reconfigured = false;
    dev->attached = false;

    ovsrcu_init(&dev->qos_conf, NULL);

    ovsrcu_init(&dev->ingress_policer, NULL);
    dev->policer_rate = 0;
    dev->policer_burst = 0;

    netdev->n_rxq = 0;
    netdev->n_txq = 0;
    dev->requested_n_rxq = NR_QUEUE;
    dev->requested_n_txq = NR_QUEUE;
    dev->requested_rxq_size = NIC_PORT_DEFAULT_RXQ_SIZE;
    dev->requested_txq_size = NIC_PORT_DEFAULT_TXQ_SIZE;

    /* Initialize the flow control to NULL */
    memset(&dev->fc_conf, 0, sizeof dev->fc_conf);

    /* Initialize the hardware offload flags to 0 */
    dev->hw_ol_features = 0;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    ovs_list_push_back(&dpdk_list, &dev->list_node);

    netdev_request_reconfigure(netdev);

    return 0;
}

/* dev_name must be the prefix followed by a positive decimal number.
 * (no leading + or - signs are allowed) */
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
        return 0;
    } else {
        return ENODEV;
    }
}

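/*
 * Illustrative usage of dpdk_dev_parse_name() above:
 * dpdk_dev_parse_name("dpdkr7", "dpdkr", &port_no) returns 0 and sets
 * port_no to 7, while "dpdkr-7" (leading sign) and "eth0" (wrong prefix)
 * both return ENODEV.
 */
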
static int
vhost_common_construct(struct netdev *netdev)
    OVS_REQUIRES(dpdk_mutex)
{
    int socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    dev->tx_q = netdev_dpdk_alloc_txq(OVS_VHOST_MAX_QUEUE_NUM);
    if (!dev->tx_q) {
        return ENOMEM;
    }

    return common_construct(netdev, DPDK_ETH_PORT_ID_INVALID,
                            DPDK_DEV_VHOST, socket_id);
}

static int
netdev_dpdk_vhost_construct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *name = netdev->name;
    int err;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system. '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
                 name);
        return EINVAL;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket. */
    snprintf(dev->vhost_id, sizeof dev->vhost_id, "%s/%s",
             dpdk_get_vhost_sock_dir(), name);

    dev->vhost_driver_flags &= ~RTE_VHOST_USER_CLIENT;
    err = rte_vhost_driver_register(dev->vhost_id, dev->vhost_driver_flags);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 dev->vhost_id);
        goto out;
    } else {
        fatal_signal_add_file_to_unlink(dev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  dev->vhost_id, name);
    }

    err = rte_vhost_driver_callback_register(dev->vhost_id,
                                             &virtio_net_device_ops);
    if (err) {
        VLOG_ERR("rte_vhost_driver_callback_register failed for vhost user "
                 "port: %s\n", name);
        goto out;
    }

    err = rte_vhost_driver_disable_features(dev->vhost_id,
                                            1ULL << VIRTIO_NET_F_HOST_TSO4
                                            | 1ULL << VIRTIO_NET_F_HOST_TSO6
                                            | 1ULL << VIRTIO_NET_F_CSUM);
    if (err) {
        VLOG_ERR("rte_vhost_driver_disable_features failed for vhost user "
                 "port: %s\n", name);
        goto out;
    }

    err = rte_vhost_driver_start(dev->vhost_id);
    if (err) {
        VLOG_ERR("rte_vhost_driver_start failed for vhost user "
                 "port: %s\n", name);
        goto out;
    }

    err = vhost_common_construct(netdev);
    if (err) {
        VLOG_ERR("vhost_common_construct failed for vhost user "
                 "port: %s\n", name);
    }

out:
    ovs_mutex_unlock(&dpdk_mutex);
    VLOG_WARN_ONCE("dpdkvhostuser ports are considered deprecated; "
                   "please migrate to dpdkvhostuserclient ports.");
    return err;
}

static int
netdev_dpdk_vhost_client_construct(struct netdev *netdev)
{
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    err = vhost_common_construct(netdev);
    if (err) {
        VLOG_ERR("vhost_common_construct failed for vhost user client "
                 "port: %s\n", netdev->name);
    }
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static int
netdev_dpdk_construct(struct netdev *netdev)
{
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    err = common_construct(netdev, DPDK_ETH_PORT_ID_INVALID,
                           DPDK_DEV_ETH, SOCKET0);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static void
common_destruct(struct netdev_dpdk *dev)
    OVS_REQUIRES(dpdk_mutex)
    OVS_EXCLUDED(dev->mutex)
{
    rte_free(dev->tx_q);
    dpdk_mp_put(dev->dpdk_mp);

    ovs_list_remove(&dev->list_node);
    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));
    ovs_mutex_destroy(&dev->mutex);
}

static void
netdev_dpdk_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    char devname[RTE_ETH_NAME_MAX_LEN];

    ovs_mutex_lock(&dpdk_mutex);

    rte_eth_dev_stop(dev->port_id);

    if (dev->attached) {
        rte_eth_dev_close(dev->port_id);
        if (rte_eth_dev_detach(dev->port_id, devname) < 0) {
            VLOG_ERR("Device '%s' can not be detached", dev->devargs);
        } else {
            VLOG_INFO("Device '%s' has been detached", devname);
        }
    }

    free(dev->devargs);
    common_destruct(dev);

    ovs_mutex_unlock(&dpdk_mutex);
}

/* rte_vhost_driver_unregister() can call back destroy_device(), which will
 * try to acquire 'dpdk_mutex' and possibly 'dev->mutex'.  To avoid a
 * deadlock, none of the mutexes must be held while calling this function. */
static int
dpdk_vhost_driver_unregister(struct netdev_dpdk *dev OVS_UNUSED,
                             char *vhost_id)
    OVS_EXCLUDED(dpdk_mutex)
    OVS_EXCLUDED(dev->mutex)
{
    return rte_vhost_driver_unregister(vhost_id);
}

static void
netdev_dpdk_vhost_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    char *vhost_id;

    ovs_mutex_lock(&dpdk_mutex);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_vid(dev) >= 0
        && !(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
                 netdev->name);
        VLOG_ERR("To restore connectivity after re-adding of port, VM on "
                 "socket '%s' must be restarted.", dev->vhost_id);
    }

    vhost_id = xstrdup(dev->vhost_id);

    common_destruct(dev);

    ovs_mutex_unlock(&dpdk_mutex);

    if (!vhost_id[0]) {
        goto out;
    }

    if (dpdk_vhost_driver_unregister(dev, vhost_id)) {
        VLOG_ERR("%s: Unable to unregister vhost driver for socket '%s'.\n",
                 netdev->name, vhost_id);
    } else if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        /* OVS server mode - remove this socket from list for deletion */
        fatal_signal_remove_file_to_unlink(vhost_id);
    }
out:
    free(vhost_id);
}

static void
netdev_dpdk_dealloc(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_free(dev);
}

static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", dev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "mtu", "%d", dev->mtu);

    if (dev->type == DPDK_DEV_ETH) {
        smap_add_format(args, "requested_rxq_descriptors", "%d",
                        dev->requested_rxq_size);
        smap_add_format(args, "configured_rxq_descriptors", "%d",
                        dev->rxq_size);
        smap_add_format(args, "requested_txq_descriptors", "%d",
                        dev->requested_txq_size);
        smap_add_format(args, "configured_txq_descriptors", "%d",
                        dev->txq_size);
        if (dev->hw_ol_features & NETDEV_RX_CHECKSUM_OFFLOAD) {
            smap_add(args, "rx_csum_offload", "true");
        } else {
            smap_add(args, "rx_csum_offload", "false");
        }
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static struct netdev_dpdk *
netdev_dpdk_lookup_by_port_id(dpdk_port_t port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev;

    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (dev->port_id == port_id) {
            return dev;
        }
    }

    return NULL;
}

static dpdk_port_t
netdev_dpdk_process_devargs(struct netdev_dpdk *dev,
                            const char *devargs, char **errp)
{
    /* Get the name up to the first comma. */
    char *name = xmemdup0(devargs, strcspn(devargs, ","));
    dpdk_port_t new_port_id = DPDK_ETH_PORT_ID_INVALID;

    if (!rte_eth_dev_count()
        || rte_eth_dev_get_port_by_name(name, &new_port_id)
        || !rte_eth_dev_is_valid_port(new_port_id)) {
        /* Device not found in DPDK, attempt to attach it */
        if (!rte_eth_dev_attach(devargs, &new_port_id)) {
            /* Attach successful */
            dev->attached = true;
            VLOG_INFO("Device '%s' attached to DPDK", devargs);
        } else {
            /* Attach unsuccessful */
            new_port_id = DPDK_ETH_PORT_ID_INVALID;
            VLOG_WARN_BUF(errp, "Error attaching device '%s' to DPDK",
                          devargs);
        }
    }

    free(name);
    return new_port_id;
}

static void
dpdk_set_rxq_config(struct netdev_dpdk *dev, const struct smap *args)
    OVS_REQUIRES(dev->mutex)
{
    int new_n_rxq;

    new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1);
    if (new_n_rxq != dev->requested_n_rxq) {
        dev->requested_n_rxq = new_n_rxq;
        netdev_request_reconfigure(&dev->up);
    }
}

static void
dpdk_process_queue_size(struct netdev *netdev, const struct smap *args,
                        const char *flag, int default_size, int *new_size)
{
    int queue_size = smap_get_int(args, flag, default_size);

    if (queue_size <= 0 || queue_size > NIC_PORT_MAX_Q_SIZE
            || !is_pow2(queue_size)) {
        queue_size = default_size;
    }

    if (queue_size != *new_size) {
        *new_size = queue_size;
        netdev_request_reconfigure(netdev);
    }
}

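/*
 * Example (illustrative) of the validation above with flag "n_rxq_desc"
 * and default 2048: a requested 1000 is rejected (not a power of two) and
 * falls back to 2048; 4096 is accepted; 8192 exceeds NIC_PORT_MAX_Q_SIZE
 * and also falls back to the default.
 */
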
static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args,
                       char **errp)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    bool rx_fc_en, tx_fc_en, autoneg;
    enum rte_eth_fc_mode fc_mode;
    static const enum rte_eth_fc_mode fc_mode_set[2][2] = {
        {RTE_FC_NONE,     RTE_FC_TX_PAUSE},
        {RTE_FC_RX_PAUSE, RTE_FC_FULL    }
    };
    const char *new_devargs;
    int err = 0;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    dpdk_set_rxq_config(dev, args);

    dpdk_process_queue_size(netdev, args, "n_rxq_desc",
                            NIC_PORT_DEFAULT_RXQ_SIZE,
                            &dev->requested_rxq_size);
    dpdk_process_queue_size(netdev, args, "n_txq_desc",
                            NIC_PORT_DEFAULT_TXQ_SIZE,
                            &dev->requested_txq_size);

    new_devargs = smap_get(args, "dpdk-devargs");

    if (dev->devargs && strcmp(new_devargs, dev->devargs)) {
        /* The user requested a new device.  If we return error, the caller
         * will delete this netdev and try to recreate it. */
        err = EAGAIN;
        goto out;
    }

    /* dpdk-devargs is required for device configuration */
    if (new_devargs && new_devargs[0]) {
        /* Don't process dpdk-devargs if value is unchanged and port id
         * is valid */
        if (!(dev->devargs && !strcmp(dev->devargs, new_devargs)
               && rte_eth_dev_is_valid_port(dev->port_id))) {
            dpdk_port_t new_port_id = netdev_dpdk_process_devargs(dev,
                                                                  new_devargs,
                                                                  errp);
            if (!rte_eth_dev_is_valid_port(new_port_id)) {
                err = EINVAL;
            } else if (new_port_id == dev->port_id) {
                /* Already configured, do not reconfigure again */
                err = 0;
            } else {
                struct netdev_dpdk *dup_dev;

                dup_dev = netdev_dpdk_lookup_by_port_id(new_port_id);
                if (dup_dev) {
                    VLOG_WARN_BUF(errp, "'%s' is trying to use device '%s' "
                                        "which is already in use by '%s'",
                                  netdev_get_name(netdev), new_devargs,
                                  netdev_get_name(&dup_dev->up));
                    err = EADDRINUSE;
                } else {
                    int sid = rte_eth_dev_socket_id(new_port_id);

                    dev->requested_socket_id = sid < 0 ? SOCKET0 : sid;
                    dev->devargs = xstrdup(new_devargs);
                    dev->port_id = new_port_id;
                    netdev_request_reconfigure(&dev->up);
                    err = 0;
                }
            }
        }
    } else {
        VLOG_WARN_BUF(errp, "'%s' is missing 'options:dpdk-devargs'. "
                            "The old 'dpdk<port_id>' names are not supported",
                      netdev_get_name(netdev));
        err = EINVAL;
    }

    if (err) {
        goto out;
    }

    rx_fc_en = smap_get_bool(args, "rx-flow-ctrl", false);
    tx_fc_en = smap_get_bool(args, "tx-flow-ctrl", false);
    autoneg = smap_get_bool(args, "flow-ctrl-autoneg", false);

    fc_mode = fc_mode_set[tx_fc_en][rx_fc_en];
    if (dev->fc_conf.mode != fc_mode || autoneg != dev->fc_conf.autoneg) {
        dev->fc_conf.mode = fc_mode;
        dev->fc_conf.autoneg = autoneg;
        dpdk_eth_flow_ctrl_setup(dev);
    }

out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static int
netdev_dpdk_ring_set_config(struct netdev *netdev, const struct smap *args,
                            char **errp OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dpdk_set_rxq_config(dev, args);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_vhost_client_set_config(struct netdev *netdev,
                                    const struct smap *args,
                                    char **errp OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *path;

    ovs_mutex_lock(&dev->mutex);
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        path = smap_get(args, "vhost-server-path");
        if (path && strcmp(path, dev->vhost_id)) {
            strcpy(dev->vhost_id, path);
            netdev_request_reconfigure(netdev);
        }
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_numa_id(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return dev->socket_id;
}

/* Sets the number of tx queues for the dpdk interface. */
static int
netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (dev->requested_n_txq == n_txq) {
        goto out;
    }

    dev->requested_n_txq = n_txq;
    netdev_request_reconfigure(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return 0;
}

static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    if (rx) {
        return &rx->up;
    }

    return NULL;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
{
    return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    ovs_mutex_lock(&dev->mutex);
    rx->port_id = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);

    rte_free(rx);
}

/* Tries to transmit 'pkts' to txq 'qid' of device 'dev'.  Takes ownership of
 * 'pkts', even in case of failure.
 *
 * Returns the number of packets that weren't transmitted. */
static inline int
netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid,
                         struct rte_mbuf **pkts, int cnt)
{
    uint32_t nb_tx = 0;

    while (nb_tx != cnt) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, pkts + nb_tx, cnt - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != cnt)) {
        /* Free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < cnt; i++) {
            rte_pktmbuf_free(pkts[i]);
        }
    }

    return cnt - nb_tx;
}

static inline bool
netdev_dpdk_policer_pkt_handle(struct rte_meter_srtcm *meter,
                               struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
                                             e_RTE_METER_GREEN;
}

static int
netdev_dpdk_policer_run(struct rte_meter_srtcm *meter,
                        struct rte_mbuf **pkts, int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet */
        if (netdev_dpdk_policer_pkt_handle(meter, pkt, current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            rte_pktmbuf_free(pkt);
        }
    }

    return cnt;
}

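/*
 * Illustrative example of the in-place compaction above: for an input
 * array {A, B, C, D} where the meter marks B and D red, the survivors are
 * shifted down to {A, C, -, -}, B and D are freed, and the function
 * returns 2.
 */
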
9509913a
IS
1527static int
1528ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts,
1529 int pkt_cnt)
1530{
1531 int cnt = 0;
1532
1533 rte_spinlock_lock(&policer->policer_lock);
1534 cnt = netdev_dpdk_policer_run(&policer->in_policer, pkts, pkt_cnt);
1535 rte_spinlock_unlock(&policer->policer_lock);
1536
1537 return cnt;
1538}
1539
58397e6c 1540static bool
0a0f39df 1541is_vhost_running(struct netdev_dpdk *dev)
58397e6c 1542{
0a0f39df 1543 return (netdev_dpdk_get_vid(dev) >= 0 && dev->vhost_reconfigured);
58397e6c
KT
1544}
1545
d6e3feb5 1546static inline void
1547netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
1548 unsigned int packet_size)
1549{
1550 /* Hard-coded search for the size bucket. */
1551 if (packet_size < 256) {
1552 if (packet_size >= 128) {
1553 stats->rx_128_to_255_packets++;
1554 } else if (packet_size <= 64) {
1555 stats->rx_1_to_64_packets++;
1556 } else {
1557 stats->rx_65_to_127_packets++;
1558 }
1559 } else {
1560 if (packet_size >= 1523) {
1561 stats->rx_1523_to_max_packets++;
1562 } else if (packet_size >= 1024) {
1563 stats->rx_1024_to_1522_packets++;
1564 } else if (packet_size < 512) {
1565 stats->rx_256_to_511_packets++;
1566 } else {
1567 stats->rx_512_to_1023_packets++;
1568 }
1569 }
1570}
1571
9e3ddd45
TP
1572static inline void
1573netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
9509913a
IS
1574 struct dp_packet **packets, int count,
1575 int dropped)
9e3ddd45
TP
1576{
1577 int i;
d6e3feb5 1578 unsigned int packet_size;
9e3ddd45
TP
1579 struct dp_packet *packet;
1580
1581 stats->rx_packets += count;
9509913a 1582 stats->rx_dropped += dropped;
9e3ddd45
TP
1583 for (i = 0; i < count; i++) {
1584 packet = packets[i];
d6e3feb5 1585 packet_size = dp_packet_size(packet);
9e3ddd45 1586
d6e3feb5 1587 if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
9e3ddd45
TP
1588 /* This only protects the following multicast counting from
1589 * too short packets, but it does not stop the packet from
1590 * further processing. */
1591 stats->rx_errors++;
1592 stats->rx_length_errors++;
1593 continue;
1594 }
1595
d6e3feb5 1596 netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);
1597
9e3ddd45
TP
1598 struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
1599 if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
1600 stats->multicast++;
1601 }
1602
d6e3feb5 1603 stats->rx_bytes += packet_size;
9e3ddd45
TP
1604 }
1605}
1606
58397e6c
KT
1607/*
1608 * The receive path for the vhost port is the TX path out from guest.
1609 */
1610static int
d46285a2 1611netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
64839cf4 1612 struct dp_packet_batch *batch)
58397e6c 1613{
d46285a2 1614 struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
d46285a2 1615 int qid = rxq->queue_id;
9509913a 1616 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
58397e6c 1617 uint16_t nb_rx = 0;
9509913a 1618 uint16_t dropped = 0;
58397e6c 1619
0a0f39df 1620 if (OVS_UNLIKELY(!is_vhost_running(dev)
e543851d 1621 || !(dev->flags & NETDEV_UP))) {
58397e6c
KT
1622 return EAGAIN;
1623 }
1624
0a0f39df
CL
1625 nb_rx = rte_vhost_dequeue_burst(netdev_dpdk_get_vid(dev),
1626 qid * VIRTIO_QNUM + VIRTIO_TXQ,
d46285a2 1627 dev->dpdk_mp->mp,
64839cf4 1628 (struct rte_mbuf **) batch->packets,
cd159f1a 1629 NETDEV_MAX_BURST);
58397e6c
KT
1630 if (!nb_rx) {
1631 return EAGAIN;
1632 }
1633
9509913a
IS
1634 if (policer) {
1635 dropped = nb_rx;
64839cf4
WT
1636 nb_rx = ingress_policer_run(policer,
1637 (struct rte_mbuf **) batch->packets,
1638 nb_rx);
9509913a
IS
1639 dropped -= nb_rx;
1640 }
1641
d46285a2 1642 rte_spinlock_lock(&dev->stats_lock);
64839cf4
WT
1643 netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
1644 nb_rx, dropped);
d46285a2 1645 rte_spinlock_unlock(&dev->stats_lock);
45d947c4 1646
11d4c7a8 1647 dp_packet_batch_init_cutlen(batch);
64839cf4 1648 batch->count = (int) nb_rx;
58397e6c
KT
1649 return 0;
1650}
1651
8a9562d2 1652static int
64839cf4 1653netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch)
8a9562d2 1654{
d46285a2
DDP
1655 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
1656 struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
9509913a 1657 struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
8a9562d2 1658 int nb_rx;
9509913a 1659 int dropped = 0;
8a9562d2 1660
3b1fb077
DDP
1661 if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
1662 return EAGAIN;
1663 }
1664
d46285a2 1665 nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
64839cf4 1666 (struct rte_mbuf **) batch->packets,
cd159f1a 1667 NETDEV_MAX_BURST);
8a9562d2
PS
1668 if (!nb_rx) {
1669 return EAGAIN;
1670 }
1671
9509913a
IS
1672 if (policer) {
1673 dropped = nb_rx;
64839cf4 1674 nb_rx = ingress_policer_run(policer,
58be5c0e 1675 (struct rte_mbuf **) batch->packets,
64839cf4 1676 nb_rx);
9509913a
IS
1677 dropped -= nb_rx;
1678 }
1679
1680 /* Update stats to reflect dropped packets */
1681 if (OVS_UNLIKELY(dropped)) {
1682 rte_spinlock_lock(&dev->stats_lock);
1683 dev->stats.rx_dropped += dropped;
1684 rte_spinlock_unlock(&dev->stats_lock);
1685 }
1686
11d4c7a8 1687 dp_packet_batch_init_cutlen(batch);
64839cf4 1688 batch->count = nb_rx;
8a9562d2
PS
1689
1690 return 0;
1691}
1692
0bf765f7 1693static inline int
78bd47cf
DDP
1694netdev_dpdk_qos_run(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
1695 int cnt)
0bf765f7 1696{
78bd47cf 1697 struct qos_conf *qos_conf = ovsrcu_get(struct qos_conf *, &dev->qos_conf);
0bf765f7 1698
78bd47cf
DDP
1699 if (qos_conf) {
1700 rte_spinlock_lock(&qos_conf->lock);
1701 cnt = qos_conf->ops->qos_run(qos_conf, pkts, cnt);
1702 rte_spinlock_unlock(&qos_conf->lock);
0bf765f7
IS
1703 }
1704
1705 return cnt;
1706}
1707
c6ec9d17
IM
1708static int
1709netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
1710 int pkt_cnt)
1711{
1712 int i = 0;
1713 int cnt = 0;
1714 struct rte_mbuf *pkt;
1715
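/* Walk the burst once, freeing any mbuf whose pkt_len exceeds the device's
 * max_packet_len and compacting the survivors to the front of 'pkts'; the
 * surviving count is returned to the caller. */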
1716 for (i = 0; i < pkt_cnt; i++) {
1717 pkt = pkts[i];
1718 if (OVS_UNLIKELY(pkt->pkt_len > dev->max_packet_len)) {
1719 VLOG_WARN_RL(&rl, "%s: Too big size %" PRIu32 " max_packet_len %d",
1720 dev->up.name, pkt->pkt_len, dev->max_packet_len);
1721 rte_pktmbuf_free(pkt);
1722 continue;
1723 }
1724
1725 if (OVS_UNLIKELY(i != cnt)) {
1726 pkts[cnt] = pkt;
1727 }
1728 cnt++;
1729 }
1730
1731 return cnt;
1732}
1733
9e3ddd45
TP
1734static inline void
1735netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
1736 struct dp_packet **packets,
1737 int attempted,
1738 int dropped)
1739{
1740 int i;
1741 int sent = attempted - dropped;
1742
1743 stats->tx_packets += sent;
1744 stats->tx_dropped += dropped;
1745
1746 for (i = 0; i < sent; i++) {
1747 stats->tx_bytes += dp_packet_size(packets[i]);
1748 }
1749}
1750
58397e6c 1751static void
4573fbd3 1752__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
dd52de45 1753 struct dp_packet **pkts, int cnt)
58397e6c 1754{
d46285a2 1755 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
95e9881f
KT
1756 struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
1757 unsigned int total_pkts = cnt;
c6ec9d17 1758 unsigned int dropped = 0;
dd52de45 1759 int i, retries = 0;
58397e6c 1760
81acebda 1761 qid = dev->tx_q[qid % netdev->n_txq].map;
585a5bea 1762
0a0f39df 1763 if (OVS_UNLIKELY(!is_vhost_running(dev) || qid < 0
e543851d 1764 || !(dev->flags & NETDEV_UP))) {
d46285a2
DDP
1765 rte_spinlock_lock(&dev->stats_lock);
1766 dev->stats.tx_dropped += cnt;
1767 rte_spinlock_unlock(&dev->stats_lock);
1b99bb05 1768 goto out;
58397e6c
KT
1769 }
1770
d46285a2 1771 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
58397e6c 1772
c6ec9d17 1773 cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
0bf765f7 1774 /* Check if QoS has been configured for the netdev. */
78bd47cf 1775 cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt);
c6ec9d17 1776 dropped = total_pkts - cnt;
0bf765f7 1777
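/* Enqueue in a loop: each successful burst advances 'cur_pkts' past the
 * packets the guest accepted, and the remainder is retried up to
 * VHOST_ENQ_RETRY_NUM times. A burst that makes no progress ends the loop
 * early, since the ring is presumably full. */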
95e9881f 1778 do {
4573fbd3 1779 int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
95e9881f
KT
1780 unsigned int tx_pkts;
1781
0a0f39df 1782 tx_pkts = rte_vhost_enqueue_burst(netdev_dpdk_get_vid(dev),
c6ec9d17 1783 vhost_qid, cur_pkts, cnt);
95e9881f
KT
1784 if (OVS_LIKELY(tx_pkts)) {
1785 /* Packets have been sent.*/
1786 cnt -= tx_pkts;
31871ee3 1787 /* Prepare for possible retry.*/
95e9881f
KT
1788 cur_pkts = &cur_pkts[tx_pkts];
1789 } else {
31871ee3
KT
1790 /* No packets sent - do not retry.*/
1791 break;
95e9881f 1792 }
c6ec9d17 1793 } while (cnt && (retries++ <= VHOST_ENQ_RETRY_NUM));
4573fbd3 1794
d46285a2 1795 rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
95e9881f 1796
d46285a2 1797 rte_spinlock_lock(&dev->stats_lock);
0072e931 1798 netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
c6ec9d17 1799 cnt + dropped);
d46285a2 1800 rte_spinlock_unlock(&dev->stats_lock);
58397e6c
KT
1801
1802out:
c6ec9d17 1803 for (i = 0; i < total_pkts - dropped; i++) {
dd52de45 1804 dp_packet_delete(pkts[i]);
58397e6c
KT
1805 }
1806}
1807
8a9562d2
PS
1808/* Tx function. Transmit packets indefinitely */
1809static void
64839cf4 1810dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
db73f716 1811 OVS_NO_THREAD_SAFETY_ANALYSIS
8a9562d2 1812{
bce01e3a 1813#if !defined(__CHECKER__) && !defined(_WIN32)
64839cf4 1814 const size_t PKT_ARRAY_SIZE = batch->count;
bce01e3a
EJ
1815#else
1816 /* Sparse or MSVC doesn't like variable length array. */
cd159f1a 1817 enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
bce01e3a 1818#endif
8a9562d2 1819 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2391135c 1820 struct rte_mbuf *pkts[PKT_ARRAY_SIZE];
175cf4de
RW
1821 int dropped = 0;
1822 int newcnt = 0;
1823 int i;
8a9562d2 1824
7d6d1a40
WT
1825 dp_packet_batch_apply_cutlen(batch);
1826
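/* The packets in 'batch' do not live in this device's mempool (or may not
 * be stolen), so each one is copied into a freshly allocated mbuf before it
 * can be handed to DPDK for transmission. */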
64839cf4
WT
1827 for (i = 0; i < batch->count; i++) {
1828 int size = dp_packet_size(batch->packets[i]);
95fb793a 1829
f98d7864 1830 if (OVS_UNLIKELY(size > dev->max_packet_len)) {
f4fd623c 1831 VLOG_WARN_RL(&rl, "Packet size %d exceeds max_packet_len %d",
58be5c0e 1832 (int) size, dev->max_packet_len);
f4fd623c 1833
175cf4de 1834 dropped++;
f4fd623c
DDP
1835 continue;
1836 }
8a9562d2 1837
2391135c 1838 pkts[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
8a9562d2 1839
2391135c 1840 if (!pkts[newcnt]) {
64839cf4 1841 dropped += batch->count - i;
175cf4de 1842 break;
f4fd623c
DDP
1843 }
1844
1845 /* We have to do a copy for now */
2391135c 1846 memcpy(rte_pktmbuf_mtod(pkts[newcnt], void *),
64839cf4 1847 dp_packet_data(batch->packets[i]), size);
f4fd623c 1848
2391135c
MK
1849 rte_pktmbuf_data_len(pkts[newcnt]) = size;
1850 rte_pktmbuf_pkt_len(pkts[newcnt]) = size;
f4fd623c
DDP
1851
1852 newcnt++;
1853 }
8a9562d2 1854
58397e6c 1855 if (dev->type == DPDK_DEV_VHOST) {
2391135c 1856 __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) pkts,
dd52de45 1857 newcnt);
58397e6c 1858 } else {
0bf765f7
IS
1859 unsigned int qos_pkts = newcnt;
1860
1861 /* Check if QoS has been configured for this netdev. */
78bd47cf 1862 newcnt = netdev_dpdk_qos_run(dev, pkts, newcnt);
0bf765f7
IS
1863
1864 dropped += qos_pkts - newcnt;
819f13bd 1865 dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, newcnt);
58397e6c 1866 }
db73f716 1867
0bf765f7
IS
1868 if (OVS_UNLIKELY(dropped)) {
1869 rte_spinlock_lock(&dev->stats_lock);
1870 dev->stats.tx_dropped += dropped;
1871 rte_spinlock_unlock(&dev->stats_lock);
1872 }
8a9562d2
PS
1873}
1874
58397e6c 1875static int
64839cf4
WT
1876netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
1877 struct dp_packet_batch *batch,
324c8374 1878 bool may_steal, bool concurrent_txq OVS_UNUSED)
58397e6c 1879{
58397e6c 1880
dd52de45 1881 if (OVS_UNLIKELY(!may_steal || batch->packets[0]->source != DPBUF_DPDK)) {
64839cf4
WT
1882 dpdk_do_tx_copy(netdev, qid, batch);
1883 dp_packet_delete_batch(batch, may_steal);
58397e6c 1884 } else {
64839cf4 1885 dp_packet_batch_apply_cutlen(batch);
dd52de45 1886 __netdev_dpdk_vhost_send(netdev, qid, batch->packets, batch->count);
58397e6c
KT
1887 }
1888 return 0;
1889}
1890
7251515e
DV
1891static inline void
1892netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
324c8374
IM
1893 struct dp_packet_batch *batch, bool may_steal,
1894 bool concurrent_txq)
8a9562d2 1895{
3b1fb077
DDP
1896 if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
1897 dp_packet_delete_batch(batch, may_steal);
1898 return;
1899 }
1900
324c8374 1901 if (OVS_UNLIKELY(concurrent_txq)) {
81acebda 1902 qid = qid % dev->up.n_txq;
a0cb2d66
DDP
1903 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
1904 }
1905
7251515e 1906 if (OVS_UNLIKELY(!may_steal ||
64839cf4 1907 batch->packets[0]->source != DPBUF_DPDK)) {
7251515e
DV
1908 struct netdev *netdev = &dev->up;
1909
64839cf4
WT
1910 dpdk_do_tx_copy(netdev, qid, batch);
1911 dp_packet_delete_batch(batch, may_steal);
8a9562d2 1912 } else {
6b094bf4 1913 int dropped;
64839cf4 1914 int cnt = batch->count;
2391135c 1915 struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets;
8a9562d2 1916
7d6d1a40
WT
1917 dp_packet_batch_apply_cutlen(batch);
1918
2391135c 1919 cnt = netdev_dpdk_filter_packet_len(dev, pkts, cnt);
78bd47cf 1920 cnt = netdev_dpdk_qos_run(dev, pkts, cnt);
6b094bf4 1921 dropped = batch->count - cnt;
1b99bb05 1922
819f13bd 1923 dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, cnt);
8a9562d2 1924
f4fd623c 1925 if (OVS_UNLIKELY(dropped)) {
45d947c4 1926 rte_spinlock_lock(&dev->stats_lock);
f4fd623c 1927 dev->stats.tx_dropped += dropped;
45d947c4 1928 rte_spinlock_unlock(&dev->stats_lock);
f4fd623c 1929 }
8a9562d2 1930 }
a0cb2d66 1931
324c8374 1932 if (OVS_UNLIKELY(concurrent_txq)) {
a0cb2d66
DDP
1933 rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
1934 }
7251515e
DV
1935}
1936
1937static int
1938netdev_dpdk_eth_send(struct netdev *netdev, int qid,
324c8374
IM
1939 struct dp_packet_batch *batch, bool may_steal,
1940 bool concurrent_txq)
7251515e
DV
1941{
1942 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
8a9562d2 1943
324c8374 1944 netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
7251515e 1945 return 0;
8a9562d2
PS
1946}
1947
1948static int
74ff3298 1949netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
8a9562d2
PS
1950{
1951 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1952
1953 ovs_mutex_lock(&dev->mutex);
1954 if (!eth_addr_equals(dev->hwaddr, mac)) {
74ff3298 1955 dev->hwaddr = mac;
045c0d1a 1956 netdev_change_seq_changed(netdev);
8a9562d2
PS
1957 }
1958 ovs_mutex_unlock(&dev->mutex);
1959
1960 return 0;
1961}
1962
1963static int
74ff3298 1964netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
8a9562d2
PS
1965{
1966 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1967
1968 ovs_mutex_lock(&dev->mutex);
74ff3298 1969 *mac = dev->hwaddr;
8a9562d2
PS
1970 ovs_mutex_unlock(&dev->mutex);
1971
1972 return 0;
1973}
1974
1975static int
1976netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
1977{
1978 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1979
1980 ovs_mutex_lock(&dev->mutex);
1981 *mtup = dev->mtu;
1982 ovs_mutex_unlock(&dev->mutex);
1983
1984 return 0;
1985}
1986
0072e931
MK
1987static int
1988netdev_dpdk_set_mtu(struct netdev *netdev, int mtu)
1989{
1990 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1991
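/* A valid MTU must, once the Ethernet header and CRC are added, fit within
 * the device's maximum frame length, and may not fall below the Ethernet
 * minimum. The new value only takes effect on the next reconfiguration. */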
1992 if (MTU_TO_FRAME_LEN(mtu) > NETDEV_DPDK_MAX_PKT_LEN
1993 || mtu < ETHER_MIN_MTU) {
1994 VLOG_WARN("%s: unsupported MTU %d\n", dev->up.name, mtu);
1995 return EINVAL;
1996 }
1997
1998 ovs_mutex_lock(&dev->mutex);
1999 if (dev->requested_mtu != mtu) {
2000 dev->requested_mtu = mtu;
2001 netdev_request_reconfigure(netdev);
2002 }
2003 ovs_mutex_unlock(&dev->mutex);
2004
2005 return 0;
2006}
2007
8a9562d2 2008static int
d46285a2 2009netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);
8a9562d2 2010
58397e6c
KT
2011static int
2012netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
2013 struct netdev_stats *stats)
2014{
2015 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2016
2017 ovs_mutex_lock(&dev->mutex);
58397e6c 2018
45d947c4 2019 rte_spinlock_lock(&dev->stats_lock);
58397e6c
KT
2020 /* Supported Stats */
2021 stats->rx_packets += dev->stats.rx_packets;
2022 stats->tx_packets += dev->stats.tx_packets;
9509913a 2023 stats->rx_dropped = dev->stats.rx_dropped;
58397e6c 2024 stats->tx_dropped += dev->stats.tx_dropped;
9e3ddd45
TP
2025 stats->multicast = dev->stats.multicast;
2026 stats->rx_bytes = dev->stats.rx_bytes;
2027 stats->tx_bytes = dev->stats.tx_bytes;
2028 stats->rx_errors = dev->stats.rx_errors;
2029 stats->rx_length_errors = dev->stats.rx_length_errors;
d6e3feb5 2030
2031 stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
2032 stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
2033 stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
2034 stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
2035 stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
2036 stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
2037 stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;
2038
45d947c4 2039 rte_spinlock_unlock(&dev->stats_lock);
9e3ddd45 2040
58397e6c
KT
2041 ovs_mutex_unlock(&dev->mutex);
2042
2043 return 0;
2044}
2045
d6e3feb5 2046static void
2047netdev_dpdk_convert_xstats(struct netdev_stats *stats,
0a0f39df
CL
2048 const struct rte_eth_xstat *xstats,
2049 const struct rte_eth_xstat_name *names,
d6e3feb5 2050 const unsigned int size)
2051{
d6e3feb5 2052 for (unsigned int i = 0; i < size; i++) {
0a0f39df 2053 if (strcmp(XSTAT_RX_64_PACKETS, names[i].name) == 0) {
d6e3feb5 2054 stats->rx_1_to_64_packets = xstats[i].value;
0a0f39df 2055 } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, names[i].name) == 0) {
d6e3feb5 2056 stats->rx_65_to_127_packets = xstats[i].value;
0a0f39df 2057 } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, names[i].name) == 0) {
d6e3feb5 2058 stats->rx_128_to_255_packets = xstats[i].value;
0a0f39df 2059 } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, names[i].name) == 0) {
d6e3feb5 2060 stats->rx_256_to_511_packets = xstats[i].value;
0a0f39df 2061 } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS, names[i].name) == 0) {
d6e3feb5 2062 stats->rx_512_to_1023_packets = xstats[i].value;
0a0f39df 2063 } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS, names[i].name) == 0) {
d6e3feb5 2064 stats->rx_1024_to_1522_packets = xstats[i].value;
0a0f39df 2065 } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
d6e3feb5 2066 stats->rx_1523_to_max_packets = xstats[i].value;
0a0f39df 2067 } else if (strcmp(XSTAT_TX_64_PACKETS, names[i].name) == 0) {
d6e3feb5 2068 stats->tx_1_to_64_packets = xstats[i].value;
0a0f39df 2069 } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, names[i].name) == 0) {
d6e3feb5 2070 stats->tx_65_to_127_packets = xstats[i].value;
0a0f39df 2071 } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, names[i].name) == 0) {
d6e3feb5 2072 stats->tx_128_to_255_packets = xstats[i].value;
0a0f39df 2073 } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, names[i].name) == 0) {
d6e3feb5 2074 stats->tx_256_to_511_packets = xstats[i].value;
0a0f39df 2075 } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS, names[i].name) == 0) {
d6e3feb5 2076 stats->tx_512_to_1023_packets = xstats[i].value;
0a0f39df 2077 } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS, names[i].name) == 0) {
d6e3feb5 2078 stats->tx_1024_to_1522_packets = xstats[i].value;
0a0f39df 2079 } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
d6e3feb5 2080 stats->tx_1523_to_max_packets = xstats[i].value;
d57f777f
PS
2081 } else if (strcmp(XSTAT_RX_MULTICAST_PACKETS, names[i].name) == 0) {
2082 stats->multicast = xstats[i].value;
0a0f39df 2083 } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, names[i].name) == 0) {
d6e3feb5 2084 stats->tx_multicast_packets = xstats[i].value;
0a0f39df 2085 } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, names[i].name) == 0) {
d6e3feb5 2086 stats->rx_broadcast_packets = xstats[i].value;
0a0f39df 2087 } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, names[i].name) == 0) {
d6e3feb5 2088 stats->tx_broadcast_packets = xstats[i].value;
0a0f39df 2089 } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, names[i].name) == 0) {
d6e3feb5 2090 stats->rx_undersized_errors = xstats[i].value;
0a0f39df 2091 } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, names[i].name) == 0) {
d6e3feb5 2092 stats->rx_fragmented_errors = xstats[i].value;
0a0f39df 2093 } else if (strcmp(XSTAT_RX_JABBER_ERRORS, names[i].name) == 0) {
d6e3feb5 2094 stats->rx_jabber_errors = xstats[i].value;
2095 }
2096 }
2097}
2098
8a9562d2
PS
2099static int
2100netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
2101{
2102 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2103 struct rte_eth_stats rte_stats;
2104 bool gg;
2105
2106 netdev_dpdk_get_carrier(netdev, &gg);
2107 ovs_mutex_lock(&dev->mutex);
8a9562d2 2108
0a0f39df
CL
2109 struct rte_eth_xstat *rte_xstats = NULL;
2110 struct rte_eth_xstat_name *rte_xstats_names = NULL;
2111 int rte_xstats_len, rte_xstats_new_len, rte_xstats_ret;
d6e3feb5 2112
2113 if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
bb37956a 2114 VLOG_ERR("Can't get ETH statistics for port: %"PRIu8, dev->port_id);
f9256822 2115 ovs_mutex_unlock(&dev->mutex);
d6e3feb5 2116 return EPROTO;
2117 }
2118
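/* Extended stats follow the usual two-call DPDK pattern: first query the
 * number of entries with a NULL buffer, then allocate arrays of that size
 * and fetch the names and values separately. */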
0a0f39df
CL
2119 /* Get length of statistics */
2120 rte_xstats_len = rte_eth_xstats_get_names(dev->port_id, NULL, 0);
2121 if (rte_xstats_len < 0) {
bb37956a 2122 VLOG_WARN("Cannot get XSTATS values for port: %"PRIu8, dev->port_id);
0a0f39df
CL
2123 goto out;
2124 }
2125 /* Reserve memory for xstats names and values */
2126 rte_xstats_names = xcalloc(rte_xstats_len, sizeof *rte_xstats_names);
2127 rte_xstats = xcalloc(rte_xstats_len, sizeof *rte_xstats);
2128
2129 /* Retrieve xstats names */
2130 rte_xstats_new_len = rte_eth_xstats_get_names(dev->port_id,
2131 rte_xstats_names,
2132 rte_xstats_len);
2133 if (rte_xstats_new_len != rte_xstats_len) {
bb37956a 2134 VLOG_WARN("Cannot get XSTATS names for port: %"PRIu8, dev->port_id);
0a0f39df
CL
2135 goto out;
2136 }
2137 /* Retrieve xstats values */
2138 memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len);
2139 rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
2140 rte_xstats_len);
2141 if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
2142 netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_names,
2143 rte_xstats_len);
d6e3feb5 2144 } else {
bb37956a 2145 VLOG_WARN("Cannot get XSTATS values for port: %"PRIu8, dev->port_id);
d6e3feb5 2146 }
8a9562d2 2147
0a0f39df
CL
2148out:
2149 free(rte_xstats);
2150 free(rte_xstats_names);
2151
2f9dd77f
PS
2152 stats->rx_packets = rte_stats.ipackets;
2153 stats->tx_packets = rte_stats.opackets;
2154 stats->rx_bytes = rte_stats.ibytes;
2155 stats->tx_bytes = rte_stats.obytes;
21e9844c 2156 stats->rx_errors = rte_stats.ierrors;
2f9dd77f 2157 stats->tx_errors = rte_stats.oerrors;
8a9562d2 2158
45d947c4 2159 rte_spinlock_lock(&dev->stats_lock);
2f9dd77f 2160 stats->tx_dropped = dev->stats.tx_dropped;
9509913a 2161 stats->rx_dropped = dev->stats.rx_dropped;
45d947c4 2162 rte_spinlock_unlock(&dev->stats_lock);
9e3ddd45
TP
2163
2164 /* These are the available DPDK counters for packets not received due to
2165 * local resource constraints in DPDK and NIC respectively. */
9509913a 2166 stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed;
9e3ddd45
TP
2167 stats->rx_missed_errors = rte_stats.imissed;
2168
8a9562d2
PS
2169 ovs_mutex_unlock(&dev->mutex);
2170
2171 return 0;
2172}
2173
2174static int
d46285a2 2175netdev_dpdk_get_features(const struct netdev *netdev,
8a9562d2 2176 enum netdev_features *current,
ca3d4f55
BX
2177 enum netdev_features *advertised,
2178 enum netdev_features *supported,
2179 enum netdev_features *peer)
8a9562d2 2180{
d46285a2 2181 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
8a9562d2
PS
2182 struct rte_eth_link link;
2183
2184 ovs_mutex_lock(&dev->mutex);
2185 link = dev->link;
2186 ovs_mutex_unlock(&dev->mutex);
2187
362ca396 2188 if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
2189 if (link.link_speed == ETH_SPEED_NUM_10M) {
8a9562d2
PS
2190 *current = NETDEV_F_10MB_HD;
2191 }
362ca396 2192 if (link.link_speed == ETH_SPEED_NUM_100M) {
8a9562d2
PS
2193 *current = NETDEV_F_100MB_HD;
2194 }
362ca396 2195 if (link.link_speed == ETH_SPEED_NUM_1G) {
8a9562d2
PS
2196 *current = NETDEV_F_1GB_HD;
2197 }
2198 } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
362ca396 2199 if (link.link_speed == ETH_SPEED_NUM_10M) {
8a9562d2
PS
2200 *current = NETDEV_F_10MB_FD;
2201 }
362ca396 2202 if (link.link_speed == ETH_SPEED_NUM_100M) {
8a9562d2
PS
2203 *current = NETDEV_F_100MB_FD;
2204 }
362ca396 2205 if (link.link_speed == ETH_SPEED_NUM_1G) {
8a9562d2
PS
2206 *current = NETDEV_F_1GB_FD;
2207 }
362ca396 2208 if (link.link_speed == ETH_SPEED_NUM_10G) {
8a9562d2
PS
2209 *current = NETDEV_F_10GB_FD;
2210 }
2211 }
2212
362ca396 2213 if (link.link_autoneg) {
2214 *current |= NETDEV_F_AUTONEG;
2215 }
2216
ca3d4f55
BX
2217 *advertised = *supported = *peer = 0;
2218
8a9562d2
PS
2219 return 0;
2220}
2221
9509913a
IS
2222static struct ingress_policer *
2223netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst)
2224{
2225 struct ingress_policer *policer = NULL;
2226 uint64_t rate_bytes;
2227 uint64_t burst_bytes;
2228 int err = 0;
2229
2230 policer = xmalloc(sizeof *policer);
2231 rte_spinlock_init(&policer->policer_lock);
2232
2233 /* rte_meter requires bytes so convert kbits rate and burst to bytes. */
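/* The ULL suffix forces the multiplication into 64-bit arithmetic, so rates
 * large enough that 'rate * 1000' would overflow a 32-bit value still
 * convert correctly. */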
602c8668
LR
2234 rate_bytes = rate * 1000ULL / 8;
2235 burst_bytes = burst * 1000ULL / 8;
9509913a
IS
2236
2237 policer->app_srtcm_params.cir = rate_bytes;
2238 policer->app_srtcm_params.cbs = burst_bytes;
2239 policer->app_srtcm_params.ebs = 0;
2240 err = rte_meter_srtcm_config(&policer->in_policer,
2241 &policer->app_srtcm_params);
58be5c0e 2242 if (err) {
9509913a
IS
2243 VLOG_ERR("Could not create rte meter for ingress policer");
2244 return NULL;
2245 }
2246
2247 return policer;
2248}
2249
2250static int
2251netdev_dpdk_set_policing(struct netdev* netdev, uint32_t policer_rate,
2252 uint32_t policer_burst)
2253{
2254 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
2255 struct ingress_policer *policer;
2256
2257 /* Force to 0 if no rate specified,
2258 * default to 8000 kbits if burst is 0,
2259 * else stick with user-specified value.
2260 */
2261 policer_burst = (!policer_rate ? 0
2262 : !policer_burst ? 8000
2263 : policer_burst);
2264
2265 ovs_mutex_lock(&dev->mutex);
2266
2267 policer = ovsrcu_get_protected(struct ingress_policer *,
2268 &dev->ingress_policer);
2269
2270 if (dev->policer_rate == policer_rate &&
2271 dev->policer_burst == policer_burst) {
2272 /* Assume that settings haven't changed since we last set them. */
2273 ovs_mutex_unlock(&dev->mutex);
2274 return 0;
2275 }
2276
2277 /* Destroy any existing ingress policer for the device if one exists */
2278 if (policer) {
2279 ovsrcu_postpone(free, policer);
2280 }
2281
2282 if (policer_rate != 0) {
2283 policer = netdev_dpdk_policer_construct(policer_rate, policer_burst);
2284 } else {
2285 policer = NULL;
2286 }
2287 ovsrcu_set(&dev->ingress_policer, policer);
2288 dev->policer_rate = policer_rate;
2289 dev->policer_burst = policer_burst;
2290 ovs_mutex_unlock(&dev->mutex);
2291
2292 return 0;
2293}
2294
8a9562d2
PS
2295static int
2296netdev_dpdk_get_ifindex(const struct netdev *netdev)
2297{
2298 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
8a9562d2
PS
2299
2300 ovs_mutex_lock(&dev->mutex);
12d0d124
PL
2301 /* Calculate hash from the netdev name. Ensure that ifindex is a 24-bit
2302 * positive integer to meet RFC 2863 recommendations.
2303 */
2304 int ifindex = hash_string(netdev->name, 0) % 0xfffffe + 1;
8a9562d2
PS
2305 ovs_mutex_unlock(&dev->mutex);
2306
2307 return ifindex;
2308}
2309
2310static int
d46285a2 2311netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
8a9562d2 2312{
d46285a2 2313 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
8a9562d2
PS
2314
2315 ovs_mutex_lock(&dev->mutex);
2316 check_link_status(dev);
2317 *carrier = dev->link.link_status;
58397e6c
KT
2318
2319 ovs_mutex_unlock(&dev->mutex);
2320
2321 return 0;
2322}
2323
2324static int
d46285a2 2325netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
58397e6c 2326{
d46285a2 2327 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
58397e6c
KT
2328
2329 ovs_mutex_lock(&dev->mutex);
2330
0a0f39df 2331 if (is_vhost_running(dev)) {
58397e6c
KT
2332 *carrier = 1;
2333 } else {
2334 *carrier = 0;
2335 }
2336
8a9562d2
PS
2337 ovs_mutex_unlock(&dev->mutex);
2338
2339 return 0;
2340}
2341
2342static long long int
d46285a2 2343netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
8a9562d2 2344{
d46285a2 2345 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
8a9562d2
PS
2346 long long int carrier_resets;
2347
2348 ovs_mutex_lock(&dev->mutex);
2349 carrier_resets = dev->link_reset_cnt;
2350 ovs_mutex_unlock(&dev->mutex);
2351
2352 return carrier_resets;
2353}
2354
2355static int
d46285a2 2356netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
8a9562d2
PS
2357 long long int interval OVS_UNUSED)
2358{
ee32150e 2359 return EOPNOTSUPP;
8a9562d2
PS
2360}
2361
2362static int
2363netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
2364 enum netdev_flags off, enum netdev_flags on,
64839cf4
WT
2365 enum netdev_flags *old_flagsp)
2366 OVS_REQUIRES(dev->mutex)
8a9562d2 2367{
8a9562d2
PS
2368 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
2369 return EINVAL;
2370 }
2371
2372 *old_flagsp = dev->flags;
2373 dev->flags |= on;
2374 dev->flags &= ~off;
2375
2376 if (dev->flags == *old_flagsp) {
2377 return 0;
2378 }
2379
58397e6c 2380 if (dev->type == DPDK_DEV_ETH) {
58397e6c
KT
2381 if (dev->flags & NETDEV_PROMISC) {
2382 rte_eth_promiscuous_enable(dev->port_id);
2383 }
8a9562d2 2384
314fb5ad 2385 netdev_change_seq_changed(&dev->up);
e543851d
ZB
2386 } else {
2387 /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is
2388 * running then change netdev's change_seq to trigger link state
2389 * update. */
e543851d
ZB
2390
2391 if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off)))
0a0f39df 2392 && is_vhost_running(dev)) {
e543851d
ZB
2393 netdev_change_seq_changed(&dev->up);
2394
2395 /* Clear statistics if device is getting up. */
2396 if (NETDEV_UP & on) {
2397 rte_spinlock_lock(&dev->stats_lock);
58be5c0e 2398 memset(&dev->stats, 0, sizeof dev->stats);
e543851d
ZB
2399 rte_spinlock_unlock(&dev->stats_lock);
2400 }
2401 }
8a9562d2
PS
2402 }
2403
2404 return 0;
2405}
2406
2407static int
d46285a2 2408netdev_dpdk_update_flags(struct netdev *netdev,
8a9562d2
PS
2409 enum netdev_flags off, enum netdev_flags on,
2410 enum netdev_flags *old_flagsp)
2411{
d46285a2 2412 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
8a9562d2
PS
2413 int error;
2414
d46285a2
DDP
2415 ovs_mutex_lock(&dev->mutex);
2416 error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
2417 ovs_mutex_unlock(&dev->mutex);
8a9562d2
PS
2418
2419 return error;
2420}
2421
2422static int
d46285a2 2423netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
8a9562d2 2424{
d46285a2 2425 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
8a9562d2
PS
2426 struct rte_eth_dev_info dev_info;
2427
7cd1261d 2428 if (!rte_eth_dev_is_valid_port(dev->port_id)) {
8a9562d2 2429 return ENODEV;
7cd1261d 2430 }
8a9562d2
PS
2431
2432 ovs_mutex_lock(&dev->mutex);
2433 rte_eth_dev_info_get(dev->port_id, &dev_info);
2434 ovs_mutex_unlock(&dev->mutex);
2435
95fb793a 2436 smap_add_format(args, "port_no", "%d", dev->port_id);
58be5c0e
MK
2437 smap_add_format(args, "numa_id", "%d",
2438 rte_eth_dev_socket_id(dev->port_id));
8a9562d2
PS
2439 smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
2440 smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
4be4d22c 2441 smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
8a9562d2
PS
2442 smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
2443 smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
2444 smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
58be5c0e
MK
2445 smap_add_format(args, "max_hash_mac_addrs", "%u",
2446 dev_info.max_hash_mac_addrs);
8a9562d2
PS
2447 smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
2448 smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);
2449
39c2baa9 2450 if (dev_info.pci_dev) {
2451 smap_add_format(args, "pci-vendor_id", "0x%u",
2452 dev_info.pci_dev->id.vendor_id);
2453 smap_add_format(args, "pci-device_id", "0x%x",
2454 dev_info.pci_dev->id.device_id);
2455 }
8a9562d2
PS
2456
2457 return 0;
2458}
2459
2460static void
2461netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
2462 OVS_REQUIRES(dev->mutex)
2463{
2464 enum netdev_flags old_flags;
2465
2466 if (admin_state) {
2467 netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
2468 } else {
2469 netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
2470 }
2471}
2472
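/* Handler for the "netdev-dpdk/set-admin-state" unixctl command, e.g.
 *   ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
 * With no netdev argument the state is applied to every DPDK port. */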
2473static void
2474netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
2475 const char *argv[], void *aux OVS_UNUSED)
2476{
2477 bool up;
2478
2479 if (!strcasecmp(argv[argc - 1], "up")) {
2480 up = true;
2481 } else if (!strcasecmp(argv[argc - 1], "down")) {
2482 up = false;
2483 } else {
2484 unixctl_command_reply_error(conn, "Invalid Admin State");
2485 return;
2486 }
2487
2488 if (argc > 2) {
2489 struct netdev *netdev = netdev_from_name(argv[1]);
2490 if (netdev && is_dpdk_class(netdev->netdev_class)) {
2491 struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);
2492
2493 ovs_mutex_lock(&dpdk_dev->mutex);
2494 netdev_dpdk_set_admin_state__(dpdk_dev, up);
2495 ovs_mutex_unlock(&dpdk_dev->mutex);
2496
2497 netdev_close(netdev);
2498 } else {
2499 unixctl_command_reply_error(conn, "Not a DPDK Interface");
2500 netdev_close(netdev);
2501 return;
2502 }
2503 } else {
2504 struct netdev_dpdk *netdev;
2505
2506 ovs_mutex_lock(&dpdk_mutex);
2507 LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
2508 ovs_mutex_lock(&netdev->mutex);
2509 netdev_dpdk_set_admin_state__(netdev, up);
2510 ovs_mutex_unlock(&netdev->mutex);
2511 }
2512 ovs_mutex_unlock(&dpdk_mutex);
2513 }
2514 unixctl_command_reply(conn, "OK");
2515}
2516
0ee821c2
DB
2517static void
2518netdev_dpdk_detach(struct unixctl_conn *conn, int argc OVS_UNUSED,
2519 const char *argv[], void *aux OVS_UNUSED)
2520{
2521 int ret;
2522 char *response;
2523 uint8_t port_id;
2524 char devname[RTE_ETH_NAME_MAX_LEN];
2525 struct netdev_dpdk *dev;
2526
2527 ovs_mutex_lock(&dpdk_mutex);
2528
2529 if (!rte_eth_dev_count() || rte_eth_dev_get_port_by_name(argv[1],
2530 &port_id)) {
2531 response = xasprintf("Device '%s' not found in DPDK", argv[1]);
2532 goto error;
2533 }
2534
2535 dev = netdev_dpdk_lookup_by_port_id(port_id);
2536 if (dev) {
2537 response = xasprintf("Device '%s' is being used by interface '%s'. "
2538 "Remove it before detaching",
2539 argv[1], netdev_get_name(&dev->up));
2540 goto error;
2541 }
2542
2543 rte_eth_dev_close(port_id);
2544
2545 ret = rte_eth_dev_detach(port_id, devname);
2546 if (ret < 0) {
2547 response = xasprintf("Device '%s' can not be detached", argv[1]);
2548 goto error;
2549 }
2550
2551 response = xasprintf("Device '%s' has been detached", argv[1]);
2552
2553 ovs_mutex_unlock(&dpdk_mutex);
2554 unixctl_command_reply(conn, response);
2555 free(response);
2556 return;
2557
2558error:
2559 ovs_mutex_unlock(&dpdk_mutex);
2560 unixctl_command_reply_error(conn, response);
2561 free(response);
2562}
2563
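/* Handler for the "netdev-dpdk/detach" unixctl command: closes and detaches
 * a DPDK device, given by PCI address, that is not currently in use by any
 * interface, e.g.
 *   ovs-appctl netdev-dpdk/detach 0000:01:00.0
 * (the PCI address above is only an illustration). */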
58397e6c
KT
2564/*
2565 * Set virtqueue flags so that we do not receive interrupts.
2566 */
2567static void
0a0f39df 2568set_irq_status(int vid)
58397e6c 2569{
4573fbd3 2570 uint32_t i;
4573fbd3 2571
f3e7ec25
MW
2572 for (i = 0; i < rte_vhost_get_vring_num(vid); i++) {
2573 rte_vhost_enable_guest_notification(vid, i, 0);
4573fbd3
FL
2574 }
2575}
2576
585a5bea
IM
2577/*
2578 * Fixes mapping for vhost-user tx queues. Must be called after each
81acebda 2579 * enabling/disabling of queues and n_txq modifications.
585a5bea
IM
2580 */
2581static void
d46285a2
DDP
2582netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
2583 OVS_REQUIRES(dev->mutex)
585a5bea
IM
2584{
2585 int *enabled_queues, n_enabled = 0;
81acebda 2586 int i, k, total_txqs = dev->up.n_txq;
585a5bea 2587
eff23640 2588 enabled_queues = xcalloc(total_txqs, sizeof *enabled_queues);
585a5bea
IM
2589
2590 for (i = 0; i < total_txqs; i++) {
2591 /* Enabled queues always mapped to themselves. */
d46285a2 2592 if (dev->tx_q[i].map == i) {
585a5bea
IM
2593 enabled_queues[n_enabled++] = i;
2594 }
2595 }
2596
2597 if (n_enabled == 0 && total_txqs != 0) {
f3ea2ad2 2598 enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
585a5bea
IM
2599 n_enabled = 1;
2600 }
2601
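/* Spread the queues that are not enabled round-robin across the enabled
 * ones, so that every OVS tx qid resolves to a virtqueue the guest actually
 * services. */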
2602 k = 0;
2603 for (i = 0; i < total_txqs; i++) {
d46285a2
DDP
2604 if (dev->tx_q[i].map != i) {
2605 dev->tx_q[i].map = enabled_queues[k];
585a5bea
IM
2606 k = (k + 1) % n_enabled;
2607 }
2608 }
2609
2d24d165 2610 VLOG_DBG("TX queue mapping for %s\n", dev->vhost_id);
585a5bea 2611 for (i = 0; i < total_txqs; i++) {
d46285a2 2612 VLOG_DBG("%2d --> %2d", i, dev->tx_q[i].map);
585a5bea
IM
2613 }
2614
eff23640 2615 free(enabled_queues);
585a5bea 2616}
4573fbd3 2617
58397e6c
KT
2618/*
2619 * A new virtio-net device is added to a vhost port.
2620 */
2621static int
0a0f39df 2622new_device(int vid)
58397e6c 2623{
d46285a2 2624 struct netdev_dpdk *dev;
58397e6c 2625 bool exists = false;
db8f13b0 2626 int newnode = 0;
0a0f39df
CL
2627 char ifname[IF_NAME_SZ];
2628
58be5c0e 2629 rte_vhost_get_ifname(vid, ifname, sizeof ifname);
58397e6c
KT
2630
2631 ovs_mutex_lock(&dpdk_mutex);
2632 /* Add device to the vhost port with the same name as that passed down. */
d46285a2 2633 LIST_FOR_EACH(dev, list_node, &dpdk_list) {
c1ff66ac 2634 ovs_mutex_lock(&dev->mutex);
2d24d165 2635 if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
f3e7ec25 2636 uint32_t qp_num = rte_vhost_get_vring_num(vid)/VIRTIO_QNUM;
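/* Each queue pair is one RX plus one TX vring, so the pair count is the
 * total vring count divided by VIRTIO_QNUM. */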
db8f13b0
CL
2637
2638 /* Get NUMA information */
0a0f39df
CL
2639 newnode = rte_vhost_get_numa_node(vid);
2640 if (newnode == -1) {
5b9bf9e0 2641#ifdef VHOST_NUMA
db8f13b0 2642 VLOG_INFO("Error getting NUMA info for vHost Device '%s'",
0a0f39df 2643 ifname);
5b9bf9e0 2644#endif
db8f13b0 2645 newnode = dev->socket_id;
db8f13b0
CL
2646 }
2647
7f5f2bd0
IM
2648 if (dev->requested_n_txq != qp_num
2649 || dev->requested_n_rxq != qp_num
2650 || dev->requested_socket_id != newnode) {
2651 dev->requested_socket_id = newnode;
2652 dev->requested_n_rxq = qp_num;
2653 dev->requested_n_txq = qp_num;
2654 netdev_request_reconfigure(&dev->up);
2655 } else {
2656 /* Reconfiguration not required. */
2657 dev->vhost_reconfigured = true;
2658 }
81acebda 2659
0a0f39df 2660 ovsrcu_index_set(&dev->vid, vid);
81acebda
IM
2661 exists = true;
2662
58397e6c 2663 /* Disable notifications. */
0a0f39df 2664 set_irq_status(vid);
e543851d 2665 netdev_change_seq_changed(&dev->up);
d46285a2 2666 ovs_mutex_unlock(&dev->mutex);
58397e6c
KT
2667 break;
2668 }
c1ff66ac 2669 ovs_mutex_unlock(&dev->mutex);
58397e6c
KT
2670 }
2671 ovs_mutex_unlock(&dpdk_mutex);
2672
2673 if (!exists) {
0a0f39df 2674 VLOG_INFO("vHost Device '%s' can't be added - name not found", ifname);
58397e6c
KT
2675
2676 return -1;
2677 }
2678
0a0f39df
CL
2679 VLOG_INFO("vHost Device '%s' has been added on numa node %i",
2680 ifname, newnode);
2681
58397e6c
KT
2682 return 0;
2683}
2684
f3ea2ad2
IM
2685/* Clears mapping for all available queues of vhost interface. */
2686static void
2687netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
2688 OVS_REQUIRES(dev->mutex)
2689{
2690 int i;
2691
81acebda 2692 for (i = 0; i < dev->up.n_txq; i++) {
f3ea2ad2
IM
2693 dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
2694 }
2695}
2696
58397e6c
KT
2697/*
2698 * Remove a virtio-net device from the specific vhost port. Use dev->remove
2699 * flag to stop any more packets from being sent or received to/from a VM and
2700 * ensure all currently queued packets have been sent/received before removing
2701 * the device.
2702 */
2703static void
0a0f39df 2704destroy_device(int vid)
58397e6c 2705{
d46285a2 2706 struct netdev_dpdk *dev;
afee281f 2707 bool exists = false;
0a0f39df
CL
2708 char ifname[IF_NAME_SZ];
2709
58be5c0e 2710 rte_vhost_get_ifname(vid, ifname, sizeof ifname);
58397e6c
KT
2711
2712 ovs_mutex_lock(&dpdk_mutex);
d46285a2 2713 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
0a0f39df 2714 if (netdev_dpdk_get_vid(dev) == vid) {
58397e6c 2715
d46285a2 2716 ovs_mutex_lock(&dev->mutex);
0a0f39df
CL
2717 dev->vhost_reconfigured = false;
2718 ovsrcu_index_set(&dev->vid, -1);
d46285a2 2719 netdev_dpdk_txq_map_clear(dev);
81acebda 2720
e543851d 2721 netdev_change_seq_changed(&dev->up);
d46285a2 2722 ovs_mutex_unlock(&dev->mutex);
81acebda 2723 exists = true;
afee281f 2724 break;
58397e6c
KT
2725 }
2726 }
afee281f 2727
58397e6c
KT
2728 ovs_mutex_unlock(&dpdk_mutex);
2729
0a0f39df 2730 if (exists) {
afee281f
KT
2731 /*
2732 * Wait for other threads to quiesce after setting the 'vid' to -1,
2733 * before returning.
2734 */
2735 ovsrcu_synchronize();
2736 /*
2737 * As call to ovsrcu_synchronize() will end the quiescent state,
2738 * put thread back into quiescent state before returning.
2739 */
2740 ovsrcu_quiesce_start();
0a0f39df 2741 VLOG_INFO("vHost Device '%s' has been removed", ifname);
afee281f 2742 } else {
0a0f39df 2743 VLOG_INFO("vHost Device '%s' not found", ifname);
afee281f 2744 }
58397e6c
KT
2745}
2746
585a5bea 2747static int
0a0f39df 2748vring_state_changed(int vid, uint16_t queue_id, int enable)
585a5bea 2749{
d46285a2 2750 struct netdev_dpdk *dev;
585a5bea
IM
2751 bool exists = false;
2752 int qid = queue_id / VIRTIO_QNUM;
0a0f39df
CL
2753 char ifname[IF_NAME_SZ];
2754
58be5c0e 2755 rte_vhost_get_ifname(vid, ifname, sizeof ifname);
585a5bea
IM
2756
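/* Only the guest's RX vrings matter here: they are the rings the host TX
 * path enqueues into, so only their enable/disable state drives the tx_q
 * remapping. Events on the guest's TX vrings are ignored. */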
2757 if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
2758 return 0;
2759 }
2760
2761 ovs_mutex_lock(&dpdk_mutex);
d46285a2 2762 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
c1ff66ac 2763 ovs_mutex_lock(&dev->mutex);
2d24d165 2764 if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
585a5bea 2765 if (enable) {
d46285a2 2766 dev->tx_q[qid].map = qid;
585a5bea 2767 } else {
d46285a2 2768 dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
585a5bea 2769 }
d46285a2 2770 netdev_dpdk_remap_txqs(dev);
585a5bea 2771 exists = true;
d46285a2 2772 ovs_mutex_unlock(&dev->mutex);
585a5bea
IM
2773 break;
2774 }
c1ff66ac 2775 ovs_mutex_unlock(&dev->mutex);
585a5bea
IM
2776 }
2777 ovs_mutex_unlock(&dpdk_mutex);
2778
2779 if (exists) {
0a0f39df
CL
2780 VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s'"
2781 "changed to \'%s\'", queue_id, qid, ifname,
d46285a2 2782 (enable == 1) ? "enabled" : "disabled");
585a5bea 2783 } else {
0a0f39df 2784 VLOG_INFO("vHost Device '%s' not found", ifname);
585a5bea
IM
2785 return -1;
2786 }
2787
2788 return 0;
2789}
2790
0a0f39df
CL
2791int
2792netdev_dpdk_get_vid(const struct netdev_dpdk *dev)
58397e6c 2793{
0a0f39df 2794 return ovsrcu_index_get(&dev->vid);
58397e6c
KT
2795}
2796
9509913a
IS
2797struct ingress_policer *
2798netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev)
2799{
2800 return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer);
2801}
2802
58397e6c 2803static int
ecc1a34e 2804netdev_dpdk_class_init(void)
7d1ced01 2805{
ecc1a34e
DDP
2806 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
2807
2808 /* This function can be called for different classes. The initialization
2809 * needs to be done only once */
2810 if (ovsthread_once_start(&once)) {
2811 ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
2812 unixctl_command_register("netdev-dpdk/set-admin-state",
2813 "[netdev] up|down", 1, 2,
2814 netdev_dpdk_set_admin_state, NULL);
2815
0ee821c2
DB
2816 unixctl_command_register("netdev-dpdk/detach",
2817 "pci address of device", 1, 1,
2818 netdev_dpdk_detach, NULL);
2819
ecc1a34e
DDP
2820 ovsthread_once_done(&once);
2821 }
362ca396 2822
7d1ced01
CL
2823 return 0;
2824}
2825
033e9df2 2826
95fb793a 2827/* Client Rings */
2828
95fb793a 2829static int
2830dpdk_ring_create(const char dev_name[], unsigned int port_no,
bb37956a 2831 dpdk_port_t *eth_port_id)
95fb793a 2832{
48fffdee 2833 struct dpdk_ring *ring_pair;
0c6f39e5 2834 char *ring_name;
b8374d0d 2835 int port_id;
95fb793a 2836
48fffdee
KT
2837 ring_pair = dpdk_rte_mzalloc(sizeof *ring_pair);
2838 if (!ring_pair) {
95fb793a 2839 return ENOMEM;
2840 }
2841
7251515e 2842 /* XXX: Add support for multiqueue ring. */
0c6f39e5 2843 ring_name = xasprintf("%s_tx", dev_name);
95fb793a 2844
8f0a76c9 2845 /* Create single producer tx ring, netdev does explicit locking. */
48fffdee 2846 ring_pair->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
8f0a76c9 2847 RING_F_SP_ENQ);
0c6f39e5 2848 free(ring_name);
48fffdee
KT
2849 if (ring_pair->cring_tx == NULL) {
2850 rte_free(ring_pair);
95fb793a 2851 return ENOMEM;
2852 }
2853
0c6f39e5 2854 ring_name = xasprintf("%s_rx", dev_name);
95fb793a 2855
8f0a76c9 2856 /* Create single consumer rx ring, netdev does explicit locking. */
48fffdee 2857 ring_pair->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
8f0a76c9 2858 RING_F_SC_DEQ);
0c6f39e5 2859 free(ring_name);
48fffdee
KT
2860 if (ring_pair->cring_rx == NULL) {
2861 rte_free(ring_pair);
95fb793a 2862 return ENOMEM;
2863 }
2864
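/* rte_eth_from_rings() wraps the rx/tx ring pair in a minimal ethdev,
 * letting the rest of the driver treat a 'dpdkr' port like any other DPDK
 * ethernet port. */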
b8374d0d
MV
2865 port_id = rte_eth_from_rings(dev_name, &ring_pair->cring_rx, 1,
2866 &ring_pair->cring_tx, 1, SOCKET0);
d7310583 2867
b8374d0d 2868 if (port_id < 0) {
48fffdee 2869 rte_free(ring_pair);
95fb793a 2870 return ENODEV;
2871 }
2872
48fffdee 2873 ring_pair->user_port_id = port_no;
b8374d0d
MV
2874 ring_pair->eth_port_id = port_id;
2875 *eth_port_id = port_id;
2876
48fffdee 2877 ovs_list_push_back(&dpdk_ring_list, &ring_pair->list_node);
95fb793a 2878
95fb793a 2879 return 0;
2880}
2881
2882static int
bb37956a 2883dpdk_ring_open(const char dev_name[], dpdk_port_t *eth_port_id)
64839cf4 2884 OVS_REQUIRES(dpdk_mutex)
95fb793a 2885{
48fffdee 2886 struct dpdk_ring *ring_pair;
95fb793a 2887 unsigned int port_no;
2888 int err = 0;
2889
2890 /* Names always start with "dpdkr" */
2891 err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
2892 if (err) {
2893 return err;
2894 }
2895
58be5c0e 2896 /* Look through our list to find the device */
48fffdee
KT
2897 LIST_FOR_EACH (ring_pair, list_node, &dpdk_ring_list) {
2898 if (ring_pair->user_port_id == port_no) {
58397e6c 2899 VLOG_INFO("Found dpdk ring device %s:", dev_name);
58be5c0e 2900 /* Really all that is needed */
48fffdee 2901 *eth_port_id = ring_pair->eth_port_id;
95fb793a 2902 return 0;
2903 }
2904 }
2905 /* Need to create the device rings */
2906 return dpdk_ring_create(dev_name, port_no, eth_port_id);
2907}
2908
7251515e 2909static int
d46285a2 2910netdev_dpdk_ring_send(struct netdev *netdev, int qid,
324c8374
IM
2911 struct dp_packet_batch *batch, bool may_steal,
2912 bool concurrent_txq)
7251515e 2913{
d46285a2 2914 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1b99bb05
MG
2915 unsigned i;
2916
58be5c0e
MK
2917 /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
2918 * the rss hash field is clear. This is because the same mbuf may be
2919 * modified by the consumer of the ring and return into the datapath
2920 * without recalculating the RSS hash. */
64839cf4 2921 for (i = 0; i < batch->count; i++) {
f8121b39 2922 dp_packet_mbuf_rss_flag_reset(batch->packets[i]);
1b99bb05 2923 }
7251515e 2924
324c8374 2925 netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
7251515e
DV
2926 return 0;
2927}
2928
95fb793a 2929static int
2930netdev_dpdk_ring_construct(struct netdev *netdev)
2931{
bb37956a 2932 dpdk_port_t port_no = 0;
95fb793a 2933 int err = 0;
2934
95fb793a 2935 ovs_mutex_lock(&dpdk_mutex);
2936
2937 err = dpdk_ring_open(netdev->name, &port_no);
2938 if (err) {
2939 goto unlock_dpdk;
2940 }
2941
1ce30dfd
DDP
2942 err = common_construct(netdev, port_no, DPDK_DEV_ETH,
2943 rte_eth_dev_socket_id(port_no));
95fb793a 2944unlock_dpdk:
2945 ovs_mutex_unlock(&dpdk_mutex);
2946 return err;
2947}
2948
0bf765f7
IS
2949/* QoS Functions */
2950
2951/*
2952 * Initialize QoS configuration operations.
2953 */
2954static void
2955qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
2956{
2957 conf->ops = ops;
78bd47cf 2958 rte_spinlock_init(&conf->lock);
0bf765f7
IS
2959}
2960
2961/*
2962 * Search the available QoS operations in 'qos_confs' and compare each
2963 * set's qos_name to 'name'. Return a pointer to the matching
2964 * dpdk_qos_ops, else return NULL.
2965 */
2966static const struct dpdk_qos_ops *
2967qos_lookup_name(const char *name)
2968{
2969 const struct dpdk_qos_ops *const *opsp;
2970
2971 for (opsp = qos_confs; *opsp != NULL; opsp++) {
2972 const struct dpdk_qos_ops *ops = *opsp;
2973 if (!strcmp(name, ops->qos_name)) {
2974 return ops;
2975 }
2976 }
2977 return NULL;
2978}
2979
0bf765f7
IS
2980static int
2981netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
2982 struct sset *types)
2983{
2984 const struct dpdk_qos_ops *const *opsp;
2985
2986 for (opsp = qos_confs; *opsp != NULL; opsp++) {
2987 const struct dpdk_qos_ops *ops = *opsp;
2988 if (ops->qos_construct && ops->qos_name[0] != '\0') {
2989 sset_add(types, ops->qos_name);
2990 }
2991 }
2992 return 0;
2993}
2994
2995static int
d46285a2 2996netdev_dpdk_get_qos(const struct netdev *netdev,
0bf765f7
IS
2997 const char **typep, struct smap *details)
2998{
d46285a2 2999 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
78bd47cf 3000 struct qos_conf *qos_conf;
0bf765f7
IS
3001 int error = 0;
3002
d46285a2 3003 ovs_mutex_lock(&dev->mutex);
78bd47cf
DDP
3004 qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
3005 if (qos_conf) {
3006 *typep = qos_conf->ops->qos_name;
3007 error = (qos_conf->ops->qos_get
3008 ? qos_conf->ops->qos_get(qos_conf, details): 0);
d03603c4
MC
3009 } else {
3010 /* No QoS configuration set, return an empty string */
3011 *typep = "";
0bf765f7 3012 }
d46285a2 3013 ovs_mutex_unlock(&dev->mutex);
0bf765f7
IS
3014
3015 return error;
3016}
3017
3018static int
78bd47cf
DDP
3019netdev_dpdk_set_qos(struct netdev *netdev, const char *type,
3020 const struct smap *details)
0bf765f7 3021{
d46285a2 3022 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
0bf765f7 3023 const struct dpdk_qos_ops *new_ops = NULL;
78bd47cf 3024 struct qos_conf *qos_conf, *new_qos_conf = NULL;
0bf765f7
IS
3025 int error = 0;
3026
d46285a2 3027 ovs_mutex_lock(&dev->mutex);
0bf765f7 3028
78bd47cf 3029 qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
0bf765f7 3030
78bd47cf
DDP
3031 new_ops = qos_lookup_name(type);
3032
3033 if (!new_ops || !new_ops->qos_construct) {
3034 new_qos_conf = NULL;
3035 if (type && type[0]) {
3036 error = EOPNOTSUPP;
0bf765f7 3037 }
44975bb0 3038 } else if (qos_conf && qos_conf->ops == new_ops
78bd47cf
DDP
3039 && qos_conf->ops->qos_is_equal(qos_conf, details)) {
3040 new_qos_conf = qos_conf;
0bf765f7 3041 } else {
78bd47cf 3042 error = new_ops->qos_construct(details, &new_qos_conf);
7ea266e9
IS
3043 }
3044
7ea266e9 3045 if (error) {
78bd47cf
DDP
3046 VLOG_ERR("Failed to set QoS type %s on port %s: %s",
3047 type, netdev->name, rte_strerror(error));
3048 }
3049
3050 if (new_qos_conf != qos_conf) {
3051 ovsrcu_set(&dev->qos_conf, new_qos_conf);
3052 if (qos_conf) {
3053 ovsrcu_postpone(qos_conf->ops->qos_destruct, qos_conf);
3054 }
0bf765f7
IS
3055 }
3056
d46285a2 3057 ovs_mutex_unlock(&dev->mutex);
78bd47cf 3058
0bf765f7
IS
3059 return error;
3060}
3061
3062/* egress-policer details */
3063
3064struct egress_policer {
3065 struct qos_conf qos_conf;
3066 struct rte_meter_srtcm_params app_srtcm_params;
3067 struct rte_meter_srtcm egress_meter;
3068};
3069
78bd47cf
DDP
3070static void
3071egress_policer_details_to_param(const struct smap *details,
3072 struct rte_meter_srtcm_params *params)
0bf765f7 3073{
78bd47cf
DDP
3074 memset(params, 0, sizeof *params);
3075 params->cir = smap_get_ullong(details, "cir", 0);
3076 params->cbs = smap_get_ullong(details, "cbs", 0);
3077 params->ebs = 0;
0bf765f7
IS
3078}
3079
3080static int
78bd47cf
DDP
3081egress_policer_qos_construct(const struct smap *details,
3082 struct qos_conf **conf)
0bf765f7 3083{
0bf765f7 3084 struct egress_policer *policer;
0bf765f7
IS
3085 int err = 0;
3086
0bf765f7
IS
3087 policer = xmalloc(sizeof *policer);
3088 qos_conf_init(&policer->qos_conf, &egress_policer_ops);
78bd47cf 3089 egress_policer_details_to_param(details, &policer->app_srtcm_params);
0bf765f7 3090 err = rte_meter_srtcm_config(&policer->egress_meter,
78bd47cf
DDP
3091 &policer->app_srtcm_params);
3092 if (!err) {
3093 *conf = &policer->qos_conf;
3094 } else {
7ea266e9 3095 free(policer);
78bd47cf 3096 *conf = NULL;
7ea266e9
IS
3097 err = -err;
3098 }
0bf765f7
IS
3099
3100 return err;
3101}
3102
3103static void
78bd47cf 3104egress_policer_qos_destruct(struct qos_conf *conf)
0bf765f7
IS
3105{
3106 struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
78bd47cf 3107 qos_conf);
0bf765f7
IS
3108 free(policer);
3109}
3110
3111static int
78bd47cf 3112egress_policer_qos_get(const struct qos_conf *conf, struct smap *details)
0bf765f7 3113{
78bd47cf
DDP
3114 struct egress_policer *policer =
3115 CONTAINER_OF(conf, struct egress_policer, qos_conf);
3116
3117 smap_add_format(details, "cir", "%"PRIu64, policer->app_srtcm_params.cir);
3118 smap_add_format(details, "cbs", "%"PRIu64, policer->app_srtcm_params.cbs);
050c60bf 3119
0bf765f7
IS
3120 return 0;
3121}
3122
78bd47cf 3123static bool
47a45d86
KT
3124egress_policer_qos_is_equal(const struct qos_conf *conf,
3125 const struct smap *details)
0bf765f7 3126{
78bd47cf
DDP
3127 struct egress_policer *policer =
3128 CONTAINER_OF(conf, struct egress_policer, qos_conf);
3129 struct rte_meter_srtcm_params params;
0bf765f7 3130
78bd47cf 3131 egress_policer_details_to_param(details, &params);
7ea266e9 3132
78bd47cf 3133 return !memcmp(&params, &policer->app_srtcm_params, sizeof params);
0bf765f7
IS
3134}
3135
0bf765f7 3136static int
78bd47cf 3137egress_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts, int pkt_cnt)
0bf765f7 3138{
0bf765f7 3139 int cnt = 0;
78bd47cf
DDP
3140 struct egress_policer *policer =
3141 CONTAINER_OF(conf, struct egress_policer, qos_conf);
0bf765f7 3142
f3926f29 3143 cnt = netdev_dpdk_policer_run(&policer->egress_meter, pkts, pkt_cnt);
0bf765f7
IS
3144
3145 return cnt;
3146}
3147
3148static const struct dpdk_qos_ops egress_policer_ops = {
3149 "egress-policer", /* qos_name */
3150 egress_policer_qos_construct,
3151 egress_policer_qos_destruct,
3152 egress_policer_qos_get,
78bd47cf 3153 egress_policer_qos_is_equal,
0bf765f7
IS
3154 egress_policer_run
3155};
3156
050c60bf
DDP
3157static int
3158netdev_dpdk_reconfigure(struct netdev *netdev)
3159{
3160 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
3161 int err = 0;
3162
050c60bf
DDP
3163 ovs_mutex_lock(&dev->mutex);
3164
3165 if (netdev->n_txq == dev->requested_n_txq
0072e931 3166 && netdev->n_rxq == dev->requested_n_rxq
b685696b
CL
3167 && dev->mtu == dev->requested_mtu
3168 && dev->rxq_size == dev->requested_rxq_size
bd4e172b 3169 && dev->txq_size == dev->requested_txq_size
3170 && dev->socket_id == dev->requested_socket_id) {
050c60bf
DDP
3171 /* Reconfiguration is unnecessary */
3172
3173 goto out;
3174 }
3175
3176 rte_eth_dev_stop(dev->port_id);
3177
bd4e172b 3178 if (dev->mtu != dev->requested_mtu
3179 || dev->socket_id != dev->requested_socket_id) {
96e9b168
IS
3180 err = netdev_dpdk_mempool_configure(dev);
3181 if (err) {
3182 goto out;
3183 }
0072e931
MK
3184 }
3185
050c60bf
DDP
3186 netdev->n_txq = dev->requested_n_txq;
3187 netdev->n_rxq = dev->requested_n_rxq;
3188
b685696b
CL
3189 dev->rxq_size = dev->requested_rxq_size;
3190 dev->txq_size = dev->requested_txq_size;
3191
050c60bf
DDP
3192 rte_free(dev->tx_q);
3193 err = dpdk_eth_dev_init(dev);
eff23640
DDP
3194 dev->tx_q = netdev_dpdk_alloc_txq(netdev->n_txq);
3195 if (!dev->tx_q) {
3196 err = ENOMEM;
3197 }
050c60bf 3198
0072e931
MK
3199 netdev_change_seq_changed(netdev);
3200
050c60bf 3201out:
050c60bf 3202 ovs_mutex_unlock(&dev->mutex);
050c60bf
DDP
3203 return err;
3204}
3205
7f381c2e 3206static int
2d24d165 3207dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev)
2d24d165 3208 OVS_REQUIRES(dev->mutex)
050c60bf 3209{
2d24d165
CL
3210 dev->up.n_txq = dev->requested_n_txq;
3211 dev->up.n_rxq = dev->requested_n_rxq;
96e9b168 3212 int err;
050c60bf 3213
81acebda
IM
3214 /* Enable TX queue 0 by default if it wasn't disabled. */
3215 if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
3216 dev->tx_q[0].map = 0;
3217 }
3218
3219 netdev_dpdk_remap_txqs(dev);
3220
0072e931
MK
3221 if (dev->requested_socket_id != dev->socket_id
3222 || dev->requested_mtu != dev->mtu) {
96e9b168
IS
3223 err = netdev_dpdk_mempool_configure(dev);
3224 if (err) {
3225 return err;
3226 } else {
2d24d165 3227 netdev_change_seq_changed(&dev->up);
db8f13b0
CL
3228 }
3229 }
3230
0a0f39df
CL
3231 if (netdev_dpdk_get_vid(dev) >= 0) {
3232 dev->vhost_reconfigured = true;
81acebda 3233 }
7f381c2e
DDP
3234
3235 return 0;
2d24d165
CL
3236}
3237
3238static int
3239netdev_dpdk_vhost_reconfigure(struct netdev *netdev)
3240{
3241 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
7f381c2e 3242 int err;
2d24d165 3243
2d24d165 3244 ovs_mutex_lock(&dev->mutex);
7f381c2e 3245 err = dpdk_vhost_reconfigure_helper(dev);
2d24d165 3246 ovs_mutex_unlock(&dev->mutex);
7f381c2e
DDP
3247
3248 return err;
2d24d165
CL
3249}
3250
3251static int
3252netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev)
3253{
3254 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
7f381c2e 3255 int err;
2d24d165 3256
2d24d165
CL
3257 ovs_mutex_lock(&dev->mutex);
3258
c1ff66ac
CL
3259 /* Configure vHost client mode if requested and if the following criteria
3260 * are met:
2d24d165
CL
3261 * 1. Device hasn't been registered yet.
3262 * 2. A path has been specified.
c1ff66ac
CL
3263 */
3264 if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)
2d24d165
CL
3265 && strlen(dev->vhost_id)) {
3266 /* Register client-mode device */
3267 err = rte_vhost_driver_register(dev->vhost_id,
3268 RTE_VHOST_USER_CLIENT);
c1ff66ac 3269 if (err) {
2d24d165
CL
3270 VLOG_ERR("vhost-user device setup failure for device %s\n",
3271 dev->vhost_id);
7f381c2e 3272 goto unlock;
c1ff66ac 3273 } else {
2d24d165
CL
3274 /* Configuration successful */
3275 dev->vhost_driver_flags |= RTE_VHOST_USER_CLIENT;
3276 VLOG_INFO("vHost User device '%s' created in 'client' mode, "
3277 "using client socket '%s'",
3278 dev->up.name, dev->vhost_id);
c1ff66ac 3279 }
f3e7ec25
MW
3280
3281 err = rte_vhost_driver_callback_register(dev->vhost_id,
3282 &virtio_net_device_ops);
3283 if (err) {
3284 VLOG_ERR("rte_vhost_driver_callback_register failed for "
3285 "vhost user client port: %s\n", dev->up.name);
3286 goto unlock;
3287 }
3288
3289 err = rte_vhost_driver_disable_features(dev->vhost_id,
3290 1ULL << VIRTIO_NET_F_HOST_TSO4
3291 | 1ULL << VIRTIO_NET_F_HOST_TSO6
3292 | 1ULL << VIRTIO_NET_F_CSUM);
3293 if (err) {
3294 VLOG_ERR("rte_vhost_driver_disable_features failed for vhost user "
3295 "client port: %s\n", dev->up.name);
3296 goto unlock;
3297 }
3298
3299 err = rte_vhost_driver_start(dev->vhost_id);
3300 if (err) {
3301 VLOG_ERR("rte_vhost_driver_start failed for vhost user "
3302 "client port: %s\n", dev->up.name);
3303 goto unlock;
3304 }
c1ff66ac
CL
3305 }
3306
7f381c2e
DDP
3307 err = dpdk_vhost_reconfigure_helper(dev);
3308
3309unlock:
050c60bf 3310 ovs_mutex_unlock(&dev->mutex);
050c60bf 3311
7f381c2e 3312 return err;
050c60bf
DDP
3313}
3314
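/* Expands to a struct netdev_class initializer: the hooks common to every
 * DPDK port type are filled in directly, while the callbacks that differ
 * per variant (construct, send, stats, reconfigure, rxq_recv, ...) are
 * supplied as macro arguments by the class definitions below. */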
ecc1a34e 3315#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, \
81acebda
IM
3316 SET_CONFIG, SET_TX_MULTIQ, SEND, \
3317 GET_CARRIER, GET_STATS, \
3318 GET_FEATURES, GET_STATUS, \
3319 RECONFIGURE, RXQ_RECV) \
95fb793a 3320{ \
3321 NAME, \
118c77b1 3322 true, /* is_pmd */ \
ecc1a34e 3323 INIT, /* init */ \
95fb793a 3324 NULL, /* netdev_dpdk_run */ \
3325 NULL, /* netdev_dpdk_wait */ \
3326 \
3327 netdev_dpdk_alloc, \
3328 CONSTRUCT, \
58397e6c 3329 DESTRUCT, \
95fb793a 3330 netdev_dpdk_dealloc, \
3331 netdev_dpdk_get_config, \
81acebda 3332 SET_CONFIG, \
95fb793a 3333 NULL, /* get_tunnel_config */ \
58397e6c
KT
3334 NULL, /* build header */ \
3335 NULL, /* push header */ \
3336 NULL, /* pop header */ \
7dec44fe 3337 netdev_dpdk_get_numa_id, /* get_numa_id */ \
81acebda 3338 SET_TX_MULTIQ, \
95fb793a 3339 \
7251515e 3340 SEND, /* send */ \
95fb793a 3341 NULL, /* send_wait */ \
3342 \
3343 netdev_dpdk_set_etheraddr, \
3344 netdev_dpdk_get_etheraddr, \
3345 netdev_dpdk_get_mtu, \
0072e931 3346 netdev_dpdk_set_mtu, \
95fb793a 3347 netdev_dpdk_get_ifindex, \
58397e6c 3348 GET_CARRIER, \
95fb793a 3349 netdev_dpdk_get_carrier_resets, \
3350 netdev_dpdk_set_miimon, \
58397e6c
KT
3351 GET_STATS, \
3352 GET_FEATURES, \
95fb793a 3353 NULL, /* set_advertisements */ \
875ab130 3354 NULL, /* get_pt_mode */ \
95fb793a 3355 \
9509913a 3356 netdev_dpdk_set_policing, \
0bf765f7 3357 netdev_dpdk_get_qos_types, \
95fb793a 3358 NULL, /* get_qos_capabilities */ \
0bf765f7
IS
3359 netdev_dpdk_get_qos, \
3360 netdev_dpdk_set_qos, \
95fb793a 3361 NULL, /* get_queue */ \
3362 NULL, /* set_queue */ \
3363 NULL, /* delete_queue */ \
3364 NULL, /* get_queue_stats */ \
3365 NULL, /* queue_dump_start */ \
3366 NULL, /* queue_dump_next */ \
3367 NULL, /* queue_dump_done */ \
3368 NULL, /* dump_queue_stats */ \
3369 \
95fb793a 3370 NULL, /* set_in4 */ \
a8704b50 3371 NULL, /* get_addr_list */ \
95fb793a 3372 NULL, /* add_router */ \
3373 NULL, /* get_next_hop */ \
58397e6c 3374 GET_STATUS, \
95fb793a 3375 NULL, /* arp_lookup */ \
3376 \
3377 netdev_dpdk_update_flags, \
050c60bf 3378 RECONFIGURE, \
95fb793a 3379 \
3380 netdev_dpdk_rxq_alloc, \
3381 netdev_dpdk_rxq_construct, \
3382 netdev_dpdk_rxq_destruct, \
3383 netdev_dpdk_rxq_dealloc, \
58397e6c 3384 RXQ_RECV, \
95fb793a 3385 NULL, /* rx_wait */ \
3386 NULL, /* rxq_drain */ \
18ebd48c 3387 NO_OFFLOAD_API \
95fb793a 3388}
8a9562d2 3389
bce01e3a 3390static const struct netdev_class dpdk_class =
95fb793a 3391 NETDEV_DPDK_CLASS(
3392 "dpdk",
ecc1a34e 3393 netdev_dpdk_class_init,
5496878c 3394 netdev_dpdk_construct,
58397e6c 3395 netdev_dpdk_destruct,
81acebda
IM
3396 netdev_dpdk_set_config,
3397 netdev_dpdk_set_tx_multiq,
58397e6c
KT
3398 netdev_dpdk_eth_send,
3399 netdev_dpdk_get_carrier,
3400 netdev_dpdk_get_stats,
3401 netdev_dpdk_get_features,
3402 netdev_dpdk_get_status,
050c60bf 3403 netdev_dpdk_reconfigure,
58397e6c 3404 netdev_dpdk_rxq_recv);
95fb793a 3405
bce01e3a 3406static const struct netdev_class dpdk_ring_class =
95fb793a 3407 NETDEV_DPDK_CLASS(
3408 "dpdkr",
ecc1a34e 3409 netdev_dpdk_class_init,
5496878c 3410 netdev_dpdk_ring_construct,
58397e6c 3411 netdev_dpdk_destruct,
c3d062a7 3412 netdev_dpdk_ring_set_config,
81acebda 3413 netdev_dpdk_set_tx_multiq,
58397e6c
KT
3414 netdev_dpdk_ring_send,
3415 netdev_dpdk_get_carrier,
3416 netdev_dpdk_get_stats,
3417 netdev_dpdk_get_features,
3418 netdev_dpdk_get_status,
050c60bf 3419 netdev_dpdk_reconfigure,
58397e6c
KT
3420 netdev_dpdk_rxq_recv);
3421
53f50d24 3422static const struct netdev_class dpdk_vhost_class =
7d1ced01
CL
3423 NETDEV_DPDK_CLASS(
3424 "dpdkvhostuser",
f3e7ec25 3425 NULL,
53f50d24 3426 netdev_dpdk_vhost_construct,
58397e6c 3427 netdev_dpdk_vhost_destruct,
2d24d165 3428 NULL,
81acebda 3429 NULL,
58397e6c
KT
3430 netdev_dpdk_vhost_send,
3431 netdev_dpdk_vhost_get_carrier,
3432 netdev_dpdk_vhost_get_stats,
3433 NULL,
7251515e 3434 NULL,
53f50d24 3435 netdev_dpdk_vhost_reconfigure,
58397e6c 3436 netdev_dpdk_vhost_rxq_recv);
2d24d165
CL
3437static const struct netdev_class dpdk_vhost_client_class =
3438 NETDEV_DPDK_CLASS(
3439 "dpdkvhostuserclient",
f3e7ec25 3440 NULL,
2d24d165
CL
3441 netdev_dpdk_vhost_client_construct,
3442 netdev_dpdk_vhost_destruct,
3443 netdev_dpdk_vhost_client_set_config,
3444 NULL,
3445 netdev_dpdk_vhost_send,
3446 netdev_dpdk_vhost_get_carrier,
3447 netdev_dpdk_vhost_get_stats,
3448 NULL,
3449 NULL,
3450 netdev_dpdk_vhost_client_reconfigure,
3451 netdev_dpdk_vhost_rxq_recv);
95fb793a 3452
8a9562d2
PS
3453void
3454netdev_dpdk_register(void)
3455{
bab69409
AC
3456 netdev_register_provider(&dpdk_class);
3457 netdev_register_provider(&dpdk_ring_class);
53f50d24 3458 netdev_register_provider(&dpdk_vhost_class);
2d24d165 3459 netdev_register_provider(&dpdk_vhost_client_class);
8a9562d2 3460}