/*
 * Copyright (c) 2014, 2015, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "netdev-dpdk.h"

#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_bus_pci.h>
#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_meter.h>
#include <rte_pci.h>
#include <rte_version.h>
#include <rte_vhost.h>

#include "cmap.h"
#include "coverage.h"
#include "dirs.h"
#include "dp-packet.h"
#include "dpdk.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "if-notifier.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/match.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/shash.h"
#include "openvswitch/vlog.h"
#include "ovs-numa.h"
#include "ovs-rcu.h"
#include "ovs-thread.h"
#include "packets.h"
#include "smap.h"
#include "sset.h"
#include "timeval.h"
#include "unaligned.h"
#include "unixctl.h"
#include "userspace-tso.h"
#include "util.h"
#include "uuid.h"

enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

VLOG_DEFINE_THIS_MODULE(netdev_dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

COVERAGE_DEFINE(vhost_tx_contention);
COVERAGE_DEFINE(vhost_notification);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

/*
 * We need to reserve extra space in the mbufs so that the DMA addresses
 * can be aligned to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define ETHER_HDR_MAX_LEN           (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN \
                                     + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu)       ((mtu) + RTE_ETHER_HDR_LEN \
                                     + RTE_ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len) \
                                     - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
#define NETDEV_DPDK_MBUF_ALIGN      1024
#define NETDEV_DPDK_MAX_PKT_LEN     9728
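
/*
 * Worked example of the frame length macros above (illustrative, assuming
 * the usual 14-byte Ethernet header, 4-byte CRC and 4-byte VLAN tags):
 *
 *     MTU_TO_FRAME_LEN(1500)     = 1500 + 14 + 4 = 1518
 *     MTU_TO_MAX_FRAME_LEN(1500) = 1500 + 26     = 1526  (room for QinQ)
 *     FRAME_LEN_TO_MTU(1518)     = 1518 - 14 - 4 = 1500
 */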

/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF */

#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
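
/*
 * For example, with the values above (MAX_NB_MBUF = 4096 * 64 = 262144 and
 * MIN_NB_MBUF = 4096 * 4 = 16384), the allocation attempts run through
 * 262144, 131072, 65536, 32768 and finally 16384 mbufs, each attempt
 * halving the previous one as described above.
 */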

#define SOCKET0  0

/* Default size of Physical NIC RXQ */
#define NIC_PORT_DEFAULT_RXQ_SIZE 2048
/* Default size of Physical NIC TXQ */
#define NIC_PORT_DEFAULT_TXQ_SIZE 2048
/* Maximum size of Physical NIC Queues */
#define NIC_PORT_MAX_Q_SIZE 4096

#define OVS_VHOST_MAX_QUEUE_NUM 1024  /* Maximum number of vHost TX queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and not
                                          * yet mapped to another queue. */

#define DPDK_ETH_PORT_ID_INVALID    RTE_MAX_ETHPORTS

/* DPDK library uses uint16_t for port_id. */
typedef uint16_t dpdk_port_t;
#define DPDK_PORT_ID_FMT "%"PRIu16

/* Minimum amount of vhost tx retries, effectively a disable. */
#define VHOST_ENQ_RETRY_MIN 0
/* Maximum amount of vhost tx retries. */
#define VHOST_ENQ_RETRY_MAX 32
/* Legacy default value for vhost tx retries. */
#define VHOST_ENQ_RETRY_DEF 8

#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)

/* List of required flags advertised by the hardware that will be used
 * if TSO is enabled. Ideally this should include DEV_TX_OFFLOAD_SCTP_CKSUM.
 * However, very few drivers support it at the moment, and SCTP is not as
 * widely used a protocol as TCP and UDP, so it's optional. */
#define DPDK_TX_TSO_OFFLOAD_FLAGS (DEV_TX_OFFLOAD_TCP_TSO        \
                                   | DEV_TX_OFFLOAD_TCP_CKSUM    \
                                   | DEV_TX_OFFLOAD_UDP_CKSUM    \
                                   | DEV_TX_OFFLOAD_IPV4_CKSUM)


static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .split_hdr_size = 0,
        .offloads = 0,
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_key = NULL,
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static int new_device(int vid);
static void destroy_device(int vid);
static int vring_state_changed(int vid, uint16_t queue_id, int enable);
static void destroy_connection(int vid);
static void vhost_guest_notified(int vid);

static const struct vhost_device_ops virtio_net_device_ops =
{
    .new_device =  new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed,
    .features_changed = NULL,
    .new_connection = NULL,
    .destroy_connection = destroy_connection,
    .guest_notified = vhost_guest_notified,
};

/* Custom software stats for dpdk ports */
struct netdev_dpdk_sw_stats {
    /* No. of retries when unable to transmit. */
    uint64_t tx_retries;
    /* Packet drops when unable to transmit, most likely because the Tx
     * queue is full. */
    uint64_t tx_failure_drops;
    /* Packet drops due to the packet length exceeding the device MTU. */
    uint64_t tx_mtu_exceeded_drops;
    /* Packet drops in egress policer processing. */
    uint64_t tx_qos_drops;
    /* Packet drops in ingress policer processing. */
    uint64_t rx_qos_drops;
    /* Packet drops in HWOL processing. */
    uint64_t tx_invalid_hwol_drops;
};

enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST = 1,
};

/* Quality of Service */

/* An instance of a QoS configuration. Always associated with a particular
 * network device.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs.
 */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
    rte_spinlock_t lock;
};

/* QoS queue information used by the netdev queue dump functions. */
struct netdev_dpdk_queue_state {
    uint32_t *queues;
    size_t cur_queue;
    size_t n_queues;
};

/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted. All of them must be provided, except
 * where otherwise noted.
 */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct a qos_conf object. The implementation should make
     * the appropriate calls to configure QoS according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets '*conf' to an
     * initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_construct)(const struct smap *details, struct qos_conf **conf);

    /* Destroys the data structures allocated by the implementation as part of
     * 'qos_conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    void (*qos_destruct)(struct qos_conf *conf);

    /* Retrieves details of 'conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     */
    int (*qos_get)(const struct qos_conf *conf, struct smap *details);

    /* Returns true if 'conf' is already configured according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * For all QoS implementations it should always be non-null.
     */
    bool (*qos_is_equal)(const struct qos_conf *conf,
                         const struct smap *details);

    /* Modify an array of rte_mbufs. The modification is specific to
     * each qos implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a qos modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_run)(struct qos_conf *qos_conf, struct rte_mbuf **pkts,
                   int pkt_cnt, bool should_steal);

    /* Called to construct a QoS Queue. The implementation should make
     * the appropriate calls to configure the QoS Queue according to
     * 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it constructs the
     * QoS queue successfully.
     */
    int (*qos_queue_construct)(const struct smap *details,
                               uint32_t queue_id, struct qos_conf *conf);

    /* Destroys the QoS Queue. */
    void (*qos_queue_destruct)(struct qos_conf *conf, uint32_t queue_id);

    /* Retrieves details of the QoS Queue configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     */
    int (*qos_queue_get)(struct smap *details, uint32_t queue_id,
                         const struct qos_conf *conf);

    /* Retrieves statistics of the QoS Queue configuration into 'stats'. */
    int (*qos_queue_get_stats)(const struct qos_conf *conf, uint32_t queue_id,
                               struct netdev_queue_stats *stats);

    /* Setup the 'netdev_dpdk_queue_state' structure used by the dpdk queue
     * dump functions.
     */
    int (*qos_queue_dump_state_init)(const struct qos_conf *conf,
                                     struct netdev_dpdk_queue_state *state);
};

/* dpdk_qos_ops for each type of user space QoS implementation. */
static const struct dpdk_qos_ops egress_policer_ops;
static const struct dpdk_qos_ops trtcm_policer_ops;

/*
 * Array of dpdk_qos_ops, contains pointer to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    &trtcm_policer_ops,
    NULL
};
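
/*
 * Illustrative only: a minimal sketch of how a caller could select one of
 * the implementations in 'qos_confs' by its 'qos_name' (the hypothetical
 * 'name' argument would come from the QoS table's "type" column):
 *
 *     const struct dpdk_qos_ops *const *opsp;
 *
 *     for (opsp = qos_confs; *opsp != NULL; opsp++) {
 *         if (!strcmp(name, (*opsp)->qos_name)) {
 *             return *opsp;
 *         }
 *     }
 *     return NULL;
 */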

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_mutex dpdk_mp_mutex OVS_ACQ_AFTER(dpdk_mutex)
    = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_mp's. */
static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mp_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
};

/* There should be one 'struct dpdk_tx_queue' created for
 * each netdev tx queue. */
struct dpdk_tx_queue {
    /* Padding to make dpdk_tx_queue exactly one cache line long. */
    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* Protects the members and the NIC queue from concurrent access.
         * It is used only if the queue is shared among different pmd threads
         * (see 'concurrent_txq'). */
        rte_spinlock_t tx_lock;
        /* Mapping of configured vhost-user queue to enabled by guest. */
        int map;
    );
};
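
/*
 * Illustrative use of 'tx_lock' (a sketch only; 'qid' and 'concurrent_txq'
 * stand for the txq id and the shared-queue flag a caller would have):
 *
 *     if (concurrent_txq) {
 *         qid = qid % netdev->n_txq;
 *         rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
 *     }
 *     ... enqueue packets on the NIC or vhost queue ...
 *     if (concurrent_txq) {
 *         rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
 *     }
 */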

struct ingress_policer {
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm in_policer;
    struct rte_meter_srtcm_profile in_prof;
    rte_spinlock_t policer_lock;
};

enum dpdk_hw_ol_features {
    NETDEV_RX_CHECKSUM_OFFLOAD = 1 << 0,
    NETDEV_RX_HW_CRC_STRIP = 1 << 1,
    NETDEV_RX_HW_SCATTER = 1 << 2,
    NETDEV_TX_TSO_OFFLOAD = 1 << 3,
    NETDEV_TX_SCTP_CHECKSUM_OFFLOAD = 1 << 4,
};

/*
 * In order to avoid confusion in variable names, the following naming
 * convention should be used, if possible:
 *
 *     'struct netdev'          : 'netdev'
 *     'struct netdev_dpdk'     : 'dev'
 *     'struct netdev_rxq'      : 'rxq'
 *     'struct netdev_rxq_dpdk' : 'rx'
 *
 * Example:
 *     struct netdev *netdev = netdev_from_name(name);
 *     struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
 *
 * Also, 'netdev' should be used instead of 'dev->up', where 'netdev' was
 * already defined.
 */

struct netdev_dpdk {
    PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline0,
        dpdk_port_t port_id;

        /* If true, device was attached by rte_eth_dev_attach(). */
        bool attached;
        /* If true, rte_eth_dev_start() was successfully called */
        bool started;
        bool reset_needed;
        /* 1 pad byte here. */
        struct eth_addr hwaddr;
        int mtu;
        int socket_id;
        int buf_size;
        int max_packet_len;
        enum dpdk_dev_type type;
        enum netdev_flags flags;
        int link_reset_cnt;
        union {
            /* Device arguments for dpdk ports. */
            char *devargs;
            /* Identifier used to distinguish vhost devices from each other. */
            char *vhost_id;
        };
        struct dpdk_tx_queue *tx_q;
        struct rte_eth_link link;
    );

    PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline1,
        struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);
        struct dpdk_mp *dpdk_mp;

        /* virtio identifier for vhost devices */
        ovsrcu_index vid;

        /* True if vHost device is 'up' and has been reconfigured at least
         * once */
        bool vhost_reconfigured;

        atomic_uint8_t vhost_tx_retries_max;
        /* 2 pad bytes here. */
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        struct netdev up;
        /* In dpdk_list. */
        struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

        /* QoS configuration and lock for the device */
        OVSRCU_TYPE(struct qos_conf *) qos_conf;

        /* Ingress Policer */
        OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
        uint32_t policer_rate;
        uint32_t policer_burst;

        /* Array of vhost rxq states, see vring_state_changed. */
        bool *vhost_rxq_enabled;
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        struct netdev_stats stats;
        struct netdev_dpdk_sw_stats *sw_stats;
        /* Protects stats */
        rte_spinlock_t stats_lock;
        /* 36 pad bytes here. */
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* The following properties cannot be changed when a device is
         * running, so we remember the request and update them next time
         * netdev_dpdk*_reconfigure() is called */
        int requested_mtu;
        int requested_n_txq;
        int requested_n_rxq;
        int requested_rxq_size;
        int requested_txq_size;

        /* Number of rx/tx descriptors for physical devices */
        int rxq_size;
        int txq_size;

        /* Socket ID detected when vHost device is brought up */
        int requested_socket_id;

        /* Denotes whether vHost port is client/server mode */
        uint64_t vhost_driver_flags;

        /* DPDK-ETH Flow control */
        struct rte_eth_fc_conf fc_conf;

        /* DPDK-ETH hardware offload features,
         * from the enum set 'dpdk_hw_ol_features' */
        uint32_t hw_ol_features;

        /* Properties for link state change detection mode.
         * If lsc_interrupt_mode is set to false, poll mode is used,
         * otherwise interrupt mode is used. */
        bool requested_lsc_interrupt_mode;
        bool lsc_interrupt_mode;

        /* VF configuration. */
        struct eth_addr requested_hwaddr;
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* Names of all XSTATS counters */
        struct rte_eth_xstat_name *rte_xstats_names;
        int rte_xstats_names_size;
        int rte_xstats_ids_size;
        uint64_t *rte_xstats_ids;
    );
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    dpdk_port_t port_id;
};

static void netdev_dpdk_destruct(struct netdev *netdev);
static void netdev_dpdk_vhost_destruct(struct netdev *netdev);

static int netdev_dpdk_get_sw_custom_stats(const struct netdev *,
                                           struct netdev_custom_stats *);
static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev);

int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->destruct == netdev_dpdk_destruct
           || class->destruct == netdev_dpdk_vhost_destruct;
}

/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP(MTU_TO_MAX_FRAME_LEN(mtu), NETDEV_DPDK_MBUF_ALIGN)
           + RTE_PKTMBUF_HEADROOM;
}
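
/*
 * Worked example (assuming DPDK's default RTE_PKTMBUF_HEADROOM of 128
 * bytes): dpdk_buf_size(1500) = ROUND_UP(1526, 1024) + 128
 * = 2048 + 128 = 2176 bytes.
 */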

/* Allocates an area of 'sz' bytes from DPDK. The memory is zero'ed.
 *
 * Unlike xmalloc(), this function can return NULL on failure. */
static void *
dpdk_rte_mzalloc(size_t sz)
{
    return rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
}

void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}

static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp OVS_UNUSED,
                     void *opaque_arg OVS_UNUSED,
                     void *_p,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *pkt = _p;

    dp_packet_init_dpdk((struct dp_packet *) pkt);
}

static int
dpdk_mp_full(const struct rte_mempool *mp) OVS_REQUIRES(dpdk_mp_mutex)
{
    /* At this point we want to know if all the mbufs are back
     * in the mempool. rte_mempool_full() is not atomic but it's
     * the best available and as we are no longer requesting mbufs
     * from the mempool, it means mbufs will not move from
     * 'mempool ring' --> 'mempool cache'. In rte_mempool_full()
     * the ring is counted before caches, so we won't get false
     * positives in this use case and we handle false negatives.
     *
     * If future implementations of rte_mempool_full() were to change
     * it could be possible for a false positive. Even that would
     * likely be ok, as there are additional checks during mempool
     * freeing but it would make things racy.
     */
    return rte_mempool_full(mp);
}

/* Free unused mempools. */
static void
dpdk_mp_sweep(void) OVS_REQUIRES(dpdk_mp_mutex)
{
    struct dpdk_mp *dmp, *next;

    LIST_FOR_EACH_SAFE (dmp, next, list_node, &dpdk_mp_list) {
        if (!dmp->refcount && dpdk_mp_full(dmp->mp)) {
            VLOG_DBG("Freeing mempool \"%s\"", dmp->mp->name);
            ovs_list_remove(&dmp->list_node);
            rte_mempool_free(dmp->mp);
            rte_free(dmp);
        }
    }
}

/* Calculating the required number of mbufs differs depending on the
 * mempool model being used. Check if per port memory is in use before
 * calculating.
 */
static uint32_t
dpdk_calculate_mbufs(struct netdev_dpdk *dev, int mtu, bool per_port_mp)
{
    uint32_t n_mbufs;

    if (!per_port_mp) {
        /* Shared memory is being used.
         * XXX: this is a really rough method of provisioning memory.
         * It's impossible to determine what the exact memory requirements are
         * when the number of ports and rxqs that utilize a particular mempool
         * can change dynamically at runtime. For now, use this rough
         * heuristic.
         */
        if (mtu >= RTE_ETHER_MTU) {
            n_mbufs = MAX_NB_MBUF;
        } else {
            n_mbufs = MIN_NB_MBUF;
        }
    } else {
        /* Per port memory is being used.
         * XXX: rough estimation of number of mbufs required for this port:
         * <packets required to fill the device rxqs>
         * + <packets that could be stuck on other ports txqs>
         * + <packets in the pmd threads>
         * + <additional memory for corner cases>
         */
        n_mbufs = dev->requested_n_rxq * dev->requested_rxq_size
                  + dev->requested_n_txq * dev->requested_txq_size
                  + MIN(RTE_MAX_LCORE, dev->requested_n_rxq)
                    * NETDEV_MAX_BURST
                  + MIN_NB_MBUF;
    }

    return n_mbufs;
}
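
/*
 * Worked example of the per-port estimate above (illustrative numbers):
 * with 2 rxqs and 2 txqs of 2048 descriptors each, the request is
 * 2 * 2048 + 2 * 2048 + MIN(RTE_MAX_LCORE, 2) * NETDEV_MAX_BURST
 * + MIN_NB_MBUF = 4096 + 4096 + 2 * 32 + 16384 = 24640 mbufs
 * (taking NETDEV_MAX_BURST as 32).
 */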

static struct dpdk_mp *
dpdk_mp_create(struct netdev_dpdk *dev, int mtu, bool per_port_mp)
{
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    const char *netdev_name = netdev_get_name(&dev->up);
    int socket_id = dev->requested_socket_id;
    uint32_t n_mbufs = 0;
    uint32_t mbuf_size = 0;
    uint32_t aligned_mbuf_size = 0;
    uint32_t mbuf_priv_data_len = 0;
    uint32_t pkt_size = 0;
    uint32_t hash = hash_string(netdev_name, 0);
    struct dpdk_mp *dmp = NULL;
    int ret;

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    if (!dmp) {
        return NULL;
    }
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    /* Get the size of each mbuf, based on the MTU */
    mbuf_size = MTU_TO_FRAME_LEN(mtu);

    n_mbufs = dpdk_calculate_mbufs(dev, mtu, per_port_mp);

    do {
        /* Full DPDK memory pool name must be unique and cannot be
         * longer than RTE_MEMPOOL_NAMESIZE. Note that for the shared
         * mempool case this can result in one device using a mempool
         * which references a different device in its name. However as
         * mempool names are hashed, the device name will not be readable
         * so this is not an issue for tasks such as debugging.
         */
        ret = snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
                       "ovs%08x%02d%05d%07u",
                       hash, socket_id, mtu, n_mbufs);
        if (ret < 0 || ret >= RTE_MEMPOOL_NAMESIZE) {
            VLOG_DBG("snprintf returned %d. "
                     "Failed to generate a mempool name for \"%s\". "
                     "Hash:0x%x, socket_id: %d, mtu:%d, mbufs:%u.",
                     ret, netdev_name, hash, socket_id, mtu, n_mbufs);
            break;
        }

        VLOG_DBG("Port %s: Requesting a mempool of %u mbufs of size %u "
                 "on socket %d for %d Rx and %d Tx queues, "
                 "cache line size of %u",
                 netdev_name, n_mbufs, mbuf_size, socket_id,
                 dev->requested_n_rxq, dev->requested_n_txq,
                 RTE_CACHE_LINE_SIZE);

        /* The size of the mbuf's private area (i.e. area that holds OvS'
         * dp_packet data) */
        mbuf_priv_data_len = sizeof(struct dp_packet) -
                             sizeof(struct rte_mbuf);
        /* The size of the entire dp_packet. */
        pkt_size = sizeof(struct dp_packet) + mbuf_size;
        /* mbuf size, rounded up to cacheline size. */
        aligned_mbuf_size = ROUND_UP(pkt_size, RTE_CACHE_LINE_SIZE);
        /* If there is a size discrepancy, add padding to
         * mbuf_priv_data_len. This maintains mbuf size cache alignment,
         * while also honoring RX buffer alignment in the data portion of
         * the mbuf. If this adjustment is not made, there is a possibility
         * later on that for an element of the mempool, buf,
         * buf->data_len < (buf->buf_len - buf->data_off).
         * This is problematic in the case of multi-segment mbufs,
         * particularly when an mbuf segment needs to be resized (when
         * pushing or popping a VLAN header, for example).
         */
        mbuf_priv_data_len += (aligned_mbuf_size - pkt_size);

        dmp->mp = rte_pktmbuf_pool_create(mp_name, n_mbufs, MP_CACHE_SZ,
                                          mbuf_priv_data_len,
                                          mbuf_size,
                                          socket_id);

        if (dmp->mp) {
            VLOG_DBG("Allocated \"%s\" mempool with %u mbufs",
                     mp_name, n_mbufs);
            /* rte_pktmbuf_pool_create has done some initialization of the
             * rte_mbuf part of each dp_packet, while ovs_rte_pktmbuf_init
             * initializes some OVS specific fields of dp_packet.
             */
            rte_mempool_obj_iter(dmp->mp, ovs_rte_pktmbuf_init, NULL);
            return dmp;
        } else if (rte_errno == EEXIST) {
            /* A mempool with the same name already exists. We just
             * retrieve its pointer to be returned to the caller. */
            dmp->mp = rte_mempool_lookup(mp_name);
            /* As the mempool create returned EEXIST we can expect the
             * lookup has returned a valid pointer. If for some reason
             * that's not the case we keep track of it. */
            VLOG_DBG("A mempool with name \"%s\" already exists at %p.",
                     mp_name, dmp->mp);
            return dmp;
        } else {
            VLOG_DBG("Failed to create mempool \"%s\" with a request of "
                     "%u mbufs, retrying with %u mbufs",
                     mp_name, n_mbufs, n_mbufs / 2);
        }
    } while (!dmp->mp && rte_errno == ENOMEM && (n_mbufs /= 2) >= MIN_NB_MBUF);

    VLOG_ERR("Failed to create mempool \"%s\" with a request of %u mbufs",
             mp_name, n_mbufs);

    rte_free(dmp);
    return NULL;
}
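
/*
 * Worked example of the alignment padding above (sizes are illustrative;
 * the real ones depend on the build): if sizeof(struct dp_packet) were 704
 * and mbuf_size 1518, then pkt_size = 704 + 1518 = 2222, which rounds up
 * to aligned_mbuf_size = 2240 on a 64-byte cache line, so 18 bytes of
 * padding are added to mbuf_priv_data_len.
 */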

static struct dpdk_mp *
dpdk_mp_get(struct netdev_dpdk *dev, int mtu, bool per_port_mp)
{
    struct dpdk_mp *dmp, *next;
    bool reuse = false;

    ovs_mutex_lock(&dpdk_mp_mutex);
    /* Check if shared memory is being used, if so check existing mempools
     * to see if reuse is possible. */
    if (!per_port_mp) {
        LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
            if (dmp->socket_id == dev->requested_socket_id
                && dmp->mtu == mtu) {
                VLOG_DBG("Reusing mempool \"%s\"", dmp->mp->name);
                dmp->refcount++;
                reuse = true;
                break;
            }
        }
    }
    /* Sweep mempools after reuse or before create. */
    dpdk_mp_sweep();

    if (!reuse) {
        dmp = dpdk_mp_create(dev, mtu, per_port_mp);
        if (dmp) {
            /* Shared memory will hit the reuse case above so will not
             * request a mempool that already exists, but we need to check
             * for the EEXIST case in the per port memory case. Compare the
             * mempool returned by dmp to each entry in dpdk_mp_list. If a
             * match is found, free dmp as a new entry is not required, set
             * dmp to point to the existing entry and increment the refcount
             * to avoid being freed at a later stage.
             */
            if (per_port_mp && rte_errno == EEXIST) {
                LIST_FOR_EACH (next, list_node, &dpdk_mp_list) {
                    if (dmp->mp == next->mp) {
                        rte_free(dmp);
                        dmp = next;
                        dmp->refcount++;
                    }
                }
            } else {
                ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
            }
        }
    }

    ovs_mutex_unlock(&dpdk_mp_mutex);

    return dmp;
}

/* Decrement reference to a mempool. */
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    ovs_mutex_lock(&dpdk_mp_mutex);
    ovs_assert(dmp->refcount);
    dmp->refcount--;
    ovs_mutex_unlock(&dpdk_mp_mutex);
}
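
/*
 * Typical mempool reference lifecycle (a sketch of the protocol above):
 *
 *     dmp = dpdk_mp_get(dev, mtu, per_port_mp);   <-- takes a reference
 *     ...
 *     dpdk_mp_put(dev->dpdk_mp);                  <-- drops the old one
 *
 * dpdk_mp_sweep() later frees any pool whose refcount has reached zero,
 * once all of its mbufs have returned to the pool.
 */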

/* Depending on the memory model being used this function tries to
 * identify and reuse an existing mempool or tries to allocate a new
 * mempool on requested_socket_id with mbuf size corresponding to the
 * requested_mtu. On success, a new configuration will be applied.
 * On error, device will be left unchanged. */
static int
netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    uint32_t buf_size = dpdk_buf_size(dev->requested_mtu);
    struct dpdk_mp *dmp;
    int ret = 0;
    bool per_port_mp = dpdk_per_port_memory();

    /* With shared memory we do not need to configure a mempool if the MTU
     * and socket ID have not changed, the previous configuration is still
     * valid so return 0 */
    if (!per_port_mp && dev->mtu == dev->requested_mtu
        && dev->socket_id == dev->requested_socket_id) {
        return ret;
    }

    dmp = dpdk_mp_get(dev, FRAME_LEN_TO_MTU(buf_size), per_port_mp);
    if (!dmp) {
        VLOG_ERR("Failed to create memory pool for netdev "
                 "%s, with MTU %d on socket %d: %s\n",
                 dev->up.name, dev->requested_mtu, dev->requested_socket_id,
                 rte_strerror(rte_errno));
        ret = rte_errno;
    } else {
        /* Check for any pre-existing dpdk_mp for the device before accessing
         * the associated mempool.
         */
        if (dev->dpdk_mp != NULL) {
            /* A new MTU was requested, decrement the reference count for the
             * device's current dpdk_mp. This is required even if a pointer
             * to the same dpdk_mp is returned by dpdk_mp_get. The refcount
             * for dmp has already been incremented by dpdk_mp_get at this
             * stage so it must be decremented to keep an accurate refcount
             * for the dpdk_mp.
             */
            dpdk_mp_put(dev->dpdk_mp);
        }
        dev->dpdk_mp = dmp;
        dev->mtu = dev->requested_mtu;
        dev->socket_id = dev->requested_socket_id;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    }

    return ret;
}

static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl,
                        "Port "DPDK_PORT_ID_FMT" Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX)
                        ? "full-duplex" : "half-duplex");
        } else {
            VLOG_DBG_RL(&rl, "Port "DPDK_PORT_ID_FMT" Link Down",
                        dev->port_id);
        }
    }
}

static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            if (dev->type == DPDK_DEV_ETH) {
                check_link_status(dev);
            }
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}

static int
dpdk_eth_dev_port_config(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;
    struct rte_eth_conf conf = port_conf;
    struct rte_eth_dev_info info;
    uint16_t conf_mtu;

    rte_eth_dev_info_get(dev->port_id, &info);

    /* As of DPDK 19.11, it is not allowed to set a mq_mode for
     * virtio PMD driver. */
    if (!strcmp(info.driver_name, "net_virtio")) {
        conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
    } else {
        conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
    }

    /* As of DPDK 17.11.1 a few PMDs require to explicitly enable
     * scatter to support jumbo RX.
     * Setting scatter for the device is done after checking for
     * scatter support in the device capabilities. */
    if (dev->mtu > RTE_ETHER_MTU) {
        if (dev->hw_ol_features & NETDEV_RX_HW_SCATTER) {
            conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
        }
    }

    conf.intr_conf.lsc = dev->lsc_interrupt_mode;

    if (dev->hw_ol_features & NETDEV_RX_CHECKSUM_OFFLOAD) {
        conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
    }

    if (!(dev->hw_ol_features & NETDEV_RX_HW_CRC_STRIP)
        && info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
        conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
    }

    if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) {
        conf.txmode.offloads |= DPDK_TX_TSO_OFFLOAD_FLAGS;
        if (dev->hw_ol_features & NETDEV_TX_SCTP_CHECKSUM_OFFLOAD) {
            conf.txmode.offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
        }
    }

    /* Limit configured rss hash functions to only those supported
     * by the eth device. */
    conf.rx_adv_conf.rss_conf.rss_hf &= info.flow_type_rss_offloads;

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available. When this happens we can retry the configuration
     * and request fewer queues */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &conf);
        if (diag) {
            VLOG_WARN("Interface %s eth_dev setup error %s\n",
                      dev->up.name, rte_strerror(-diag));
            break;
        }

        diag = rte_eth_dev_set_mtu(dev->port_id, dev->mtu);
        if (diag) {
            /* A device may not support rte_eth_dev_set_mtu, in this case
             * flag a warning to the user and include the device's configured
             * MTU value that will be used instead. */
            if (-ENOTSUP == diag) {
                rte_eth_dev_get_mtu(dev->port_id, &conf_mtu);
                VLOG_WARN("Interface %s does not support MTU configuration, "
                          "max packet size supported is %"PRIu16".",
                          dev->up.name, conf_mtu);
            } else {
                VLOG_ERR("Interface %s MTU (%d) setup error: %s",
                         dev->up.name, dev->mtu, rte_strerror(-diag));
                break;
            }
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, dev->txq_size,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s unable to setup txq(%d): %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with fewer tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, dev->rxq_size,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s unable to setup rxq(%d): %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with fewer rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->up.n_txq = n_txq;

        return 0;
    }

    return diag;
}

static void
dpdk_eth_flow_ctrl_setup(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex)
{
    if (rte_eth_dev_flow_ctrl_set(dev->port_id, &dev->fc_conf)) {
        VLOG_WARN("Failed to enable flow control on device "DPDK_PORT_ID_FMT,
                  dev->port_id);
    }
}

static int
dpdk_eth_dev_init(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct rte_ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;
    uint32_t tx_tso_offload_capa = DPDK_TX_TSO_OFFLOAD_FLAGS;
    uint32_t rx_chksm_offload_capa = DEV_RX_OFFLOAD_UDP_CKSUM |
                                     DEV_RX_OFFLOAD_TCP_CKSUM |
                                     DEV_RX_OFFLOAD_IPV4_CKSUM;

    rte_eth_dev_info_get(dev->port_id, &info);

    if (strstr(info.driver_name, "vf") != NULL) {
        VLOG_INFO("Virtual function detected, HW_CRC_STRIP will be enabled");
        dev->hw_ol_features |= NETDEV_RX_HW_CRC_STRIP;
    } else {
        dev->hw_ol_features &= ~NETDEV_RX_HW_CRC_STRIP;
    }

    if ((info.rx_offload_capa & rx_chksm_offload_capa) !=
        rx_chksm_offload_capa) {
        VLOG_WARN("Rx checksum offload is not supported on port "
                  DPDK_PORT_ID_FMT, dev->port_id);
        dev->hw_ol_features &= ~NETDEV_RX_CHECKSUM_OFFLOAD;
    } else {
        dev->hw_ol_features |= NETDEV_RX_CHECKSUM_OFFLOAD;
    }

    if (info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER) {
        dev->hw_ol_features |= NETDEV_RX_HW_SCATTER;
    } else {
        /* Do not warn on lack of scatter support */
        dev->hw_ol_features &= ~NETDEV_RX_HW_SCATTER;
    }

    dev->hw_ol_features &= ~NETDEV_TX_TSO_OFFLOAD;
    if (userspace_tso_enabled()) {
        if ((info.tx_offload_capa & tx_tso_offload_capa)
            == tx_tso_offload_capa) {
            dev->hw_ol_features |= NETDEV_TX_TSO_OFFLOAD;
            if (info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
                dev->hw_ol_features |= NETDEV_TX_SCTP_CHECKSUM_OFFLOAD;
            } else {
                VLOG_WARN("%s: Tx SCTP checksum offload is not supported, "
                          "SCTP packets sent to this device will be dropped",
                          netdev_get_name(&dev->up));
            }
        } else {
            VLOG_WARN("%s: Tx TSO offload is not supported.",
                      netdev_get_name(&dev->up));
        }
    }

    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_port_config(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d lsc interrupt mode:%s) "
                 "configure error: %s",
                 dev->up.name, n_rxq, n_txq,
                 dev->lsc_interrupt_mode ? "true" : "false",
                 rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }
    dev->started = true;

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port "DPDK_PORT_ID_FMT": "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
    return 0;
}

static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *dev;

    dev = dpdk_rte_mzalloc(sizeof *dev);
    if (dev) {
        return &dev->up;
    }

    return NULL;
}

static struct dpdk_tx_queue *
netdev_dpdk_alloc_txq(unsigned int n_txqs)
{
    struct dpdk_tx_queue *txqs;
    unsigned i;

    txqs = dpdk_rte_mzalloc(n_txqs * sizeof *txqs);
    if (txqs) {
        for (i = 0; i < n_txqs; i++) {
            /* Initialize map for vhost devices. */
            txqs[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
            rte_spinlock_init(&txqs[i].tx_lock);
        }
    }

    return txqs;
}

static int
common_construct(struct netdev *netdev, dpdk_port_t port_no,
                 enum dpdk_dev_type type, int socket_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_init(&dev->mutex);

    rte_spinlock_init(&dev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info. In that situation, always
     * use 'SOCKET0'. */
    dev->socket_id = socket_id < 0 ? SOCKET0 : socket_id;
    dev->requested_socket_id = dev->socket_id;
    dev->port_id = port_no;
    dev->type = type;
    dev->flags = 0;
    dev->requested_mtu = RTE_ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    dev->requested_lsc_interrupt_mode = 0;
    ovsrcu_index_init(&dev->vid, -1);
    dev->vhost_reconfigured = false;
    dev->attached = false;
    dev->started = false;
    dev->reset_needed = false;

    ovsrcu_init(&dev->qos_conf, NULL);

    ovsrcu_init(&dev->ingress_policer, NULL);
    dev->policer_rate = 0;
    dev->policer_burst = 0;

    netdev->n_rxq = 0;
    netdev->n_txq = 0;
    dev->requested_n_rxq = NR_QUEUE;
    dev->requested_n_txq = NR_QUEUE;
    dev->requested_rxq_size = NIC_PORT_DEFAULT_RXQ_SIZE;
    dev->requested_txq_size = NIC_PORT_DEFAULT_TXQ_SIZE;

    /* Initialize the flow control to NULL */
    memset(&dev->fc_conf, 0, sizeof dev->fc_conf);

    /* Initialize the hardware offload flags to 0 */
    dev->hw_ol_features = 0;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    ovs_list_push_back(&dpdk_list, &dev->list_node);

    netdev_request_reconfigure(netdev);

    dev->rte_xstats_names = NULL;
    dev->rte_xstats_names_size = 0;

    dev->rte_xstats_ids = NULL;
    dev->rte_xstats_ids_size = 0;

    dev->sw_stats = xzalloc(sizeof *dev->sw_stats);
    dev->sw_stats->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;

    return 0;
}

/* Get the number of OVS interfaces which have the same DPDK
 * rte device (e.g. same pci bus address).
 * FIXME: avoid direct access to DPDK internal array rte_eth_devices.
 */
static int
netdev_dpdk_get_num_ports(struct rte_device *device)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev;
    int count = 0;

    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (rte_eth_devices[dev->port_id].device == device
            && rte_eth_devices[dev->port_id].state != RTE_ETH_DEV_UNUSED) {
            count++;
        }
    }
    return count;
}

static int
vhost_common_construct(struct netdev *netdev)
    OVS_REQUIRES(dpdk_mutex)
{
    int socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    dev->vhost_rxq_enabled = dpdk_rte_mzalloc(OVS_VHOST_MAX_QUEUE_NUM *
                                              sizeof *dev->vhost_rxq_enabled);
    if (!dev->vhost_rxq_enabled) {
        return ENOMEM;
    }
    dev->tx_q = netdev_dpdk_alloc_txq(OVS_VHOST_MAX_QUEUE_NUM);
    if (!dev->tx_q) {
        rte_free(dev->vhost_rxq_enabled);
        return ENOMEM;
    }

    atomic_init(&dev->vhost_tx_retries_max, VHOST_ENQ_RETRY_DEF);

    return common_construct(netdev, DPDK_ETH_PORT_ID_INVALID,
                            DPDK_DEV_VHOST, socket_id);
}
1328 | ||
7d1ced01 | 1329 | static int |
53f50d24 | 1330 | netdev_dpdk_vhost_construct(struct netdev *netdev) |
7d1ced01 | 1331 | { |
d46285a2 DDP |
1332 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
1333 | const char *name = netdev->name; | |
7d1ced01 | 1334 | int err; |
a0cb2d66 | 1335 | |
1af27e8a DDP |
1336 | /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in |
1337 | * the file system. '/' or '\' would traverse directories, so they're not | |
1338 | * acceptable in 'name'. */ | |
1339 | if (strchr(name, '/') || strchr(name, '\\')) { | |
1340 | VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. " | |
1341 | "A valid name must not include '/' or '\\'", | |
1342 | name); | |
1343 | return EINVAL; | |
1344 | } | |
1345 | ||
7d1ced01 CL |
1346 | ovs_mutex_lock(&dpdk_mutex); |
1347 | /* Take the name of the vhost-user port and append it to the location where | |
2d24d165 | 1348 | * the socket is to be created, then register the socket. |
7d1ced01 | 1349 | */ |
bb9d2623 | 1350 | dev->vhost_id = xasprintf("%s/%s", dpdk_get_vhost_sock_dir(), name); |
1af27e8a | 1351 | |
2d24d165 | 1352 | dev->vhost_driver_flags &= ~RTE_VHOST_USER_CLIENT; |
e666e8e0 FL |
1353 | |
1354 | /* There is no support for multi-segment buffers. */ | |
1355 | dev->vhost_driver_flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT; | |
2d24d165 | 1356 | err = rte_vhost_driver_register(dev->vhost_id, dev->vhost_driver_flags); |
7d1ced01 CL |
1357 | if (err) { |
1358 | VLOG_ERR("vhost-user socket device setup failure for socket %s\n", | |
2d24d165 | 1359 | dev->vhost_id); |
f3e7ec25 | 1360 | goto out; |
e5c0f5a4 | 1361 | } else { |
2d24d165 CL |
1362 | fatal_signal_add_file_to_unlink(dev->vhost_id); |
1363 | VLOG_INFO("Socket %s created for vhost-user port %s\n", | |
1364 | dev->vhost_id, name); | |
1365 | } | |
f3e7ec25 MW |
1366 | |
1367 | err = rte_vhost_driver_callback_register(dev->vhost_id, | |
1368 | &virtio_net_device_ops); | |
1369 | if (err) { | |
1370 | VLOG_ERR("rte_vhost_driver_callback_register failed for vhost user " | |
1371 | "port: %s\n", name); | |
1372 | goto out; | |
1373 | } | |
1374 | ||
29cf9c1b FL |
1375 | if (!userspace_tso_enabled()) { |
1376 | err = rte_vhost_driver_disable_features(dev->vhost_id, | |
1377 | 1ULL << VIRTIO_NET_F_HOST_TSO4 | |
1378 | | 1ULL << VIRTIO_NET_F_HOST_TSO6 | |
1379 | | 1ULL << VIRTIO_NET_F_CSUM); | |
1380 | if (err) { | |
1381 | VLOG_ERR("rte_vhost_driver_disable_features failed for vhost user " | |
1382 | "port: %s\n", name); | |
1383 | goto out; | |
1384 | } | |
f3e7ec25 MW |
1385 | } |
1386 | ||
1387 | err = rte_vhost_driver_start(dev->vhost_id); | |
1388 | if (err) { | |
1389 | VLOG_ERR("rte_vhost_driver_start failed for vhost user " | |
1390 | "port: %s\n", name); | |
1391 | goto out; | |
1392 | } | |
1393 | ||
1ce30dfd | 1394 | err = vhost_common_construct(netdev); |
f3e7ec25 MW |
1395 | if (err) { |
1396 | VLOG_ERR("vhost_common_construct failed for vhost user " | |
1397 | "port: %s\n", name); | |
1398 | } | |
2d24d165 | 1399 | |
f3e7ec25 | 1400 | out: |
bb9d2623 IM |
1401 | if (err) { |
1402 | free(dev->vhost_id); | |
1403 | dev->vhost_id = NULL; | |
1404 | } | |
1405 | ||
2d24d165 | 1406 | ovs_mutex_unlock(&dpdk_mutex); |
28ca969e AC |
1407 | VLOG_WARN_ONCE("dpdkvhostuser ports are considered deprecated; " |
1408 | "please migrate to dpdkvhostuserclient ports."); | |
2d24d165 CL |
1409 | return err; |
1410 | } | |
1411 | ||
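/* For reference, the server-mode bring-up above boils down to the
 * following rte_vhost call sequence (a condensed sketch, error handling
 * elided; the flags and feature bits are the ones used above, the
 * 'path' name is illustrative):
 *
 *     rte_vhost_driver_register(path, flags);
 *     rte_vhost_driver_callback_register(path, &virtio_net_device_ops);
 *     if (!userspace_tso_enabled()) {
 *         rte_vhost_driver_disable_features(path,
 *                 1ULL << VIRTIO_NET_F_HOST_TSO4
 *                 | 1ULL << VIRTIO_NET_F_HOST_TSO6
 *                 | 1ULL << VIRTIO_NET_F_CSUM);
 *     }
 *     rte_vhost_driver_start(path);
 *
 * Only after the driver has been started can a guest connect to the
 * socket and trigger the new_device() callback. */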
1412 | static int | |
1413 | netdev_dpdk_vhost_client_construct(struct netdev *netdev) | |
1414 | { | |
1415 | int err; | |
1416 | ||
2d24d165 | 1417 | ovs_mutex_lock(&dpdk_mutex); |
1ce30dfd | 1418 | err = vhost_common_construct(netdev); |
f3e7ec25 MW |
1419 | if (err) { |
1420 | VLOG_ERR("vhost_common_construct failed for vhost user client " | |
1421 | "port: %s\n", netdev->name); | |
1422 | } | |
7d1ced01 | 1423 | ovs_mutex_unlock(&dpdk_mutex); |
58397e6c KT |
1424 | return err; |
1425 | } | |
1426 | ||
95fb793a | 1427 | static int |
1428 | netdev_dpdk_construct(struct netdev *netdev) | |
1429 | { | |
95fb793a | 1430 | int err; |
1431 | ||
95fb793a | 1432 | ovs_mutex_lock(&dpdk_mutex); |
bb37956a IM |
1433 | err = common_construct(netdev, DPDK_ETH_PORT_ID_INVALID, |
1434 | DPDK_DEV_ETH, SOCKET0); | |
8a9562d2 PS |
1435 | ovs_mutex_unlock(&dpdk_mutex); |
1436 | return err; | |
1437 | } | |
1438 | ||
1ce30dfd DDP |
1439 | static void |
1440 | common_destruct(struct netdev_dpdk *dev) | |
1441 | OVS_REQUIRES(dpdk_mutex) | |
1442 | OVS_EXCLUDED(dev->mutex) | |
1443 | { | |
1444 | rte_free(dev->tx_q); | |
43307ad0 | 1445 | dpdk_mp_put(dev->dpdk_mp); |
1ce30dfd DDP |
1446 | |
1447 | ovs_list_remove(&dev->list_node); | |
1448 | free(ovsrcu_get_protected(struct ingress_policer *, | |
1449 | &dev->ingress_policer)); | |
2f862c71 | 1450 | free(dev->sw_stats); |
1ce30dfd DDP |
1451 | ovs_mutex_destroy(&dev->mutex); |
1452 | } | |
1453 | ||
8a9562d2 | 1454 | static void |
d46285a2 | 1455 | netdev_dpdk_destruct(struct netdev *netdev) |
8a9562d2 | 1456 | { |
d46285a2 | 1457 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
40e940e4 OM |
1458 | struct rte_device *rte_dev; |
1459 | struct rte_eth_dev *eth_dev; | |
8a9562d2 | 1460 | |
8d38823b | 1461 | ovs_mutex_lock(&dpdk_mutex); |
8d38823b | 1462 | |
8a9562d2 | 1463 | rte_eth_dev_stop(dev->port_id); |
606f6650 | 1464 | dev->started = false; |
5dcde09c IM |
1465 | |
1466 | if (dev->attached) { | |
40e940e4 OM |
1467 | /* Retrieve eth device data before closing it. |
1468 | * FIXME: avoid direct access to DPDK internal array rte_eth_devices. | |
1469 | */ | |
1470 | eth_dev = &rte_eth_devices[dev->port_id]; | |
40e940e4 OM |
1471 | rte_dev = eth_dev->device; |
1472 | ||
1473 | /* Remove the eth device. */ | |
5dcde09c | 1474 | rte_eth_dev_close(dev->port_id); |
40e940e4 | 1475 | |
252e1e57 IS |
1476 | /* Remove this rte device and all its eth devices if all the eth |
1477 | * devices belonging to the rte device are closed. | |
40e940e4 | 1478 | */ |
252e1e57 | 1479 | if (!netdev_dpdk_get_num_ports(rte_dev)) { |
595ce47c IM |
1480 | int ret = rte_dev_remove(rte_dev); |
1481 | ||
1482 | if (ret < 0) { | |
1483 | VLOG_ERR("Device '%s' can not be detached: %s.", | |
1484 | dev->devargs, rte_strerror(-ret)); | |
40e940e4 OM |
1485 | } else { |
1486 | /* Device was closed and detached. */ | |
1487 | VLOG_INFO("Device '%s' has been removed and detached", | |
1488 | dev->devargs); | |
1489 | } | |
5dcde09c | 1490 | } else { |
40e940e4 OM |
1491 | /* Device was only closed. rte_dev_remove() was not called. */ |
1492 | VLOG_INFO("Device '%s' has been removed", dev->devargs); | |
5dcde09c IM |
1493 | } |
1494 | } | |
1495 | ||
ac1a9bb9 | 1496 | netdev_dpdk_clear_xstats(dev); |
55e075e6 | 1497 | free(dev->devargs); |
1ce30dfd | 1498 | common_destruct(dev); |
8d38823b | 1499 | |
8a9562d2 | 1500 | ovs_mutex_unlock(&dpdk_mutex); |
58397e6c | 1501 | } |
8a9562d2 | 1502 | |
3f891bbe DDP |
1503 | /* rte_vhost_driver_unregister() can call back destroy_device(), which will |
1504 | * try to acquire 'dpdk_mutex' and possibly 'dev->mutex'. To avoid a | |
1505 | * deadlock, none of the mutexes must be held while calling this function. */ | |
1506 | static int | |
c1ff66ac CL |
1507 | dpdk_vhost_driver_unregister(struct netdev_dpdk *dev OVS_UNUSED, |
1508 | char *vhost_id) | |
3f891bbe DDP |
1509 | OVS_EXCLUDED(dpdk_mutex) |
1510 | OVS_EXCLUDED(dev->mutex) | |
1511 | { | |
c1ff66ac | 1512 | return rte_vhost_driver_unregister(vhost_id); |
3f891bbe DDP |
1513 | } |
1514 | ||
58397e6c | 1515 | static void |
d46285a2 | 1516 | netdev_dpdk_vhost_destruct(struct netdev *netdev) |
58397e6c | 1517 | { |
d46285a2 | 1518 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
c1ff66ac | 1519 | char *vhost_id; |
58397e6c | 1520 | |
8d38823b | 1521 | ovs_mutex_lock(&dpdk_mutex); |
8d38823b | 1522 | |
c62da695 | 1523 | /* Guest becomes an orphan if still attached. */ |
c1ff66ac CL |
1524 | if (netdev_dpdk_get_vid(dev) >= 0 |
1525 | && !(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) { | |
c62da695 | 1526 | VLOG_ERR("Removing port '%s' while vhost device still attached.", |
d46285a2 | 1527 | netdev->name); |
58be5c0e MK |
1528 | VLOG_ERR("To restore connectivity after re-adding of port, VM on " |
1529 | "socket '%s' must be restarted.", dev->vhost_id); | |
58397e6c KT |
1530 | } |
1531 | ||
bb9d2623 IM |
1532 | vhost_id = dev->vhost_id; |
1533 | dev->vhost_id = NULL; | |
35c91567 | 1534 | rte_free(dev->vhost_rxq_enabled); |
c1ff66ac | 1535 | |
1ce30dfd DDP |
1536 | common_destruct(dev); |
1537 | ||
58397e6c | 1538 | ovs_mutex_unlock(&dpdk_mutex); |
3f891bbe | 1539 | |
bb9d2623 | 1540 | if (!vhost_id) { |
821b8664 IM |
1541 | goto out; |
1542 | } | |
1543 | ||
c1ff66ac | 1544 | if (dpdk_vhost_driver_unregister(dev, vhost_id)) { |
41964543 IM |
1545 | VLOG_ERR("%s: Unable to unregister vhost driver for socket '%s'.\n", |
1546 | netdev->name, vhost_id); | |
c1ff66ac CL |
1547 | } else if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) { |
1548 | /* OVS server mode - remove this socket from the unlink-at-exit list. */ | |
1549 | fatal_signal_remove_file_to_unlink(vhost_id); | |
3f891bbe | 1550 | } |
821b8664 | 1551 | out: |
c1ff66ac | 1552 | free(vhost_id); |
8a9562d2 PS |
1553 | } |
1554 | ||
1555 | static void | |
d46285a2 | 1556 | netdev_dpdk_dealloc(struct netdev *netdev) |
8a9562d2 | 1557 | { |
d46285a2 | 1558 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 | 1559 | |
d46285a2 | 1560 | rte_free(dev); |
8a9562d2 PS |
1561 | } |
1562 | ||
971f4b39 | 1563 | static void |
ac1a9bb9 | 1564 | netdev_dpdk_clear_xstats(struct netdev_dpdk *dev) |
971f4b39 MW |
1565 | { |
1566 | /* If statistics are already allocated, we have to | |
1567 | * reconfigure, as the port_id could have changed. */ | |
1568 | if (dev->rte_xstats_names) { | |
1569 | free(dev->rte_xstats_names); | |
1570 | dev->rte_xstats_names = NULL; | |
1571 | dev->rte_xstats_names_size = 0; | |
1572 | } | |
1573 | if (dev->rte_xstats_ids) { | |
1574 | free(dev->rte_xstats_ids); | |
1575 | dev->rte_xstats_ids = NULL; | |
1576 | dev->rte_xstats_ids_size = 0; | |
1577 | } | |
1578 | } | |
1579 | ||
1580 | static const char* | |
1581 | netdev_dpdk_get_xstat_name(struct netdev_dpdk *dev, uint64_t id) | |
1582 | { | |
1583 | if (id >= dev->rte_xstats_names_size) { | |
1584 | return "UNKNOWN"; | |
1585 | } | |
1586 | return dev->rte_xstats_names[id].name; | |
1587 | } | |
1588 | ||
1589 | static bool | |
1590 | netdev_dpdk_configure_xstats(struct netdev_dpdk *dev) | |
1591 | OVS_REQUIRES(dev->mutex) | |
1592 | { | |
1593 | int rte_xstats_len; | |
1594 | bool ret; | |
1595 | struct rte_eth_xstat *rte_xstats; | |
1596 | uint64_t id; | |
1597 | int xstats_no; | |
1598 | const char *name; | |
1599 | ||
1600 | /* Retrieve all XSTATS names. If anything goes wrong, or if the | |
1601 | * number of counters is 0, the rte_xstats_names buffer will be | |
1602 | * set to NULL, and any further xstats query won't be performed | |
1603 | * (e.g. during netdev_dpdk_get_stats | |
1604 | * execution). */ | |
1605 | ||
1606 | ret = false; | |
1607 | rte_xstats = NULL; | |
1608 | ||
1609 | if (dev->rte_xstats_names == NULL || dev->rte_xstats_ids == NULL) { | |
1610 | dev->rte_xstats_names_size = | |
1611 | rte_eth_xstats_get_names(dev->port_id, NULL, 0); | |
1612 | ||
1613 | if (dev->rte_xstats_names_size < 0) { | |
fa9f4eeb IM |
1614 | VLOG_WARN("Cannot get XSTATS for port: "DPDK_PORT_ID_FMT, |
1615 | dev->port_id); | |
971f4b39 MW |
1616 | dev->rte_xstats_names_size = 0; |
1617 | } else { | |
1618 | /* Reserve memory for xstats names and values */ | |
1619 | dev->rte_xstats_names = xcalloc(dev->rte_xstats_names_size, | |
1620 | sizeof *dev->rte_xstats_names); | |
1621 | ||
1622 | if (dev->rte_xstats_names) { | |
1623 | /* Retrieve xstats names. */ | |
1624 | rte_xstats_len = | |
1625 | rte_eth_xstats_get_names(dev->port_id, | |
1626 | dev->rte_xstats_names, | |
1627 | dev->rte_xstats_names_size); | |
1628 | ||
1629 | if (rte_xstats_len < 0) { | |
fa9f4eeb IM |
1630 | VLOG_WARN("Cannot get XSTATS names for port: " |
1631 | DPDK_PORT_ID_FMT, dev->port_id); | |
971f4b39 MW |
1632 | goto out; |
1633 | } else if (rte_xstats_len != dev->rte_xstats_names_size) { | |
fa9f4eeb IM |
1634 | VLOG_WARN("XSTATS size doesn't match for port: " |
1635 | DPDK_PORT_ID_FMT, dev->port_id); | |
971f4b39 MW |
1636 | goto out; |
1637 | } | |
1638 | ||
1639 | dev->rte_xstats_ids = xcalloc(dev->rte_xstats_names_size, | |
1640 | sizeof(uint64_t)); | |
1641 | ||
1642 | /* We have to calculate the number of counters. */ | |
1643 | rte_xstats = xmalloc(rte_xstats_len * sizeof *rte_xstats); | |
1644 | memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len); | |
1645 | ||
1646 | /* Retrieve xstats values. */ | |
1647 | if (rte_eth_xstats_get(dev->port_id, rte_xstats, | |
1648 | rte_xstats_len) > 0) { | |
1649 | dev->rte_xstats_ids_size = 0; | |
1650 | xstats_no = 0; | |
1651 | for (uint32_t i = 0; i < rte_xstats_len; i++) { | |
1652 | id = rte_xstats[i].id; | |
1653 | name = netdev_dpdk_get_xstat_name(dev, id); | |
1654 | /* We need to filter out everything except | |
1655 | * dropped, error and management counters */ | |
1656 | if (string_ends_with(name, "_errors") || | |
1657 | strstr(name, "_management_") || | |
1658 | string_ends_with(name, "_dropped")) { | |
1659 | ||
1660 | dev->rte_xstats_ids[xstats_no] = id; | |
1661 | xstats_no++; | |
1662 | } | |
1663 | } | |
1664 | dev->rte_xstats_ids_size = xstats_no; | |
1665 | ret = true; | |
1666 | } else { | |
fa9f4eeb IM |
1667 | VLOG_WARN("Can't get XSTATS IDs for port: " |
1668 | DPDK_PORT_ID_FMT, dev->port_id); | |
971f4b39 | 1669 | } |
34eb0863 IM |
1670 | |
1671 | free(rte_xstats); | |
971f4b39 MW |
1672 | } |
1673 | } | |
1674 | } else { | |
1675 | /* Already configured */ | |
1676 | ret = true; | |
1677 | } | |
1678 | ||
1679 | out: | |
1680 | if (!ret) { | |
1681 | netdev_dpdk_clear_xstats(dev); | |
1682 | } | |
1683 | return ret; | |
1684 | } | |
1685 | ||
f4336f50 GR |
1686 | static bool |
1687 | dpdk_port_is_representor(struct netdev_dpdk *dev) | |
1688 | OVS_REQUIRES(dev->mutex) | |
1689 | { | |
1690 | struct rte_eth_dev_info dev_info; | |
1691 | ||
1692 | rte_eth_dev_info_get(dev->port_id, &dev_info); | |
1693 | return (*dev_info.dev_flags) & RTE_ETH_DEV_REPRESENTOR; | |
1694 | } | |
1695 | ||
8a9562d2 | 1696 | static int |
a14b8947 | 1697 | netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args) |
8a9562d2 | 1698 | { |
a14b8947 | 1699 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 PS |
1700 | |
1701 | ovs_mutex_lock(&dev->mutex); | |
1702 | ||
050c60bf | 1703 | smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq); |
a14b8947 | 1704 | smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq); |
81acebda IM |
1705 | smap_add_format(args, "requested_tx_queues", "%d", dev->requested_n_txq); |
1706 | smap_add_format(args, "configured_tx_queues", "%d", netdev->n_txq); | |
0072e931 | 1707 | smap_add_format(args, "mtu", "%d", dev->mtu); |
451f26fd IM |
1708 | |
1709 | if (dev->type == DPDK_DEV_ETH) { | |
1710 | smap_add_format(args, "requested_rxq_descriptors", "%d", | |
1711 | dev->requested_rxq_size); | |
1712 | smap_add_format(args, "configured_rxq_descriptors", "%d", | |
1713 | dev->rxq_size); | |
1714 | smap_add_format(args, "requested_txq_descriptors", "%d", | |
1715 | dev->requested_txq_size); | |
1716 | smap_add_format(args, "configured_txq_descriptors", "%d", | |
1717 | dev->txq_size); | |
1a2bb118 SC |
1718 | if (dev->hw_ol_features & NETDEV_RX_CHECKSUM_OFFLOAD) { |
1719 | smap_add(args, "rx_csum_offload", "true"); | |
8155ab7e KT |
1720 | } else { |
1721 | smap_add(args, "rx_csum_offload", "false"); | |
1a2bb118 | 1722 | } |
29cf9c1b FL |
1723 | if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) { |
1724 | smap_add(args, "tx_tso_offload", "true"); | |
1725 | } else { | |
1726 | smap_add(args, "tx_tso_offload", "false"); | |
1727 | } | |
f8b64a61 RM |
1728 | smap_add(args, "lsc_interrupt_mode", |
1729 | dev->lsc_interrupt_mode ? "true" : "false"); | |
f4336f50 GR |
1730 | |
1731 | if (dpdk_port_is_representor(dev)) { | |
1732 | smap_add_format(args, "dpdk-vf-mac", ETH_ADDR_FMT, | |
1733 | ETH_ADDR_ARGS(dev->requested_hwaddr)); | |
1734 | } | |
451f26fd | 1735 | } |
8a9562d2 PS |
1736 | ovs_mutex_unlock(&dev->mutex); |
1737 | ||
1738 | return 0; | |
1739 | } | |
1740 | ||
55e075e6 | 1741 | static struct netdev_dpdk * |
bb37956a | 1742 | netdev_dpdk_lookup_by_port_id(dpdk_port_t port_id) |
55e075e6 CL |
1743 | OVS_REQUIRES(dpdk_mutex) |
1744 | { | |
1745 | struct netdev_dpdk *dev; | |
1746 | ||
1747 | LIST_FOR_EACH (dev, list_node, &dpdk_list) { | |
1748 | if (dev->port_id == port_id) { | |
1749 | return dev; | |
1750 | } | |
1751 | } | |
1752 | ||
1753 | return NULL; | |
1754 | } | |
1755 | ||
5e758818 YL |
1756 | static dpdk_port_t |
1757 | netdev_dpdk_get_port_by_mac(const char *mac_str) | |
1758 | { | |
1759 | dpdk_port_t port_id; | |
1760 | struct eth_addr mac, port_mac; | |
1761 | ||
1762 | if (!eth_addr_from_string(mac_str, &mac)) { | |
1763 | VLOG_ERR("invalid mac: %s", mac_str); | |
1764 | return DPDK_ETH_PORT_ID_INVALID; | |
1765 | } | |
1766 | ||
1767 | RTE_ETH_FOREACH_DEV (port_id) { | |
127b6a6e | 1768 | struct rte_ether_addr ea; |
5e758818 YL |
1769 | |
1770 | rte_eth_macaddr_get(port_id, &ea); | |
1771 | memcpy(port_mac.ea, ea.addr_bytes, ETH_ADDR_LEN); | |
1772 | if (eth_addr_equals(mac, port_mac)) { | |
1773 | return port_id; | |
1774 | } | |
1775 | } | |
1776 | ||
1777 | return DPDK_ETH_PORT_ID_INVALID; | |
1778 | } | |
1779 | ||
40e940e4 OM |
1780 | /* Return the first DPDK port id matching the devargs pattern. */ |
1781 | static dpdk_port_t netdev_dpdk_get_port_by_devargs(const char *devargs) | |
1782 | OVS_REQUIRES(dpdk_mutex) | |
1783 | { | |
1784 | dpdk_port_t port_id; | |
1785 | struct rte_dev_iterator iterator; | |
1786 | ||
1787 | RTE_ETH_FOREACH_MATCHING_DEV (port_id, devargs, &iterator) { | |
1788 | /* If we break out of the loop, we must call rte_eth_iterator_cleanup(). */ | |
1789 | rte_eth_iterator_cleanup(&iterator); | |
1790 | break; | |
1791 | } | |
1792 | ||
1793 | return port_id; | |
1794 | } | |
1795 | ||
5e758818 | 1796 | /* |
40e940e4 OM |
1797 | * Normally, a PCI id (optionally followed by a representor number) |
1798 | * is enough to identify a specific DPDK port. | |
5e758818 YL |
1799 | * However, some NICs have multiple ports that share the same PCI | |
1800 | * id, and for those the PCI id alone cannot identify a port. | |
1801 | * | |
1802 | * To fix that, here one more method is introduced: "class=eth,mac=$MAC". | |
1803 | * | |
1804 | * Note that compatibility is fully preserved: users can still use | |
1805 | * the PCI id for adding ports (when it is sufficient for them). | |
1806 | */ | |
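/* For example (illustrative values only):
 *
 *     options:dpdk-devargs=0000:08:00.2
 *     options:dpdk-devargs="class=eth,mac=00:11:22:33:44:55"
 *
 * Both forms end up in netdev_dpdk_process_devargs() below, which
 * dispatches on the "class=eth,mac=" prefix. */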
bb37956a | 1807 | static dpdk_port_t |
5dcde09c IM |
1808 | netdev_dpdk_process_devargs(struct netdev_dpdk *dev, |
1809 | const char *devargs, char **errp) | |
40e940e4 | 1810 | OVS_REQUIRES(dpdk_mutex) |
55e075e6 | 1811 | { |
40e940e4 | 1812 | dpdk_port_t new_port_id; |
55e075e6 | 1813 | |
5e758818 YL |
1814 | if (strncmp(devargs, "class=eth,mac=", 14) == 0) { |
1815 | new_port_id = netdev_dpdk_get_port_by_mac(&devargs[14]); | |
1816 | } else { | |
40e940e4 OM |
1817 | new_port_id = netdev_dpdk_get_port_by_devargs(devargs); |
1818 | if (!rte_eth_dev_is_valid_port(new_port_id)) { | |
5e758818 | 1819 | /* Device not found in DPDK, attempt to attach it */ |
40e940e4 | 1820 | if (rte_dev_probe(devargs)) { |
5e758818 | 1821 | new_port_id = DPDK_ETH_PORT_ID_INVALID; |
40e940e4 OM |
1822 | } else { |
1823 | new_port_id = netdev_dpdk_get_port_by_devargs(devargs); | |
1824 | if (rte_eth_dev_is_valid_port(new_port_id)) { | |
1825 | /* Attach successful */ | |
1826 | dev->attached = true; | |
1827 | VLOG_INFO("Device '%s' attached to DPDK", devargs); | |
1828 | } else { | |
1829 | /* Attach unsuccessful */ | |
1830 | new_port_id = DPDK_ETH_PORT_ID_INVALID; | |
1831 | } | |
5e758818 | 1832 | } |
55e075e6 | 1833 | } |
5e758818 YL |
1834 | } |
1835 | ||
1836 | if (new_port_id == DPDK_ETH_PORT_ID_INVALID) { | |
1837 | VLOG_WARN_BUF(errp, "Error attaching device '%s' to DPDK", devargs); | |
55e075e6 CL |
1838 | } |
1839 | ||
1840 | return new_port_id; | |
1841 | } | |
1842 | ||
988fd463 EC |
1843 | static int |
1844 | dpdk_eth_event_callback(dpdk_port_t port_id, enum rte_eth_event_type type, | |
1845 | void *param OVS_UNUSED, void *ret_param OVS_UNUSED) | |
1846 | { | |
1847 | struct netdev_dpdk *dev; | |
1848 | ||
1849 | switch ((int) type) { | |
1850 | case RTE_ETH_EVENT_INTR_RESET: | |
1851 | ovs_mutex_lock(&dpdk_mutex); | |
1852 | dev = netdev_dpdk_lookup_by_port_id(port_id); | |
1853 | if (dev) { | |
1854 | ovs_mutex_lock(&dev->mutex); | |
1855 | dev->reset_needed = true; | |
1856 | netdev_request_reconfigure(&dev->up); | |
1857 | VLOG_DBG_RL(&rl, "%s: Device reset requested.", | |
1858 | netdev_get_name(&dev->up)); | |
1859 | ovs_mutex_unlock(&dev->mutex); | |
1860 | } | |
1861 | ovs_mutex_unlock(&dpdk_mutex); | |
1862 | break; | |
1863 | ||
1864 | default: | |
1865 | /* Ignore all other types. */ | |
1866 | break; | |
1867 | } | |
1868 | return 0; | |
1869 | } | |
1870 | ||
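/* A minimal sketch of how such a handler is wired up with the DPDK API
 * once a port id is known (not a verbatim copy of this file's
 * registration code):
 *
 *     rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
 *                                   dpdk_eth_event_callback, NULL);
 *
 * DPDK then invokes the callback from its interrupt thread when the
 * device asks for a reset; the handler above only marks the port and
 * defers the actual work to the next reconfiguration. */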
c3d062a7 CL |
1871 | static void |
1872 | dpdk_set_rxq_config(struct netdev_dpdk *dev, const struct smap *args) | |
b614c894 | 1873 | OVS_REQUIRES(dev->mutex) |
a14b8947 | 1874 | { |
050c60bf | 1875 | int new_n_rxq; |
a14b8947 | 1876 | |
2a21e757 | 1877 | new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1); |
050c60bf DDP |
1878 | if (new_n_rxq != dev->requested_n_rxq) { |
1879 | dev->requested_n_rxq = new_n_rxq; | |
c3d062a7 | 1880 | netdev_request_reconfigure(&dev->up); |
050c60bf | 1881 | } |
c3d062a7 CL |
1882 | } |
1883 | ||
b685696b CL |
1884 | static void |
1885 | dpdk_process_queue_size(struct netdev *netdev, const struct smap *args, | |
1886 | const char *flag, int default_size, int *new_size) | |
1887 | { | |
1888 | int queue_size = smap_get_int(args, flag, default_size); | |
1889 | ||
1890 | if (queue_size <= 0 || queue_size > NIC_PORT_MAX_Q_SIZE | |
1891 | || !is_pow2(queue_size)) { | |
1892 | queue_size = default_size; | |
1893 | } | |
1894 | ||
1895 | if (queue_size != *new_size) { | |
1896 | *new_size = queue_size; | |
1897 | netdev_request_reconfigure(netdev); | |
1898 | } | |
1899 | } | |
1900 | ||
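/* A standalone sketch of the validation rule applied above, assuming
 * OVS's is_pow2() helper and the NIC_PORT_* constants: only powers of
 * two in (0, NIC_PORT_MAX_Q_SIZE] are accepted, so e.g. 1024 is kept
 * while 0, 1000 or an oversized request falls back to the default. */
static int
queue_size_or_default(int requested, int default_size)
{
    if (requested <= 0 || requested > NIC_PORT_MAX_Q_SIZE
        || !is_pow2(requested)) {
        return default_size;
    }
    return requested;
}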
c3d062a7 | 1901 | static int |
9fff138e DDP |
1902 | netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args, |
1903 | char **errp) | |
c3d062a7 CL |
1904 | { |
1905 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
f8b64a61 | 1906 | bool rx_fc_en, tx_fc_en, autoneg, lsc_interrupt_mode; |
c2c84474 | 1907 | bool flow_control_requested = true; |
b614c894 IM |
1908 | enum rte_eth_fc_mode fc_mode; |
1909 | static const enum rte_eth_fc_mode fc_mode_set[2][2] = { | |
1910 | {RTE_FC_NONE, RTE_FC_TX_PAUSE}, | |
1911 | {RTE_FC_RX_PAUSE, RTE_FC_FULL } | |
1912 | }; | |
55e075e6 | 1913 | const char *new_devargs; |
f4336f50 | 1914 | const char *vf_mac; |
55e075e6 | 1915 | int err = 0; |
c3d062a7 | 1916 | |
55e075e6 | 1917 | ovs_mutex_lock(&dpdk_mutex); |
c3d062a7 CL |
1918 | ovs_mutex_lock(&dev->mutex); |
1919 | ||
1920 | dpdk_set_rxq_config(dev, args); | |
1921 | ||
b685696b CL |
1922 | dpdk_process_queue_size(netdev, args, "n_rxq_desc", |
1923 | NIC_PORT_DEFAULT_RXQ_SIZE, | |
1924 | &dev->requested_rxq_size); | |
1925 | dpdk_process_queue_size(netdev, args, "n_txq_desc", | |
1926 | NIC_PORT_DEFAULT_TXQ_SIZE, | |
1927 | &dev->requested_txq_size); | |
1928 | ||
55e075e6 CL |
1929 | new_devargs = smap_get(args, "dpdk-devargs"); |
1930 | ||
cefdd80a | 1931 | if (dev->devargs && new_devargs && strcmp(new_devargs, dev->devargs)) { |
55e075e6 CL |
1932 | /* The user requested a new device. If we return error, the caller |
1933 | * will delete this netdev and try to recreate it. */ | |
1934 | err = EAGAIN; | |
1935 | goto out; | |
1936 | } | |
1937 | ||
1938 | /* dpdk-devargs is required for device configuration */ | |
1939 | if (new_devargs && new_devargs[0]) { | |
1940 | /* Don't process dpdk-devargs if value is unchanged and port id | |
1941 | * is valid */ | |
1942 | if (!(dev->devargs && !strcmp(dev->devargs, new_devargs) | |
1943 | && rte_eth_dev_is_valid_port(dev->port_id))) { | |
bb37956a IM |
1944 | dpdk_port_t new_port_id = netdev_dpdk_process_devargs(dev, |
1945 | new_devargs, | |
1946 | errp); | |
55e075e6 CL |
1947 | if (!rte_eth_dev_is_valid_port(new_port_id)) { |
1948 | err = EINVAL; | |
1949 | } else if (new_port_id == dev->port_id) { | |
1950 | /* Already configured, do not reconfigure again */ | |
1951 | err = 0; | |
1952 | } else { | |
1953 | struct netdev_dpdk *dup_dev; | |
bb37956a | 1954 | |
55e075e6 CL |
1955 | dup_dev = netdev_dpdk_lookup_by_port_id(new_port_id); |
1956 | if (dup_dev) { | |
9fff138e | 1957 | VLOG_WARN_BUF(errp, "'%s' is trying to use device '%s' " |
40e940e4 | 1958 | "which is already in use by '%s'", |
9fff138e DDP |
1959 | netdev_get_name(netdev), new_devargs, |
1960 | netdev_get_name(&dup_dev->up)); | |
55e075e6 CL |
1961 | err = EADDRINUSE; |
1962 | } else { | |
bd4e172b | 1963 | int sid = rte_eth_dev_socket_id(new_port_id); |
bb37956a | 1964 | |
bd4e172b | 1965 | dev->requested_socket_id = sid < 0 ? SOCKET0 : sid; |
55e075e6 CL |
1966 | dev->devargs = xstrdup(new_devargs); |
1967 | dev->port_id = new_port_id; | |
1968 | netdev_request_reconfigure(&dev->up); | |
971f4b39 | 1969 | netdev_dpdk_clear_xstats(dev); |
55e075e6 CL |
1970 | err = 0; |
1971 | } | |
1972 | } | |
1973 | } | |
1974 | } else { | |
9fff138e DDP |
1975 | VLOG_WARN_BUF(errp, "'%s' is missing 'options:dpdk-devargs'. " |
1976 | "The old 'dpdk<port_id>' names are not supported", | |
1977 | netdev_get_name(netdev)); | |
55e075e6 CL |
1978 | err = EINVAL; |
1979 | } | |
1980 | ||
1981 | if (err) { | |
1982 | goto out; | |
1983 | } | |
1984 | ||
f4336f50 GR |
1985 | vf_mac = smap_get(args, "dpdk-vf-mac"); |
1986 | if (vf_mac) { | |
1987 | struct eth_addr mac; | |
1988 | ||
1989 | if (!dpdk_port_is_representor(dev)) { | |
1990 | VLOG_WARN_BUF(errp, "'%s' is trying to set the VF MAC '%s' " | |
1991 | "but 'options:dpdk-vf-mac' is only supported for " | |
1992 | "VF representors.", | |
1993 | netdev_get_name(netdev), vf_mac); | |
1994 | } else if (!eth_addr_from_string(vf_mac, &mac)) { | |
1995 | VLOG_WARN_BUF(errp, "interface '%s': cannot parse VF MAC '%s'.", | |
1996 | netdev_get_name(netdev), vf_mac); | |
1997 | } else if (eth_addr_is_multicast(mac)) { | |
1998 | VLOG_WARN_BUF(errp, | |
1999 | "interface '%s': cannot set VF MAC to multicast " | |
2000 | "address '%s'.", netdev_get_name(netdev), vf_mac); | |
2001 | } else if (!eth_addr_equals(dev->requested_hwaddr, mac)) { | |
2002 | dev->requested_hwaddr = mac; | |
2003 | netdev_request_reconfigure(netdev); | |
2004 | } | |
2005 | } | |
2006 | ||
f8b64a61 RM |
2007 | lsc_interrupt_mode = smap_get_bool(args, "dpdk-lsc-interrupt", false); |
2008 | if (dev->requested_lsc_interrupt_mode != lsc_interrupt_mode) { | |
2009 | dev->requested_lsc_interrupt_mode = lsc_interrupt_mode; | |
2010 | netdev_request_reconfigure(netdev); | |
2011 | } | |
2012 | ||
c3d062a7 CL |
2013 | rx_fc_en = smap_get_bool(args, "rx-flow-ctrl", false); |
2014 | tx_fc_en = smap_get_bool(args, "tx-flow-ctrl", false); | |
b614c894 | 2015 | autoneg = smap_get_bool(args, "flow-ctrl-autoneg", false); |
c3d062a7 | 2016 | |
b614c894 | 2017 | fc_mode = fc_mode_set[tx_fc_en][rx_fc_en]; |
c2c84474 TK |
2018 | |
2019 | if (!smap_get(args, "rx-flow-ctrl") && !smap_get(args, "tx-flow-ctrl") | |
2020 | && !smap_get(args, "flow-ctrl-autoneg")) { | |
2021 | /* FIXME: User didn't ask for flow control configuration. | |
2022 | * For now we'll not print a warning if flow control is not | |
2023 | * supported by the DPDK port. */ | |
2024 | flow_control_requested = false; | |
2025 | } | |
2026 | ||
2027 | /* Get the Flow control configuration. */ | |
2028 | err = -rte_eth_dev_flow_ctrl_get(dev->port_id, &dev->fc_conf); | |
2029 | if (err) { | |
2030 | if (err == ENOTSUP) { | |
2031 | if (flow_control_requested) { | |
2032 | VLOG_WARN("%s: Flow control is not supported.", | |
2033 | netdev_get_name(netdev)); | |
2034 | } | |
2035 | err = 0; /* Not fatal. */ | |
2036 | } else { | |
2037 | VLOG_WARN("%s: Cannot get flow control parameters: %s", | |
2038 | netdev_get_name(netdev), rte_strerror(err)); | |
2039 | } | |
2040 | goto out; | |
2041 | } | |
2042 | ||
b614c894 IM |
2043 | if (dev->fc_conf.mode != fc_mode || autoneg != dev->fc_conf.autoneg) { |
2044 | dev->fc_conf.mode = fc_mode; | |
2045 | dev->fc_conf.autoneg = autoneg; | |
2046 | dpdk_eth_flow_ctrl_setup(dev); | |
2047 | } | |
9fd39370 | 2048 | |
55e075e6 | 2049 | out: |
c3d062a7 | 2050 | ovs_mutex_unlock(&dev->mutex); |
55e075e6 | 2051 | ovs_mutex_unlock(&dpdk_mutex); |
c3d062a7 | 2052 | |
55e075e6 | 2053 | return err; |
c3d062a7 CL |
2054 | } |
2055 | ||
c1ff66ac | 2056 | static int |
2d24d165 | 2057 | netdev_dpdk_vhost_client_set_config(struct netdev *netdev, |
9fff138e DDP |
2058 | const struct smap *args, |
2059 | char **errp OVS_UNUSED) | |
c1ff66ac CL |
2060 | { |
2061 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
2062 | const char *path; | |
080f080c | 2063 | int max_tx_retries, cur_max_tx_retries; |
c1ff66ac | 2064 | |
6881885a | 2065 | ovs_mutex_lock(&dev->mutex); |
c1ff66ac CL |
2066 | if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) { |
2067 | path = smap_get(args, "vhost-server-path"); | |
bb9d2623 IM |
2068 | if (!nullable_string_is_equal(path, dev->vhost_id)) { |
2069 | free(dev->vhost_id); | |
2070 | dev->vhost_id = nullable_xstrdup(path); | |
c1ff66ac CL |
2071 | netdev_request_reconfigure(netdev); |
2072 | } | |
2073 | } | |
080f080c KT |
2074 | |
2075 | max_tx_retries = smap_get_int(args, "tx-retries-max", | |
2076 | VHOST_ENQ_RETRY_DEF); | |
2077 | if (max_tx_retries < VHOST_ENQ_RETRY_MIN | |
2078 | || max_tx_retries > VHOST_ENQ_RETRY_MAX) { | |
2079 | max_tx_retries = VHOST_ENQ_RETRY_DEF; | |
2080 | } | |
2081 | atomic_read_relaxed(&dev->vhost_tx_retries_max, &cur_max_tx_retries); | |
2082 | if (max_tx_retries != cur_max_tx_retries) { | |
2083 | atomic_store_relaxed(&dev->vhost_tx_retries_max, max_tx_retries); | |
2084 | VLOG_INFO("Max Tx retries for vhost device '%s' set to %d", | |
2085 | netdev_get_name(netdev), max_tx_retries); | |
2086 | } | |
6881885a | 2087 | ovs_mutex_unlock(&dev->mutex); |
c1ff66ac CL |
2088 | |
2089 | return 0; | |
2090 | } | |
2091 | ||
7dec44fe | 2092 | static int |
d46285a2 | 2093 | netdev_dpdk_get_numa_id(const struct netdev *netdev) |
7dec44fe | 2094 | { |
d46285a2 | 2095 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
7dec44fe | 2096 | |
d46285a2 | 2097 | return dev->socket_id; |
7dec44fe AW |
2098 | } |
2099 | ||
050c60bf | 2100 | /* Sets the number of tx queues for the dpdk interface. */ |
5496878c | 2101 | static int |
050c60bf | 2102 | netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq) |
5496878c | 2103 | { |
d46285a2 | 2104 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
5496878c | 2105 | |
d46285a2 | 2106 | ovs_mutex_lock(&dev->mutex); |
91968eb0 | 2107 | |
050c60bf DDP |
2108 | if (dev->requested_n_txq == n_txq) { |
2109 | goto out; | |
4573fbd3 FL |
2110 | } |
2111 | ||
050c60bf DDP |
2112 | dev->requested_n_txq = n_txq; |
2113 | netdev_request_reconfigure(netdev); | |
58397e6c | 2114 | |
050c60bf | 2115 | out: |
d46285a2 | 2116 | ovs_mutex_unlock(&dev->mutex); |
050c60bf | 2117 | return 0; |
58397e6c KT |
2118 | } |
2119 | ||
8a9562d2 PS |
2120 | static struct netdev_rxq * |
2121 | netdev_dpdk_rxq_alloc(void) | |
2122 | { | |
2123 | struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx); | |
2124 | ||
eff23640 DDP |
2125 | if (rx) { |
2126 | return &rx->up; | |
2127 | } | |
2128 | ||
2129 | return NULL; | |
8a9562d2 PS |
2130 | } |
2131 | ||
2132 | static struct netdev_rxq_dpdk * | |
d46285a2 | 2133 | netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq) |
8a9562d2 | 2134 | { |
d46285a2 | 2135 | return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up); |
8a9562d2 PS |
2136 | } |
2137 | ||
2138 | static int | |
d46285a2 | 2139 | netdev_dpdk_rxq_construct(struct netdev_rxq *rxq) |
8a9562d2 | 2140 | { |
d46285a2 DDP |
2141 | struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq); |
2142 | struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev); | |
8a9562d2 | 2143 | |
d46285a2 DDP |
2144 | ovs_mutex_lock(&dev->mutex); |
2145 | rx->port_id = dev->port_id; | |
2146 | ovs_mutex_unlock(&dev->mutex); | |
8a9562d2 PS |
2147 | |
2148 | return 0; | |
2149 | } | |
2150 | ||
2151 | static void | |
d46285a2 | 2152 | netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED) |
8a9562d2 PS |
2153 | { |
2154 | } | |
2155 | ||
2156 | static void | |
d46285a2 | 2157 | netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq) |
8a9562d2 | 2158 | { |
d46285a2 | 2159 | struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq); |
8a9562d2 PS |
2160 | |
2161 | rte_free(rx); | |
2162 | } | |
2163 | ||
29cf9c1b FL |
2164 | /* Prepare the packet for HWOL. |
2165 | * Return true if the packet is OK to continue processing. */ | |
2166 | static bool | |
2167 | netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf) | |
2168 | { | |
2169 | struct dp_packet *pkt = CONTAINER_OF(mbuf, struct dp_packet, mbuf); | |
2170 | ||
2171 | if (mbuf->ol_flags & PKT_TX_L4_MASK) { | |
2172 | mbuf->l2_len = (char *)dp_packet_l3(pkt) - (char *)dp_packet_eth(pkt); | |
2173 | mbuf->l3_len = (char *)dp_packet_l4(pkt) - (char *)dp_packet_l3(pkt); | |
2174 | mbuf->outer_l2_len = 0; | |
2175 | mbuf->outer_l3_len = 0; | |
2176 | } | |
2177 | ||
2178 | if (mbuf->ol_flags & PKT_TX_TCP_SEG) { | |
2179 | struct tcp_header *th = dp_packet_l4(pkt); | |
2180 | ||
2181 | if (!th) { | |
2182 | VLOG_WARN_RL(&rl, "%s: TCP Segmentation without L4 header" | |
2183 | " pkt len: %"PRIu32"", dev->up.name, mbuf->pkt_len); | |
2184 | return false; | |
2185 | } | |
2186 | ||
2187 | mbuf->l4_len = TCP_OFFSET(th->tcp_ctl) * 4; | |
2188 | mbuf->ol_flags |= PKT_TX_TCP_CKSUM; | |
2189 | mbuf->tso_segsz = dev->mtu - mbuf->l3_len - mbuf->l4_len; | |
2190 | ||
2191 | if (mbuf->ol_flags & PKT_TX_IPV4) { | |
2192 | mbuf->ol_flags |= PKT_TX_IP_CKSUM; | |
2193 | } | |
2194 | } | |
2195 | return true; | |
2196 | } | |
2197 | ||
2198 | /* Prepare a batch for HWOL. | |
2199 | * Return the number of good packets in the batch. */ | |
2200 | static int | |
2201 | netdev_dpdk_prep_hwol_batch(struct netdev_dpdk *dev, struct rte_mbuf **pkts, | |
2202 | int pkt_cnt) | |
2203 | { | |
2204 | int i = 0; | |
2205 | int cnt = 0; | |
2206 | struct rte_mbuf *pkt; | |
2207 | ||
2208 | /* Prepare and filter bad HWOL packets. */ | |
2209 | for (i = 0; i < pkt_cnt; i++) { | |
2210 | pkt = pkts[i]; | |
2211 | if (!netdev_dpdk_prep_hwol_packet(dev, pkt)) { | |
2212 | rte_pktmbuf_free(pkt); | |
2213 | continue; | |
2214 | } | |
2215 | ||
2216 | if (OVS_UNLIKELY(i != cnt)) { | |
2217 | pkts[cnt] = pkt; | |
2218 | } | |
2219 | cnt++; | |
2220 | } | |
2221 | ||
2222 | return cnt; | |
2223 | } | |
2224 | ||
819f13bd DDP |
2225 | /* Tries to transmit 'pkts' to txq 'qid' of device 'dev'. Takes ownership of |
2226 | * 'pkts', even in case of failure. | |
2227 | * | |
2228 | * Returns the number of packets that weren't transmitted. */ | |
2229 | static inline int | |
b59cc14e | 2230 | netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid, |
819f13bd | 2231 | struct rte_mbuf **pkts, int cnt) |
8a9562d2 | 2232 | { |
1304f1f8 | 2233 | uint32_t nb_tx = 0; |
29cf9c1b FL |
2234 | uint16_t nb_tx_prep = cnt; |
2235 | ||
2236 | if (userspace_tso_enabled()) { | |
2237 | nb_tx_prep = rte_eth_tx_prepare(dev->port_id, qid, pkts, cnt); | |
2238 | if (nb_tx_prep != cnt) { | |
2239 | VLOG_WARN_RL(&rl, "%s: Output batch contains invalid packets. " | |
2240 | "Only %u/%u are valid: %s", dev->up.name, nb_tx_prep, | |
2241 | cnt, rte_strerror(rte_errno)); | |
2242 | } | |
2243 | } | |
1304f1f8 | 2244 | |
29cf9c1b | 2245 | while (nb_tx != nb_tx_prep) { |
1304f1f8 DDP |
2246 | uint32_t ret; |
2247 | ||
29cf9c1b FL |
2248 | ret = rte_eth_tx_burst(dev->port_id, qid, pkts + nb_tx, |
2249 | nb_tx_prep - nb_tx); | |
1304f1f8 DDP |
2250 | if (!ret) { |
2251 | break; | |
2252 | } | |
2253 | ||
2254 | nb_tx += ret; | |
2255 | } | |
8a9562d2 | 2256 | |
b59cc14e | 2257 | if (OVS_UNLIKELY(nb_tx != cnt)) { |
819f13bd | 2258 | /* Free the buffers that we couldn't transmit, one at a time (each |
db73f716 DDP |
2259 | * packet could come from a different mempool). */ | |
2260 | int i; | |
2261 | ||
b59cc14e IM |
2262 | for (i = nb_tx; i < cnt; i++) { |
2263 | rte_pktmbuf_free(pkts[i]); | |
db73f716 | 2264 | } |
8a9562d2 | 2265 | } |
819f13bd DDP |
2266 | |
2267 | return cnt - nb_tx; | |
8a9562d2 PS |
2268 | } |
2269 | ||
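/* A minimal caller sketch (illustrative only): packets reported back as
 * untransmitted have already been freed by netdev_dpdk_eth_tx_burst(),
 * so a caller merely accounts them as drops. */
static void
eth_tx_burst_example(struct netdev_dpdk *dev, int qid,
                     struct rte_mbuf **pkts, int cnt)
{
    int dropped = netdev_dpdk_eth_tx_burst(dev, qid, pkts, cnt);

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }
}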
f3926f29 | 2270 | static inline bool |
e61bdffc EC |
2271 | netdev_dpdk_srtcm_policer_pkt_handle(struct rte_meter_srtcm *meter, |
2272 | struct rte_meter_srtcm_profile *profile, | |
2273 | struct rte_mbuf *pkt, uint64_t time) | |
f3926f29 | 2274 | { |
127b6a6e | 2275 | uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct rte_ether_hdr); |
f3926f29 | 2276 | |
03f3f9c0 | 2277 | return rte_meter_srtcm_color_blind_check(meter, profile, time, pkt_len) == |
127b6a6e | 2278 | RTE_COLOR_GREEN; |
f3926f29 IS |
2279 | } |
2280 | ||
2281 | static int | |
e61bdffc EC |
2282 | srtcm_policer_run_single_packet(struct rte_meter_srtcm *meter, |
2283 | struct rte_meter_srtcm_profile *profile, | |
2284 | struct rte_mbuf **pkts, int pkt_cnt, | |
2285 | bool should_steal) | |
f3926f29 IS |
2286 | { |
2287 | int i = 0; | |
2288 | int cnt = 0; | |
2289 | struct rte_mbuf *pkt = NULL; | |
2290 | uint64_t current_time = rte_rdtsc(); | |
2291 | ||
2292 | for (i = 0; i < pkt_cnt; i++) { | |
2293 | pkt = pkts[i]; | |
2294 | /* Handle current packet */ | |
e61bdffc EC |
2295 | if (netdev_dpdk_srtcm_policer_pkt_handle(meter, profile, |
2296 | pkt, current_time)) { | |
f3926f29 IS |
2297 | if (cnt != i) { |
2298 | pkts[cnt] = pkt; | |
2299 | } | |
2300 | cnt++; | |
2301 | } else { | |
7d7ded7a | 2302 | if (should_steal) { |
3e90f7d7 GZ |
2303 | rte_pktmbuf_free(pkt); |
2304 | } | |
f3926f29 IS |
2305 | } |
2306 | } | |
2307 | ||
2308 | return cnt; | |
2309 | } | |
2310 | ||
9509913a IS |
2311 | static int |
2312 | ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts, | |
7d7ded7a | 2313 | int pkt_cnt, bool should_steal) |
9509913a IS |
2314 | { |
2315 | int cnt = 0; | |
2316 | ||
2317 | rte_spinlock_lock(&policer->policer_lock); | |
e61bdffc EC |
2318 | cnt = srtcm_policer_run_single_packet(&policer->in_policer, |
2319 | &policer->in_prof, | |
2320 | pkts, pkt_cnt, should_steal); | |
9509913a IS |
2321 | rte_spinlock_unlock(&policer->policer_lock); |
2322 | ||
2323 | return cnt; | |
2324 | } | |
2325 | ||
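/* A minimal sketch (illustrative parameters) of building the srTCM
 * state that srtcm_policer_run_single_packet() consumes, using the
 * same DPDK meter API as this file: */
static int
srtcm_example_config(struct rte_meter_srtcm *meter,
                     struct rte_meter_srtcm_profile *profile)
{
    struct rte_meter_srtcm_params params = {
        .cir = 125000,  /* Committed rate in bytes/s (= 1 Mbit/s). */
        .cbs = 2048,    /* Committed burst size in bytes. */
        .ebs = 2048,    /* Excess burst size in bytes. */
    };
    int err = rte_meter_srtcm_profile_config(profile, &params);

    return err ? err : rte_meter_srtcm_config(meter, profile);
}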
58397e6c | 2326 | static bool |
0a0f39df | 2327 | is_vhost_running(struct netdev_dpdk *dev) |
58397e6c | 2328 | { |
0a0f39df | 2329 | return (netdev_dpdk_get_vid(dev) >= 0 && dev->vhost_reconfigured); |
58397e6c KT |
2330 | } |
2331 | ||
d6e3feb5 | 2332 | static inline void |
2333 | netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats, | |
2334 | unsigned int packet_size) | |
2335 | { | |
2336 | /* Hard-coded search for the size bucket. */ | |
2337 | if (packet_size < 256) { | |
2338 | if (packet_size >= 128) { | |
2339 | stats->rx_128_to_255_packets++; | |
2340 | } else if (packet_size <= 64) { | |
2341 | stats->rx_1_to_64_packets++; | |
2342 | } else { | |
2343 | stats->rx_65_to_127_packets++; | |
2344 | } | |
2345 | } else { | |
2346 | if (packet_size >= 1523) { | |
2347 | stats->rx_1523_to_max_packets++; | |
2348 | } else if (packet_size >= 1024) { | |
2349 | stats->rx_1024_to_1522_packets++; | |
2350 | } else if (packet_size < 512) { | |
2351 | stats->rx_256_to_511_packets++; | |
2352 | } else { | |
2353 | stats->rx_512_to_1023_packets++; | |
2354 | } | |
2355 | } | |
2356 | } | |
2357 | ||
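/* Worked examples of the bucketing above (packet sizes in bytes):
 *   60   -> rx_1_to_64_packets
 *   100  -> rx_65_to_127_packets
 *   300  -> rx_256_to_511_packets
 *   1500 -> rx_1024_to_1522_packets
 *   9000 -> rx_1523_to_max_packets
 * Note that sizes 1518-1522 also land in the 1024-to-1522 bucket. */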
9e3ddd45 | 2358 | static inline void |
2f862c71 | 2359 | netdev_dpdk_vhost_update_rx_counters(struct netdev_dpdk *dev, |
9509913a | 2360 | struct dp_packet **packets, int count, |
2f862c71 | 2361 | int qos_drops) |
9e3ddd45 | 2362 | { |
2f862c71 | 2363 | struct netdev_stats *stats = &dev->stats; |
9e3ddd45 | 2364 | struct dp_packet *packet; |
2f862c71 SV |
2365 | unsigned int packet_size; |
2366 | int i; | |
9e3ddd45 TP |
2367 | |
2368 | stats->rx_packets += count; | |
2f862c71 | 2369 | stats->rx_dropped += qos_drops; |
9e3ddd45 TP |
2370 | for (i = 0; i < count; i++) { |
2371 | packet = packets[i]; | |
d6e3feb5 | 2372 | packet_size = dp_packet_size(packet); |
9e3ddd45 | 2373 | |
d6e3feb5 | 2374 | if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) { |
9e3ddd45 TP |
2375 | /* This only protects the following multicast counting from |
2376 | * too short packets, but it does not stop the packet from | |
2377 | * further processing. */ | |
2378 | stats->rx_errors++; | |
2379 | stats->rx_length_errors++; | |
2380 | continue; | |
2381 | } | |
2382 | ||
d6e3feb5 | 2383 | netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size); |
2384 | ||
9e3ddd45 TP |
2385 | struct eth_header *eh = (struct eth_header *) dp_packet_data(packet); |
2386 | if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) { | |
2387 | stats->multicast++; | |
2388 | } | |
2389 | ||
d6e3feb5 | 2390 | stats->rx_bytes += packet_size; |
9e3ddd45 | 2391 | } |
2f862c71 | 2392 | |
6d77abf4 KT |
2393 | if (OVS_UNLIKELY(qos_drops)) { |
2394 | dev->sw_stats->rx_qos_drops += qos_drops; | |
2395 | } | |
9e3ddd45 TP |
2396 | } |
2397 | ||
58397e6c KT |
2398 | /* |
2399 | * The receive path for the vhost port is the TX path out from guest. | |
2400 | */ | |
2401 | static int | |
d46285a2 | 2402 | netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq, |
8492adc2 | 2403 | struct dp_packet_batch *batch, int *qfill) |
58397e6c | 2404 | { |
d46285a2 | 2405 | struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev); |
9509913a | 2406 | struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev); |
58397e6c | 2407 | uint16_t nb_rx = 0; |
2f862c71 | 2408 | uint16_t qos_drops = 0; |
8492adc2 | 2409 | int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ; |
daf22bf7 | 2410 | int vid = netdev_dpdk_get_vid(dev); |
58397e6c | 2411 | |
daf22bf7 | 2412 | if (OVS_UNLIKELY(vid < 0 || !dev->vhost_reconfigured |
e543851d | 2413 | || !(dev->flags & NETDEV_UP))) { |
58397e6c KT |
2414 | return EAGAIN; |
2415 | } | |
2416 | ||
43307ad0 | 2417 | nb_rx = rte_vhost_dequeue_burst(vid, qid, dev->dpdk_mp->mp, |
64839cf4 | 2418 | (struct rte_mbuf **) batch->packets, |
cd159f1a | 2419 | NETDEV_MAX_BURST); |
58397e6c KT |
2420 | if (!nb_rx) { |
2421 | return EAGAIN; | |
2422 | } | |
2423 | ||
8492adc2 JS |
2424 | if (qfill) { |
2425 | if (nb_rx == NETDEV_MAX_BURST) { | |
2426 | /* The DPDK API returns a uint32_t which often has invalid bits in | |
2427 | * the upper 16-bits. Need to restrict the value to uint16_t. */ | |
2428 | *qfill = rte_vhost_rx_queue_count(vid, qid) & UINT16_MAX; | |
2429 | } else { | |
2430 | *qfill = 0; | |
2431 | } | |
2432 | } | |
2433 | ||
9509913a | 2434 | if (policer) { |
2f862c71 | 2435 | qos_drops = nb_rx; |
64839cf4 WT |
2436 | nb_rx = ingress_policer_run(policer, |
2437 | (struct rte_mbuf **) batch->packets, | |
3e90f7d7 | 2438 | nb_rx, true); |
2f862c71 | 2439 | qos_drops -= nb_rx; |
9509913a IS |
2440 | } |
2441 | ||
d46285a2 | 2442 | rte_spinlock_lock(&dev->stats_lock); |
2f862c71 SV |
2443 | netdev_dpdk_vhost_update_rx_counters(dev, batch->packets, |
2444 | nb_rx, qos_drops); | |
d46285a2 | 2445 | rte_spinlock_unlock(&dev->stats_lock); |
45d947c4 | 2446 | |
75fb9148 ZB |
2447 | batch->count = nb_rx; |
2448 | dp_packet_batch_init_packet_fields(batch); | |
2449 | ||
58397e6c KT |
2450 | return 0; |
2451 | } | |
2452 | ||
35c91567 DM |
2453 | static bool |
2454 | netdev_dpdk_vhost_rxq_enabled(struct netdev_rxq *rxq) | |
2455 | { | |
2456 | struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev); | |
2457 | ||
2458 | return dev->vhost_rxq_enabled[rxq->queue_id]; | |
2459 | } | |
2460 | ||
8a9562d2 | 2461 | static int |
8492adc2 JS |
2462 | netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch, |
2463 | int *qfill) | |
8a9562d2 | 2464 | { |
d46285a2 DDP |
2465 | struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq); |
2466 | struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev); | |
9509913a | 2467 | struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev); |
8a9562d2 | 2468 | int nb_rx; |
9509913a | 2469 | int dropped = 0; |
8a9562d2 | 2470 | |
3b1fb077 DDP |
2471 | if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) { |
2472 | return EAGAIN; | |
2473 | } | |
2474 | ||
d46285a2 | 2475 | nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id, |
64839cf4 | 2476 | (struct rte_mbuf **) batch->packets, |
cd159f1a | 2477 | NETDEV_MAX_BURST); |
8a9562d2 PS |
2478 | if (!nb_rx) { |
2479 | return EAGAIN; | |
2480 | } | |
2481 | ||
9509913a IS |
2482 | if (policer) { |
2483 | dropped = nb_rx; | |
64839cf4 | 2484 | nb_rx = ingress_policer_run(policer, |
58be5c0e | 2485 | (struct rte_mbuf **) batch->packets, |
3e90f7d7 | 2486 | nb_rx, true); |
9509913a IS |
2487 | dropped -= nb_rx; |
2488 | } | |
2489 | ||
2490 | /* Update stats to reflect dropped packets */ | |
2491 | if (OVS_UNLIKELY(dropped)) { | |
2492 | rte_spinlock_lock(&dev->stats_lock); | |
2493 | dev->stats.rx_dropped += dropped; | |
2f862c71 | 2494 | dev->sw_stats->rx_qos_drops += dropped; |
9509913a IS |
2495 | rte_spinlock_unlock(&dev->stats_lock); |
2496 | } | |
2497 | ||
64839cf4 | 2498 | batch->count = nb_rx; |
75fb9148 | 2499 | dp_packet_batch_init_packet_fields(batch); |
8a9562d2 | 2500 | |
8492adc2 JS |
2501 | if (qfill) { |
2502 | if (nb_rx == NETDEV_MAX_BURST) { | |
2503 | *qfill = rte_eth_rx_queue_count(rx->port_id, rxq->queue_id); | |
2504 | } else { | |
2505 | *qfill = 0; | |
2506 | } | |
2507 | } | |
2508 | ||
8a9562d2 PS |
2509 | return 0; |
2510 | } | |
2511 | ||
0bf765f7 | 2512 | static inline int |
78bd47cf | 2513 | netdev_dpdk_qos_run(struct netdev_dpdk *dev, struct rte_mbuf **pkts, |
7d7ded7a | 2514 | int cnt, bool should_steal) |
0bf765f7 | 2515 | { |
78bd47cf | 2516 | struct qos_conf *qos_conf = ovsrcu_get(struct qos_conf *, &dev->qos_conf); |
0bf765f7 | 2517 | |
78bd47cf DDP |
2518 | if (qos_conf) { |
2519 | rte_spinlock_lock(&qos_conf->lock); | |
7d7ded7a | 2520 | cnt = qos_conf->ops->qos_run(qos_conf, pkts, cnt, should_steal); |
78bd47cf | 2521 | rte_spinlock_unlock(&qos_conf->lock); |
0bf765f7 IS |
2522 | } |
2523 | ||
2524 | return cnt; | |
2525 | } | |
2526 | ||
c6ec9d17 IM |
2527 | static int |
2528 | netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts, | |
2529 | int pkt_cnt) | |
2530 | { | |
2531 | int i = 0; | |
2532 | int cnt = 0; | |
2533 | struct rte_mbuf *pkt; | |
2534 | ||
29cf9c1b | 2535 | /* Filter oversized packets, unless they are marked for TSO. */ |
c6ec9d17 IM |
2536 | for (i = 0; i < pkt_cnt; i++) { |
2537 | pkt = pkts[i]; | |
29cf9c1b FL |
2538 | if (OVS_UNLIKELY((pkt->pkt_len > dev->max_packet_len) |
2539 | && !(pkt->ol_flags & PKT_TX_TCP_SEG))) { | |
2540 | VLOG_WARN_RL(&rl, "%s: Too big size %" PRIu32 " " | |
2541 | "max_packet_len %d", dev->up.name, pkt->pkt_len, | |
2542 | dev->max_packet_len); | |
c6ec9d17 IM |
2543 | rte_pktmbuf_free(pkt); |
2544 | continue; | |
2545 | } | |
2546 | ||
2547 | if (OVS_UNLIKELY(i != cnt)) { | |
2548 | pkts[cnt] = pkt; | |
2549 | } | |
2550 | cnt++; | |
2551 | } | |
2552 | ||
2553 | return cnt; | |
2554 | } | |
2555 | ||
9e3ddd45 | 2556 | static inline void |
2f862c71 | 2557 | netdev_dpdk_vhost_update_tx_counters(struct netdev_dpdk *dev, |
9e3ddd45 TP |
2558 | struct dp_packet **packets, |
2559 | int attempted, | |
2f862c71 | 2560 | struct netdev_dpdk_sw_stats *sw_stats_add) |
9e3ddd45 | 2561 | { |
2f862c71 SV |
2562 | int dropped = sw_stats_add->tx_mtu_exceeded_drops + |
2563 | sw_stats_add->tx_qos_drops + | |
29cf9c1b FL |
2564 | sw_stats_add->tx_failure_drops + |
2565 | sw_stats_add->tx_invalid_hwol_drops; | |
2f862c71 | 2566 | struct netdev_stats *stats = &dev->stats; |
9e3ddd45 | 2567 | int sent = attempted - dropped; |
2f862c71 | 2568 | int i; |
9e3ddd45 TP |
2569 | |
2570 | stats->tx_packets += sent; | |
2571 | stats->tx_dropped += dropped; | |
2572 | ||
2573 | for (i = 0; i < sent; i++) { | |
2574 | stats->tx_bytes += dp_packet_size(packets[i]); | |
2575 | } | |
2f862c71 | 2576 | |
6d77abf4 KT |
2577 | if (OVS_UNLIKELY(dropped || sw_stats_add->tx_retries)) { |
2578 | struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats; | |
2579 | ||
2580 | sw_stats->tx_retries += sw_stats_add->tx_retries; | |
2581 | sw_stats->tx_failure_drops += sw_stats_add->tx_failure_drops; | |
2582 | sw_stats->tx_mtu_exceeded_drops += sw_stats_add->tx_mtu_exceeded_drops; | |
2583 | sw_stats->tx_qos_drops += sw_stats_add->tx_qos_drops; | |
29cf9c1b | 2584 | sw_stats->tx_invalid_hwol_drops += sw_stats_add->tx_invalid_hwol_drops; |
6d77abf4 | 2585 | } |
9e3ddd45 TP |
2586 | } |
2587 | ||
58397e6c | 2588 | static void |
4573fbd3 | 2589 | __netdev_dpdk_vhost_send(struct netdev *netdev, int qid, |
dd52de45 | 2590 | struct dp_packet **pkts, int cnt) |
58397e6c | 2591 | { |
d46285a2 | 2592 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
95e9881f | 2593 | struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts; |
2f862c71 SV |
2594 | struct netdev_dpdk_sw_stats sw_stats_add; |
2595 | unsigned int n_packets_to_free = cnt; | |
2596 | unsigned int total_packets = cnt; | |
dd52de45 | 2597 | int i, retries = 0; |
080f080c | 2598 | int max_retries = VHOST_ENQ_RETRY_MIN; |
daf22bf7 | 2599 | int vid = netdev_dpdk_get_vid(dev); |
58397e6c | 2600 | |
81acebda | 2601 | qid = dev->tx_q[qid % netdev->n_txq].map; |
585a5bea | 2602 | |
daf22bf7 | 2603 | if (OVS_UNLIKELY(vid < 0 || !dev->vhost_reconfigured || qid < 0 |
e543851d | 2604 | || !(dev->flags & NETDEV_UP))) { |
d46285a2 DDP |
2605 | rte_spinlock_lock(&dev->stats_lock); |
2606 | dev->stats.tx_dropped += cnt; | |
2607 | rte_spinlock_unlock(&dev->stats_lock); | |
1b99bb05 | 2608 | goto out; |
58397e6c KT |
2609 | } |
2610 | ||
9ff24b9c DM |
2611 | if (OVS_UNLIKELY(!rte_spinlock_trylock(&dev->tx_q[qid].tx_lock))) { |
2612 | COVERAGE_INC(vhost_tx_contention); | |
2613 | rte_spinlock_lock(&dev->tx_q[qid].tx_lock); | |
2614 | } | |
58397e6c | 2615 | |
29cf9c1b FL |
2616 | sw_stats_add.tx_invalid_hwol_drops = cnt; |
2617 | if (userspace_tso_enabled()) { | |
2618 | cnt = netdev_dpdk_prep_hwol_batch(dev, cur_pkts, cnt); | |
2619 | } | |
2620 | ||
2621 | sw_stats_add.tx_invalid_hwol_drops -= cnt; | |
2622 | sw_stats_add.tx_mtu_exceeded_drops = cnt; | |
c6ec9d17 | 2623 | cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt); |
29cf9c1b | 2624 | sw_stats_add.tx_mtu_exceeded_drops -= cnt; |
2f862c71 | 2625 | |
0bf765f7 | 2626 | /* Check if QoS has been configured for the netdev. */ |
2f862c71 | 2627 | sw_stats_add.tx_qos_drops = cnt; |
3e90f7d7 | 2628 | cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true); |
2f862c71 SV |
2629 | sw_stats_add.tx_qos_drops -= cnt; |
2630 | ||
2631 | n_packets_to_free = cnt; | |
0bf765f7 | 2632 | |
95e9881f | 2633 | do { |
4573fbd3 | 2634 | int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ; |
95e9881f KT |
2635 | unsigned int tx_pkts; |
2636 | ||
daf22bf7 | 2637 | tx_pkts = rte_vhost_enqueue_burst(vid, vhost_qid, cur_pkts, cnt); |
95e9881f KT |
2638 | if (OVS_LIKELY(tx_pkts)) { |
2639 | /* Packets have been sent. */ | |
2640 | cnt -= tx_pkts; | |
31871ee3 | 2641 | /* Prepare for possible retry. */ |
95e9881f | 2642 | cur_pkts = &cur_pkts[tx_pkts]; |
080f080c KT |
2643 | if (OVS_UNLIKELY(cnt && !retries)) { |
2644 | /* | |
2645 | * Read max retries as there are packets not sent | |
2646 | * and no retries have already occurred. | |
2647 | */ | |
2648 | atomic_read_relaxed(&dev->vhost_tx_retries_max, &max_retries); | |
2649 | } | |
95e9881f | 2650 | } else { |
31871ee3 KT |
2651 | /* No packets sent - do not retry. */ | |
2652 | break; | |
95e9881f | 2653 | } |
080f080c | 2654 | } while (cnt && (retries++ < max_retries)); |
4573fbd3 | 2655 | |
d46285a2 | 2656 | rte_spinlock_unlock(&dev->tx_q[qid].tx_lock); |
95e9881f | 2657 | |
2f862c71 SV |
2658 | sw_stats_add.tx_failure_drops = cnt; |
2659 | sw_stats_add.tx_retries = MIN(retries, max_retries); | |
2660 | ||
d46285a2 | 2661 | rte_spinlock_lock(&dev->stats_lock); |
2f862c71 SV |
2662 | netdev_dpdk_vhost_update_tx_counters(dev, pkts, total_packets, |
2663 | &sw_stats_add); | |
d46285a2 | 2664 | rte_spinlock_unlock(&dev->stats_lock); |
58397e6c KT |
2665 | |
2666 | out: | |
2f862c71 | 2667 | for (i = 0; i < n_packets_to_free; i++) { |
dd52de45 | 2668 | dp_packet_delete(pkts[i]); |
58397e6c KT |
2669 | } |
2670 | } | |
2671 | ||
29cf9c1b FL |
2672 | static void |
2673 | netdev_dpdk_extbuf_free(void *addr OVS_UNUSED, void *opaque) | |
2674 | { | |
2675 | rte_free(opaque); | |
2676 | } | |
2677 | ||
2678 | static struct rte_mbuf * | |
2679 | dpdk_pktmbuf_attach_extbuf(struct rte_mbuf *pkt, uint32_t data_len) | |
2680 | { | |
2681 | uint32_t total_len = RTE_PKTMBUF_HEADROOM + data_len; | |
2682 | struct rte_mbuf_ext_shared_info *shinfo = NULL; | |
2683 | uint16_t buf_len; | |
2684 | void *buf; | |
2685 | ||
2686 | if (rte_pktmbuf_tailroom(pkt) >= sizeof *shinfo) { | |
2687 | shinfo = rte_pktmbuf_mtod(pkt, struct rte_mbuf_ext_shared_info *); | |
2688 | } else { | |
2689 | total_len += sizeof *shinfo + sizeof(uintptr_t); | |
2690 | total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t)); | |
2691 | } | |
2692 | ||
2693 | if (OVS_UNLIKELY(total_len > UINT16_MAX)) { | |
2694 | VLOG_ERR("Can't copy packet: too big %u", total_len); | |
2695 | return NULL; | |
2696 | } | |
2697 | ||
2698 | buf_len = total_len; | |
2699 | buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE); | |
2700 | if (OVS_UNLIKELY(buf == NULL)) { | |
2701 | VLOG_ERR("Failed to allocate memory using rte_malloc: %u", buf_len); | |
2702 | return NULL; | |
2703 | } | |
2704 | ||
2705 | /* Initialize shinfo. */ | |
2706 | if (shinfo) { | |
2707 | shinfo->free_cb = netdev_dpdk_extbuf_free; | |
2708 | shinfo->fcb_opaque = buf; | |
2709 | rte_mbuf_ext_refcnt_set(shinfo, 1); | |
2710 | } else { | |
2711 | shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len, | |
2712 | netdev_dpdk_extbuf_free, | |
2713 | buf); | |
2714 | if (OVS_UNLIKELY(shinfo == NULL)) { | |
2715 | rte_free(buf); | |
2716 | VLOG_ERR("Failed to initialize shared info for mbuf while " | |
2717 | "attempting to attach an external buffer."); | |
2718 | return NULL; | |
2719 | } | |
2720 | } | |
2721 | ||
2722 | rte_pktmbuf_attach_extbuf(pkt, buf, rte_malloc_virt2iova(buf), buf_len, | |
2723 | shinfo); | |
2724 | rte_pktmbuf_reset_headroom(pkt); | |
2725 | ||
2726 | return pkt; | |
2727 | } | |
2728 | ||
2729 | static struct rte_mbuf * | |
2730 | dpdk_pktmbuf_alloc(struct rte_mempool *mp, uint32_t data_len) | |
2731 | { | |
2732 | struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp); | |
2733 | ||
2734 | if (OVS_UNLIKELY(!pkt)) { | |
2735 | return NULL; | |
2736 | } | |
2737 | ||
2738 | if (rte_pktmbuf_tailroom(pkt) >= data_len) { | |
2739 | return pkt; | |
2740 | } | |
2741 | ||
2742 | if (dpdk_pktmbuf_attach_extbuf(pkt, data_len)) { | |
2743 | return pkt; | |
2744 | } | |
2745 | ||
2746 | rte_pktmbuf_free(pkt); | |
2747 | ||
2748 | return NULL; | |
2749 | } | |
2750 | ||
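/* Usage note (hypothetical sizes): with a typical 2048-byte mbuf data
 * room, dpdk_pktmbuf_alloc(mp, 1500) returns a plain mempool mbuf,
 * while dpdk_pktmbuf_alloc(mp, 64000) - e.g. for a TSO packet - takes
 * the dpdk_pktmbuf_attach_extbuf() path, where the data lives in an
 * rte_malloc()ed buffer freed through the shared-info callback when
 * the mbuf's reference is dropped. */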
2751 | static struct dp_packet * | |
2752 | dpdk_copy_dp_packet_to_mbuf(struct rte_mempool *mp, struct dp_packet *pkt_orig) | |
2753 | { | |
2754 | struct rte_mbuf *mbuf_dest; | |
2755 | struct dp_packet *pkt_dest; | |
2756 | uint32_t pkt_len; | |
2757 | ||
2758 | pkt_len = dp_packet_size(pkt_orig); | |
2759 | mbuf_dest = dpdk_pktmbuf_alloc(mp, pkt_len); | |
2760 | if (OVS_UNLIKELY(mbuf_dest == NULL)) { | |
2761 | return NULL; | |
2762 | } | |
2763 | ||
2764 | pkt_dest = CONTAINER_OF(mbuf_dest, struct dp_packet, mbuf); | |
2765 | memcpy(dp_packet_data(pkt_dest), dp_packet_data(pkt_orig), pkt_len); | |
2766 | dp_packet_set_size(pkt_dest, pkt_len); | |
2767 | ||
2768 | mbuf_dest->tx_offload = pkt_orig->mbuf.tx_offload; | |
2769 | mbuf_dest->packet_type = pkt_orig->mbuf.packet_type; | |
2770 | mbuf_dest->ol_flags |= (pkt_orig->mbuf.ol_flags & | |
2771 | ~(EXT_ATTACHED_MBUF | IND_ATTACHED_MBUF)); | |
2772 | ||
2773 | memcpy(&pkt_dest->l2_pad_size, &pkt_orig->l2_pad_size, | |
2774 | sizeof(struct dp_packet) - offsetof(struct dp_packet, l2_pad_size)); | |
2775 | ||
2776 | if (mbuf_dest->ol_flags & PKT_TX_L4_MASK) { | |
2777 | mbuf_dest->l2_len = (char *)dp_packet_l3(pkt_dest) | |
2778 | - (char *)dp_packet_eth(pkt_dest); | |
2779 | mbuf_dest->l3_len = (char *)dp_packet_l4(pkt_dest) | |
2780 | - (char *) dp_packet_l3(pkt_dest); | |
2781 | } | |
2782 | ||
2783 | return pkt_dest; | |
2784 | } | |
2785 | ||
8a9562d2 PS |
2786 | /* Tx function. Copies packets into DPDK mbufs and transmits them. */
2787 | static void | |
64839cf4 | 2788 | dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch) |
db73f716 | 2789 | OVS_NO_THREAD_SAFETY_ANALYSIS |
8a9562d2 | 2790 | { |
8a14bd7b | 2791 | const size_t batch_cnt = dp_packet_batch_size(batch); |
bce01e3a | 2792 | #if !defined(__CHECKER__) && !defined(_WIN32) |
8a14bd7b | 2793 | const size_t PKT_ARRAY_SIZE = batch_cnt; |
bce01e3a EJ |
2794 | #else |
2795 | /* Sparse or MSVC doesn't like variable length array. */ | |
cd159f1a | 2796 | enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST }; |
bce01e3a | 2797 | #endif |
8a9562d2 | 2798 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
29cf9c1b | 2799 | struct dp_packet *pkts[PKT_ARRAY_SIZE]; |
2f862c71 | 2800 | struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats; |
8a14bd7b | 2801 | uint32_t cnt = batch_cnt; |
3e90f7d7 | 2802 | uint32_t dropped = 0; |
2f862c71 SV |
2803 | uint32_t tx_failure = 0; |
2804 | uint32_t mtu_drops = 0; | |
2805 | uint32_t qos_drops = 0; | |
3e90f7d7 GZ |
2806 | |
2807 | if (dev->type != DPDK_DEV_VHOST) { | |
2808 | /* Check if QoS has been configured for this netdev. */ | |
2809 | cnt = netdev_dpdk_qos_run(dev, (struct rte_mbuf **) batch->packets, | |
8a14bd7b | 2810 | batch_cnt, false); |
2f862c71 | 2811 | qos_drops = batch_cnt - cnt; |
3e90f7d7 | 2812 | } |
8a9562d2 | 2813 | |
3e90f7d7 GZ |
2814 | uint32_t txcnt = 0; |
2815 | ||
2816 | for (uint32_t i = 0; i < cnt; i++) { | |
8a14bd7b BB |
2817 | struct dp_packet *packet = batch->packets[i]; |
2818 | uint32_t size = dp_packet_size(packet); | |
95fb793a | 2819 | |
29cf9c1b FL |
2820 | if (size > dev->max_packet_len |
2821 | && !(packet->mbuf.ol_flags & PKT_TX_TCP_SEG)) { | |
2822 | VLOG_WARN_RL(&rl, "Too big size %u max_packet_len %d", size, | |
2823 | dev->max_packet_len); | |
2f862c71 | 2824 | mtu_drops++; |
f4fd623c DDP |
2825 | continue; |
2826 | } | |
8a9562d2 | 2827 | |
29cf9c1b | 2828 | pkts[txcnt] = dpdk_copy_dp_packet_to_mbuf(dev->dpdk_mp->mp, packet); |
8a14bd7b | 2829 | if (OVS_UNLIKELY(!pkts[txcnt])) { |
2f862c71 | 2830 | dropped = cnt - i; |
175cf4de | 2831 | break; |
f4fd623c DDP |
2832 | } |
2833 | ||
3e90f7d7 | 2834 | txcnt++; |
f4fd623c | 2835 | } |
8a9562d2 | 2836 | |
3e90f7d7 GZ |
2837 | if (OVS_LIKELY(txcnt)) { |
2838 | if (dev->type == DPDK_DEV_VHOST) { | |
29cf9c1b | 2839 | __netdev_dpdk_vhost_send(netdev, qid, pkts, txcnt); |
3e90f7d7 | 2840 | } else { |
29cf9c1b FL |
2841 | tx_failure += netdev_dpdk_eth_tx_burst(dev, qid, |
2842 | (struct rte_mbuf **)pkts, | |
2843 | txcnt); | |
3e90f7d7 | 2844 | } |
58397e6c | 2845 | } |
db73f716 | 2846 | |
2f862c71 | 2847 | dropped += qos_drops + mtu_drops + tx_failure; |
0bf765f7 IS |
2848 | if (OVS_UNLIKELY(dropped)) { |
2849 | rte_spinlock_lock(&dev->stats_lock); | |
2850 | dev->stats.tx_dropped += dropped; | |
2f862c71 SV |
2851 | sw_stats->tx_failure_drops += tx_failure; |
2852 | sw_stats->tx_mtu_exceeded_drops += mtu_drops; | |
2853 | sw_stats->tx_qos_drops += qos_drops; | |
0bf765f7 IS |
2854 | rte_spinlock_unlock(&dev->stats_lock); |
2855 | } | |
8a9562d2 PS |
2856 | } |
2857 | ||
58397e6c | 2858 | static int |
64839cf4 WT |
2859 | netdev_dpdk_vhost_send(struct netdev *netdev, int qid, |
2860 | struct dp_packet_batch *batch, | |
b30896c9 | 2861 | bool concurrent_txq OVS_UNUSED) |
58397e6c | 2862 | { |
58397e6c | 2863 | |
b30896c9 | 2864 | if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) { |
64839cf4 | 2865 | dpdk_do_tx_copy(netdev, qid, batch); |
b30896c9 | 2866 | dp_packet_delete_batch(batch, true); |
58397e6c | 2867 | } else { |
940ac2ce PC |
2868 | __netdev_dpdk_vhost_send(netdev, qid, batch->packets, |
2869 | dp_packet_batch_size(batch)); | |
58397e6c KT |
2870 | } |
2871 | return 0; | |
2872 | } | |
2873 | ||
7251515e DV |
2874 | static inline void |
2875 | netdev_dpdk_send__(struct netdev_dpdk *dev, int qid, | |
b30896c9 | 2876 | struct dp_packet_batch *batch, |
324c8374 | 2877 | bool concurrent_txq) |
8a9562d2 | 2878 | { |
3b1fb077 | 2879 | if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) { |
b30896c9 | 2880 | dp_packet_delete_batch(batch, true); |
3b1fb077 DDP |
2881 | return; |
2882 | } | |
2883 | ||
324c8374 | 2884 | if (OVS_UNLIKELY(concurrent_txq)) { |
81acebda | 2885 | qid = qid % dev->up.n_txq; |
a0cb2d66 DDP |
2886 | rte_spinlock_lock(&dev->tx_q[qid].tx_lock); |
2887 | } | |
2888 | ||
b30896c9 | 2889 | if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) { |
7251515e DV |
2890 | struct netdev *netdev = &dev->up; |
2891 | ||
64839cf4 | 2892 | dpdk_do_tx_copy(netdev, qid, batch); |
b30896c9 | 2893 | dp_packet_delete_batch(batch, true); |
8a9562d2 | 2894 | } else { |
2f862c71 | 2895 | struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats; |
29cf9c1b FL |
2896 | int dropped; |
2897 | int tx_failure, mtu_drops, qos_drops, hwol_drops; | |
fd57eeba | 2898 | int batch_cnt = dp_packet_batch_size(batch); |
2391135c | 2899 | struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets; |
8a9562d2 | 2900 | |
29cf9c1b FL |
2901 | hwol_drops = batch_cnt; |
2902 | if (userspace_tso_enabled()) { | |
2903 | batch_cnt = netdev_dpdk_prep_hwol_batch(dev, pkts, batch_cnt); | |
2904 | } | |
2905 | hwol_drops -= batch_cnt; | |
2906 | mtu_drops = batch_cnt; | |
2907 | batch_cnt = netdev_dpdk_filter_packet_len(dev, pkts, batch_cnt); | |
2908 | mtu_drops -= batch_cnt; | |
2909 | qos_drops = batch_cnt; | |
2910 | batch_cnt = netdev_dpdk_qos_run(dev, pkts, batch_cnt, true); | |
2911 | qos_drops -= batch_cnt; | |
1b99bb05 | 2912 | |
29cf9c1b | 2913 | tx_failure = netdev_dpdk_eth_tx_burst(dev, qid, pkts, batch_cnt); |
8a9562d2 | 2914 | |
29cf9c1b | 2915 | dropped = tx_failure + mtu_drops + qos_drops + hwol_drops; |
f4fd623c | 2916 | if (OVS_UNLIKELY(dropped)) { |
45d947c4 | 2917 | rte_spinlock_lock(&dev->stats_lock); |
f4fd623c | 2918 | dev->stats.tx_dropped += dropped; |
2f862c71 SV |
2919 | sw_stats->tx_failure_drops += tx_failure; |
2920 | sw_stats->tx_mtu_exceeded_drops += mtu_drops; | |
2921 | sw_stats->tx_qos_drops += qos_drops; | |
29cf9c1b | 2922 | sw_stats->tx_invalid_hwol_drops += hwol_drops; |
45d947c4 | 2923 | rte_spinlock_unlock(&dev->stats_lock); |
f4fd623c | 2924 | } |
8a9562d2 | 2925 | } |
a0cb2d66 | 2926 | |
324c8374 | 2927 | if (OVS_UNLIKELY(concurrent_txq)) { |
a0cb2d66 DDP |
2928 | rte_spinlock_unlock(&dev->tx_q[qid].tx_lock); |
2929 | } | |
7251515e DV |
2930 | } |
2931 | ||
2932 | static int | |
2933 | netdev_dpdk_eth_send(struct netdev *netdev, int qid, | |
b30896c9 | 2934 | struct dp_packet_batch *batch, bool concurrent_txq) |
7251515e DV |
2935 | { |
2936 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
8a9562d2 | 2937 | |
b30896c9 | 2938 | netdev_dpdk_send__(dev, qid, batch, concurrent_txq); |
7251515e | 2939 | return 0; |
8a9562d2 PS |
2940 | } |
2941 | ||
f9b0107d IM |
2942 | static int |
2943 | netdev_dpdk_set_etheraddr__(struct netdev_dpdk *dev, const struct eth_addr mac) | |
2944 | OVS_REQUIRES(dev->mutex) | |
2945 | { | |
2946 | int err = 0; | |
2947 | ||
2948 | if (dev->type == DPDK_DEV_ETH) { | |
2949 | struct rte_ether_addr ea; | |
2950 | ||
2951 | memcpy(ea.addr_bytes, mac.ea, ETH_ADDR_LEN); | |
2952 | err = -rte_eth_dev_default_mac_addr_set(dev->port_id, &ea); | |
2953 | } | |
2954 | if (!err) { | |
2955 | dev->hwaddr = mac; | |
2956 | } else { | |
2957 | VLOG_WARN("%s: Failed to set requested mac("ETH_ADDR_FMT"): %s", | |
2958 | netdev_get_name(&dev->up), ETH_ADDR_ARGS(mac), | |
2959 | rte_strerror(err)); | |
2960 | } | |
2961 | ||
2962 | return err; | |
2963 | } | |
2964 | ||
8a9562d2 | 2965 | static int |
74ff3298 | 2966 | netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac) |
8a9562d2 PS |
2967 | { |
2968 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
f9b0107d | 2969 | int err = 0; |
8a9562d2 PS |
2970 | |
2971 | ovs_mutex_lock(&dev->mutex); | |
2972 | if (!eth_addr_equals(dev->hwaddr, mac)) { | |
f9b0107d IM |
2973 | err = netdev_dpdk_set_etheraddr__(dev, mac); |
2974 | if (!err) { | |
2975 | netdev_change_seq_changed(netdev); | |
2976 | } | |
8a9562d2 PS |
2977 | } |
2978 | ovs_mutex_unlock(&dev->mutex); | |
2979 | ||
f9b0107d | 2980 | return err; |
8a9562d2 PS |
2981 | } |
2982 | ||
2983 | static int | |
74ff3298 | 2984 | netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac) |
8a9562d2 PS |
2985 | { |
2986 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
2987 | ||
2988 | ovs_mutex_lock(&dev->mutex); | |
74ff3298 | 2989 | *mac = dev->hwaddr; |
8a9562d2 PS |
2990 | ovs_mutex_unlock(&dev->mutex); |
2991 | ||
2992 | return 0; | |
2993 | } | |
2994 | ||
2995 | static int | |
2996 | netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup) | |
2997 | { | |
2998 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
2999 | ||
3000 | ovs_mutex_lock(&dev->mutex); | |
3001 | *mtup = dev->mtu; | |
3002 | ovs_mutex_unlock(&dev->mutex); | |
3003 | ||
3004 | return 0; | |
3005 | } | |
3006 | ||
0072e931 MK |
3007 | static int |
3008 | netdev_dpdk_set_mtu(struct netdev *netdev, int mtu) | |
3009 | { | |
3010 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
3011 | ||
f6f50552 IS |
3012 | /* XXX: Ensure that the overall frame length of the requested MTU does not |
3013 | * surpass the NETDEV_DPDK_MAX_PKT_LEN. DPDK device drivers differ in how | |
3014 | * the L2 frame length is calculated for a given MTU when | |
3015 | * rte_eth_dev_set_mtu(mtu) is called e.g. i40e driver includes 2 x vlan | |
3016 | * headers, the em driver includes 1 x vlan header, the ixgbe driver does | |
3017 | * not include vlan headers. As such we should use | |
3018 | * MTU_TO_MAX_FRAME_LEN(mtu) which includes an additional 2 x vlan headers | |
3019 | * (8 bytes) for comparison. This avoids a failure later with | |
3020 | * rte_eth_dev_set_mtu(). This approach should be used until DPDK provides | |
3021 | * a method to retrieve the upper bound MTU for a given device. | |
3022 | */ | |
3023 | if (MTU_TO_MAX_FRAME_LEN(mtu) > NETDEV_DPDK_MAX_PKT_LEN | |
127b6a6e | 3024 | || mtu < RTE_ETHER_MIN_MTU) { |
0072e931 MK |
3025 | VLOG_WARN("%s: unsupported MTU %d\n", dev->up.name, mtu); |
3026 | return EINVAL; | |
3027 | } | |
3028 | ||
3029 | ovs_mutex_lock(&dev->mutex); | |
3030 | if (dev->requested_mtu != mtu) { | |
3031 | dev->requested_mtu = mtu; | |
3032 | netdev_request_reconfigure(netdev); | |
3033 | } | |
3034 | ovs_mutex_unlock(&dev->mutex); | |
3035 | ||
3036 | return 0; | |
3037 | } | |
3038 | ||
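/* Worked example of the bounds check above, assuming MTU_TO_MAX_FRAME_LEN()
 * (defined earlier in this file) adds the Ethernet header, CRC and two VLAN
 * headers (14 + 4 + 2 * 4 = 26 bytes) on top of the MTU, and that
 * NETDEV_DPDK_MAX_PKT_LEN is 9728:
 *
 *     MTU_TO_MAX_FRAME_LEN(9702) = 9702 + 26 = 9728  -> accepted
 *     MTU_TO_MAX_FRAME_LEN(9703) = 9703 + 26 = 9729  -> rejected, EINVAL
 *
 * Comparing against the worst-case frame overhead keeps a later
 * rte_eth_dev_set_mtu() call from failing for drivers that add VLAN
 * headers themselves. */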
8a9562d2 | 3039 | static int |
d46285a2 | 3040 | netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier); |
8a9562d2 | 3041 | |
58397e6c KT |
3042 | static int |
3043 | netdev_dpdk_vhost_get_stats(const struct netdev *netdev, | |
3044 | struct netdev_stats *stats) | |
3045 | { | |
3046 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
3047 | ||
3048 | ovs_mutex_lock(&dev->mutex); | |
58397e6c | 3049 | |
45d947c4 | 3050 | rte_spinlock_lock(&dev->stats_lock); |
58397e6c | 3051 | /* Supported Stats */ |
50986e78 | 3052 | stats->rx_packets = dev->stats.rx_packets; |
3053 | stats->tx_packets = dev->stats.tx_packets; | |
9509913a | 3054 | stats->rx_dropped = dev->stats.rx_dropped; |
50986e78 | 3055 | stats->tx_dropped = dev->stats.tx_dropped; |
9e3ddd45 TP |
3056 | stats->multicast = dev->stats.multicast; |
3057 | stats->rx_bytes = dev->stats.rx_bytes; | |
3058 | stats->tx_bytes = dev->stats.tx_bytes; | |
3059 | stats->rx_errors = dev->stats.rx_errors; | |
3060 | stats->rx_length_errors = dev->stats.rx_length_errors; | |
d6e3feb5 | 3061 | |
3062 | stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets; | |
3063 | stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets; | |
3064 | stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets; | |
3065 | stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets; | |
3066 | stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets; | |
3067 | stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets; | |
3068 | stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets; | |
3069 | ||
45d947c4 | 3070 | rte_spinlock_unlock(&dev->stats_lock); |
9e3ddd45 | 3071 | |
58397e6c KT |
3072 | ovs_mutex_unlock(&dev->mutex); |
3073 | ||
3074 | return 0; | |
3075 | } | |
3076 | ||
d6e3feb5 | 3077 | static void |
3078 | netdev_dpdk_convert_xstats(struct netdev_stats *stats, | |
0a0f39df CL |
3079 | const struct rte_eth_xstat *xstats, |
3080 | const struct rte_eth_xstat_name *names, | |
d6e3feb5 | 3081 | const unsigned int size) |
3082 | { | |
18366d16 IM |
3083 | /* DPDK XSTATS Counter names definition. */ |
3084 | #define DPDK_XSTATS \ | |
3085 | DPDK_XSTAT(multicast, "rx_multicast_packets" ) \ | |
3086 | DPDK_XSTAT(tx_multicast_packets, "tx_multicast_packets" ) \ | |
3087 | DPDK_XSTAT(rx_broadcast_packets, "rx_broadcast_packets" ) \ | |
3088 | DPDK_XSTAT(tx_broadcast_packets, "tx_broadcast_packets" ) \ | |
3089 | DPDK_XSTAT(rx_undersized_errors, "rx_undersized_errors" ) \ | |
3090 | DPDK_XSTAT(rx_oversize_errors, "rx_oversize_errors" ) \ | |
3091 | DPDK_XSTAT(rx_fragmented_errors, "rx_fragmented_errors" ) \ | |
3092 | DPDK_XSTAT(rx_jabber_errors, "rx_jabber_errors" ) \ | |
3093 | DPDK_XSTAT(rx_1_to_64_packets, "rx_size_64_packets" ) \ | |
3094 | DPDK_XSTAT(rx_65_to_127_packets, "rx_size_65_to_127_packets" ) \ | |
3095 | DPDK_XSTAT(rx_128_to_255_packets, "rx_size_128_to_255_packets" ) \ | |
3096 | DPDK_XSTAT(rx_256_to_511_packets, "rx_size_256_to_511_packets" ) \ | |
3097 | DPDK_XSTAT(rx_512_to_1023_packets, "rx_size_512_to_1023_packets" ) \ | |
3098 | DPDK_XSTAT(rx_1024_to_1522_packets, "rx_size_1024_to_1522_packets" ) \ | |
3099 | DPDK_XSTAT(rx_1523_to_max_packets, "rx_size_1523_to_max_packets" ) \ | |
3100 | DPDK_XSTAT(tx_1_to_64_packets, "tx_size_64_packets" ) \ | |
3101 | DPDK_XSTAT(tx_65_to_127_packets, "tx_size_65_to_127_packets" ) \ | |
3102 | DPDK_XSTAT(tx_128_to_255_packets, "tx_size_128_to_255_packets" ) \ | |
3103 | DPDK_XSTAT(tx_256_to_511_packets, "tx_size_256_to_511_packets" ) \ | |
3104 | DPDK_XSTAT(tx_512_to_1023_packets, "tx_size_512_to_1023_packets" ) \ | |
3105 | DPDK_XSTAT(tx_1024_to_1522_packets, "tx_size_1024_to_1522_packets" ) \ | |
3106 | DPDK_XSTAT(tx_1523_to_max_packets, "tx_size_1523_to_max_packets" ) | |
3107 | ||
d6e3feb5 | 3108 | for (unsigned int i = 0; i < size; i++) { |
18366d16 IM |
3109 | #define DPDK_XSTAT(MEMBER, NAME) \ |
3110 | if (strcmp(NAME, names[i].name) == 0) { \ | |
3111 | stats->MEMBER = xstats[i].value; \ | |
3112 | continue; \ | |
d6e3feb5 | 3113 | } |
18366d16 IM |
3114 | DPDK_XSTATS; |
3115 | #undef DPDK_XSTAT | |
d6e3feb5 | 3116 | } |
18366d16 | 3117 | #undef DPDK_XSTATS |
d6e3feb5 | 3118 | } |
3119 | ||
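/* For reference, each DPDK_XSTAT(MEMBER, NAME) entry in the loop above
 * expands to a guarded assignment; the first entry, for example, becomes:
 *
 *     if (strcmp("rx_multicast_packets", names[i].name) == 0) {
 *         stats->multicast = xstats[i].value;
 *         continue;
 *     }
 *
 * so only the xstats whose names are known to OVS are copied over. */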
8a9562d2 PS |
3120 | static int |
3121 | netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats) | |
3122 | { | |
3123 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
3124 | struct rte_eth_stats rte_stats; | |
3125 | bool gg; | |
3126 | ||
3127 | netdev_dpdk_get_carrier(netdev, &gg); | |
3128 | ovs_mutex_lock(&dev->mutex); | |
8a9562d2 | 3129 | |
0a0f39df CL |
3130 | struct rte_eth_xstat *rte_xstats = NULL; |
3131 | struct rte_eth_xstat_name *rte_xstats_names = NULL; | |
3132 | int rte_xstats_len, rte_xstats_new_len, rte_xstats_ret; | |
d6e3feb5 | 3133 | |
3134 | if (rte_eth_stats_get(dev->port_id, &rte_stats)) { | |
fa9f4eeb IM |
3135 | VLOG_ERR("Can't get ETH statistics for port: "DPDK_PORT_ID_FMT, |
3136 | dev->port_id); | |
f9256822 | 3137 | ovs_mutex_unlock(&dev->mutex); |
d6e3feb5 | 3138 | return EPROTO; |
3139 | } | |
3140 | ||
0a0f39df CL |
3141 | /* Get length of statistics */ |
3142 | rte_xstats_len = rte_eth_xstats_get_names(dev->port_id, NULL, 0); | |
3143 | if (rte_xstats_len < 0) { | |
fa9f4eeb IM |
3144 | VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT, |
3145 | dev->port_id); | |
0a0f39df CL |
3146 | goto out; |
3147 | } | |
3148 | /* Reserve memory for xstats names and values */ | |
3149 | rte_xstats_names = xcalloc(rte_xstats_len, sizeof *rte_xstats_names); | |
3150 | rte_xstats = xcalloc(rte_xstats_len, sizeof *rte_xstats); | |
3151 | ||
3152 | /* Retrieve xstats names. */	| |
3153 | rte_xstats_new_len = rte_eth_xstats_get_names(dev->port_id, | |
3154 | rte_xstats_names, | |
3155 | rte_xstats_len); | |
3156 | if (rte_xstats_new_len != rte_xstats_len) { | |
fa9f4eeb IM |
3157 | VLOG_WARN("Cannot get XSTATS names for port: "DPDK_PORT_ID_FMT, |
3158 | dev->port_id); | |
0a0f39df CL |
3159 | goto out; |
3160 | } | |
3161 | /* Retrieve xstats values. */	| |
3162 | memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len); | |
3163 | rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats, | |
3164 | rte_xstats_len); | |
3165 | if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) { | |
3166 | netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_names, | |
3167 | rte_xstats_len); | |
d6e3feb5 | 3168 | } else { |
fa9f4eeb IM |
3169 | VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT, |
3170 | dev->port_id); | |
d6e3feb5 | 3171 | } |
8a9562d2 | 3172 | |
0a0f39df CL |
3173 | out: |
3174 | free(rte_xstats); | |
3175 | free(rte_xstats_names); | |
3176 | ||
2f9dd77f PS |
3177 | stats->rx_packets = rte_stats.ipackets; |
3178 | stats->tx_packets = rte_stats.opackets; | |
3179 | stats->rx_bytes = rte_stats.ibytes; | |
3180 | stats->tx_bytes = rte_stats.obytes; | |
21e9844c | 3181 | stats->rx_errors = rte_stats.ierrors; |
2f9dd77f | 3182 | stats->tx_errors = rte_stats.oerrors; |
8a9562d2 | 3183 | |
45d947c4 | 3184 | rte_spinlock_lock(&dev->stats_lock); |
2f9dd77f | 3185 | stats->tx_dropped = dev->stats.tx_dropped; |
9509913a | 3186 | stats->rx_dropped = dev->stats.rx_dropped; |
45d947c4 | 3187 | rte_spinlock_unlock(&dev->stats_lock); |
9e3ddd45 TP |
3188 | |
3189 | /* rx_nombuf counts packets not received because DPDK ran out of mbufs;	| |
3190 | * imissed counts packets the NIC dropped for lack of receive resources. */	| |
9509913a | 3191 | stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed; |
9e3ddd45 TP |
3192 | stats->rx_missed_errors = rte_stats.imissed; |
3193 | ||
8a9562d2 PS |
3194 | ovs_mutex_unlock(&dev->mutex); |
3195 | ||
3196 | return 0; | |
3197 | } | |
3198 | ||
971f4b39 MW |
3199 | static int |
3200 | netdev_dpdk_get_custom_stats(const struct netdev *netdev, | |
3201 | struct netdev_custom_stats *custom_stats) | |
3202 | { | |
3203 | ||
3204 | uint32_t i; | |
3205 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
b99ab8aa IM |
3206 | int rte_xstats_ret, sw_stats_size; |
3207 | ||
3208 | netdev_dpdk_get_sw_custom_stats(netdev, custom_stats); | |
971f4b39 MW |
3209 | |
3210 | ovs_mutex_lock(&dev->mutex); | |
3211 | ||
3212 | if (netdev_dpdk_configure_xstats(dev)) { | |
3213 | uint64_t *values = xcalloc(dev->rte_xstats_ids_size, | |
3214 | sizeof(uint64_t)); | |
3215 | ||
3216 | rte_xstats_ret = | |
3217 | rte_eth_xstats_get_by_id(dev->port_id, dev->rte_xstats_ids, | |
3218 | values, dev->rte_xstats_ids_size); | |
3219 | ||
3220 | if (rte_xstats_ret > 0 && | |
3221 | rte_xstats_ret <= dev->rte_xstats_ids_size) { | |
3222 | ||
b99ab8aa IM |
3223 | sw_stats_size = custom_stats->size; |
3224 | custom_stats->size += rte_xstats_ret; | |
3225 | custom_stats->counters = xrealloc(custom_stats->counters, | |
3226 | custom_stats->size * | |
3227 | sizeof *custom_stats->counters); | |
971f4b39 MW |
3228 | |
3229 | for (i = 0; i < rte_xstats_ret; i++) { | |
b99ab8aa | 3230 | ovs_strlcpy(custom_stats->counters[sw_stats_size + i].name, |
971f4b39 MW |
3231 | netdev_dpdk_get_xstat_name(dev, |
3232 | dev->rte_xstats_ids[i]), | |
3233 | NETDEV_CUSTOM_STATS_NAME_SIZE); | |
b99ab8aa | 3234 | custom_stats->counters[sw_stats_size + i].value = values[i]; |
971f4b39 MW |
3235 | } |
3236 | } else { | |
fa9f4eeb | 3237 | VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT, |
971f4b39 | 3238 | dev->port_id); |
971f4b39 MW |
3239 | /* Clear the statistics cache so that it will be	| |
3240 | * reconfigured on the next call. */	| |
3241 | netdev_dpdk_clear_xstats(dev); | |
3242 | } | |
526259f2 IM |
3243 | |
3244 | free(values); | |
971f4b39 MW |
3245 | } |
3246 | ||
3247 | ovs_mutex_unlock(&dev->mutex); | |
3248 | ||
3249 | return 0; | |
3250 | } | |
3251 | ||
c161357d | 3252 | static int |
b99ab8aa IM |
3253 | netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev, |
3254 | struct netdev_custom_stats *custom_stats) | |
c161357d KT |
3255 | { |
3256 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
b99ab8aa | 3257 | int i, n; |
c161357d | 3258 | |
2f862c71 SV |
3259 | #define SW_CSTATS \ |
3260 | SW_CSTAT(tx_retries) \ | |
3261 | SW_CSTAT(tx_failure_drops) \ | |
3262 | SW_CSTAT(tx_mtu_exceeded_drops) \ | |
3263 | SW_CSTAT(tx_qos_drops) \ | |
29cf9c1b FL |
3264 | SW_CSTAT(rx_qos_drops) \ |
3265 | SW_CSTAT(tx_invalid_hwol_drops) | |
c161357d | 3266 | |
b99ab8aa IM |
3267 | #define SW_CSTAT(NAME) + 1 |
3268 | custom_stats->size = SW_CSTATS; | |
3269 | #undef SW_CSTAT | |
c161357d KT |
3270 | custom_stats->counters = xcalloc(custom_stats->size, |
3271 | sizeof *custom_stats->counters); | |
5c7ba90d IM |
3272 | |
3273 | ovs_mutex_lock(&dev->mutex); | |
c161357d KT |
3274 | |
3275 | rte_spinlock_lock(&dev->stats_lock); | |
5c7ba90d | 3276 | i = 0; |
b99ab8aa | 3277 | #define SW_CSTAT(NAME) \ |
2f862c71 | 3278 | custom_stats->counters[i++].value = dev->sw_stats->NAME; |
b99ab8aa IM |
3279 | SW_CSTATS; |
3280 | #undef SW_CSTAT | |
c161357d KT |
3281 | rte_spinlock_unlock(&dev->stats_lock); |
3282 | ||
3283 | ovs_mutex_unlock(&dev->mutex); | |
3284 | ||
b99ab8aa IM |
3285 | i = 0; |
3286 | n = 0; | |
3287 | #define SW_CSTAT(NAME) \ | |
3288 | if (custom_stats->counters[i].value != UINT64_MAX) { \ | |
2f862c71 SV |
3289 | ovs_strlcpy(custom_stats->counters[n].name, \ |
3290 | "ovs_"#NAME, NETDEV_CUSTOM_STATS_NAME_SIZE); \ | |
b99ab8aa IM |
3291 | custom_stats->counters[n].value = custom_stats->counters[i].value; \ |
3292 | n++; \ | |
3293 | } \ | |
3294 | i++; | |
3295 | SW_CSTATS; | |
3296 | #undef SW_CSTAT | |
3297 | ||
3298 | custom_stats->size = n; | |
c161357d KT |
3299 | return 0; |
3300 | } | |
3301 | ||
8a9562d2 | 3302 | static int |
d46285a2 | 3303 | netdev_dpdk_get_features(const struct netdev *netdev, |
8a9562d2 | 3304 | enum netdev_features *current, |
ca3d4f55 BX |
3305 | enum netdev_features *advertised, |
3306 | enum netdev_features *supported, | |
3307 | enum netdev_features *peer) | |
8a9562d2 | 3308 | { |
d46285a2 | 3309 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 | 3310 | struct rte_eth_link link; |
dfcb5b8a | 3311 | uint32_t feature = 0; |
8a9562d2 PS |
3312 | |
3313 | ovs_mutex_lock(&dev->mutex); | |
3314 | link = dev->link; | |
3315 | ovs_mutex_unlock(&dev->mutex); | |
3316 | ||
dfcb5b8a IS |
3317 | /* Match against OpenFlow defined link speed values. */ |
3318 | if (link.link_duplex == ETH_LINK_FULL_DUPLEX) { | |
3319 | switch (link.link_speed) { | |
3320 | case ETH_SPEED_NUM_10M: | |
3321 | feature |= NETDEV_F_10MB_FD; | |
3322 | break; | |
3323 | case ETH_SPEED_NUM_100M: | |
3324 | feature |= NETDEV_F_100MB_FD; | |
3325 | break; | |
3326 | case ETH_SPEED_NUM_1G: | |
3327 | feature |= NETDEV_F_1GB_FD; | |
3328 | break; | |
3329 | case ETH_SPEED_NUM_10G: | |
3330 | feature |= NETDEV_F_10GB_FD; | |
3331 | break; | |
3332 | case ETH_SPEED_NUM_40G: | |
3333 | feature |= NETDEV_F_40GB_FD; | |
3334 | break; | |
3335 | case ETH_SPEED_NUM_100G: | |
3336 | feature |= NETDEV_F_100GB_FD; | |
3337 | break; | |
3338 | default: | |
3339 | feature |= NETDEV_F_OTHER; | |
8a9562d2 | 3340 | } |
dfcb5b8a IS |
3341 | } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) { |
3342 | switch (link.link_speed) { | |
3343 | case ETH_SPEED_NUM_10M: | |
3344 | feature |= NETDEV_F_10MB_HD; | |
3345 | break; | |
3346 | case ETH_SPEED_NUM_100M: | |
3347 | feature |= NETDEV_F_100MB_HD; | |
3348 | break; | |
3349 | case ETH_SPEED_NUM_1G: | |
3350 | feature |= NETDEV_F_1GB_HD; | |
3351 | break; | |
3352 | default: | |
3353 | feature |= NETDEV_F_OTHER; | |
74cd69a4 | 3354 | } |
8a9562d2 PS |
3355 | } |
3356 | ||
362ca396 | 3357 | if (link.link_autoneg) { |
dfcb5b8a | 3358 | feature |= NETDEV_F_AUTONEG; |
362ca396 | 3359 | } |
3360 | ||
dfcb5b8a | 3361 | *current = feature; |
ca3d4f55 BX |
3362 | *advertised = *supported = *peer = 0; |
3363 | ||
8a9562d2 PS |
3364 | return 0; |
3365 | } | |
3366 | ||
9509913a IS |
3367 | static struct ingress_policer * |
3368 | netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst) | |
3369 | { | |
3370 | struct ingress_policer *policer = NULL; | |
3371 | uint64_t rate_bytes; | |
3372 | uint64_t burst_bytes; | |
3373 | int err = 0; | |
3374 | ||
3375 | policer = xmalloc(sizeof *policer); | |
3376 | rte_spinlock_init(&policer->policer_lock); | |
3377 | ||
3378 | /* rte_meter requires bytes so convert kbits rate and burst to bytes. */ | |
602c8668 LR |
3379 | rate_bytes = rate * 1000ULL / 8; |
3380 | burst_bytes = burst * 1000ULL / 8; | |
9509913a IS |
3381 | |
3382 | policer->app_srtcm_params.cir = rate_bytes; | |
3383 | policer->app_srtcm_params.cbs = burst_bytes; | |
3384 | policer->app_srtcm_params.ebs = 0; | |
03f3f9c0 OM |
3385 | err = rte_meter_srtcm_profile_config(&policer->in_prof, |
3386 | &policer->app_srtcm_params); | |
3387 | if (!err) { | |
3388 | err = rte_meter_srtcm_config(&policer->in_policer, | |
3389 | &policer->in_prof); | |
3390 | } | |
58be5c0e | 3391 | if (err) { |
9509913a | 3392 | VLOG_ERR("Could not create rte meter for ingress policer"); |
4c47ddde | 3393 | free(policer); |
9509913a IS |
3394 | return NULL; |
3395 | } | |
3396 | ||
3397 | return policer; | |
3398 | } | |
3399 | ||
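/* Example of the kbit-to-byte conversion above: a configured rate of
 * 10000 kbit/s with a burst of 8000 kbit becomes
 *
 *     cir = 10000 * 1000 / 8 = 1250000 bytes/s
 *     cbs =  8000 * 1000 / 8 = 1000000 bytes
 *
 * which are the units rte_meter_srtcm_profile_config() expects. */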
3400 | static int | |
3401 | netdev_dpdk_set_policing(struct netdev* netdev, uint32_t policer_rate, | |
3402 | uint32_t policer_burst) | |
3403 | { | |
3404 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
3405 | struct ingress_policer *policer; | |
3406 | ||
3407 | /* Force to 0 if no rate specified, | |
3408 | * default to 8000 kbits if burst is 0, | |
3409 | * else stick with user-specified value. | |
3410 | */ | |
3411 | policer_burst = (!policer_rate ? 0 | |
3412 | : !policer_burst ? 8000 | |
3413 | : policer_burst); | |
3414 | ||
3415 | ovs_mutex_lock(&dev->mutex); | |
3416 | ||
3417 | policer = ovsrcu_get_protected(struct ingress_policer *, | |
3418 | &dev->ingress_policer); | |
3419 | ||
3420 | if (dev->policer_rate == policer_rate && | |
3421 | dev->policer_burst == policer_burst) { | |
3422 | /* Assume that settings haven't changed since we last set them. */ | |
3423 | ovs_mutex_unlock(&dev->mutex); | |
3424 | return 0; | |
3425 | } | |
3426 | ||
3427 | /* Destroy any existing ingress policer for the device if one exists */ | |
3428 | if (policer) { | |
3429 | ovsrcu_postpone(free, policer); | |
3430 | } | |
3431 | ||
3432 | if (policer_rate != 0) { | |
3433 | policer = netdev_dpdk_policer_construct(policer_rate, policer_burst); | |
3434 | } else { | |
3435 | policer = NULL; | |
3436 | } | |
3437 | ovsrcu_set(&dev->ingress_policer, policer); | |
3438 | dev->policer_rate = policer_rate; | |
3439 | dev->policer_burst = policer_burst; | |
3440 | ovs_mutex_unlock(&dev->mutex); | |
3441 | ||
3442 | return 0; | |
3443 | } | |
3444 | ||
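/* The swap above is RCU-protected: a reader that already fetched the old
 * policer via netdev_dpdk_get_ingress_policer() (defined later in this
 * file) may keep using it, since ovsrcu_postpone() defers the free until
 * all current readers have quiesced. A reader-side sketch:
 *
 *     struct ingress_policer *p = netdev_dpdk_get_ingress_policer(dev);
 *     if (p) {
 *         // safe to use 'p' here even while a new policer is installed
 *     }
 */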
8a9562d2 PS |
3445 | static int |
3446 | netdev_dpdk_get_ifindex(const struct netdev *netdev) | |
3447 | { | |
3448 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
8a9562d2 PS |
3449 | |
3450 | ovs_mutex_lock(&dev->mutex); | |
12d0d124 PL |
3451 | /* Calculate hash from the netdev name. Ensure that ifindex is a 24-bit |
3452 | * positive integer to meet RFC 2863 recommendations.	| |
3453 | */ | |
3454 | int ifindex = hash_string(netdev->name, 0) % 0xfffffe + 1; | |
8a9562d2 PS |
3455 | ovs_mutex_unlock(&dev->mutex); |
3456 | ||
3457 | return ifindex; | |
3458 | } | |
3459 | ||
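/* The expression above maps the name hash into the range [1, 0xfffffe]:
 * 'hash % 0xfffffe' yields [0, 0xfffffd] and the '+ 1' guarantees a
 * non-zero result, so the reported ifindex is always a positive value
 * that fits in 24 bits, as RFC 2863 recommends. */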
3460 | static int | |
d46285a2 | 3461 | netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier) |
8a9562d2 | 3462 | { |
d46285a2 | 3463 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 PS |
3464 | |
3465 | ovs_mutex_lock(&dev->mutex); | |
3466 | check_link_status(dev); | |
3467 | *carrier = dev->link.link_status; | |
58397e6c KT |
3468 | |
3469 | ovs_mutex_unlock(&dev->mutex); | |
3470 | ||
3471 | return 0; | |
3472 | } | |
3473 | ||
3474 | static int | |
d46285a2 | 3475 | netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier) |
58397e6c | 3476 | { |
d46285a2 | 3477 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
58397e6c KT |
3478 | |
3479 | ovs_mutex_lock(&dev->mutex); | |
3480 | ||
0a0f39df | 3481 | if (is_vhost_running(dev)) { |
58397e6c KT |
3482 | *carrier = 1; |
3483 | } else { | |
3484 | *carrier = 0; | |
3485 | } | |
3486 | ||
8a9562d2 PS |
3487 | ovs_mutex_unlock(&dev->mutex); |
3488 | ||
3489 | return 0; | |
3490 | } | |
3491 | ||
3492 | static long long int | |
d46285a2 | 3493 | netdev_dpdk_get_carrier_resets(const struct netdev *netdev) |
8a9562d2 | 3494 | { |
d46285a2 | 3495 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 PS |
3496 | long long int carrier_resets; |
3497 | ||
3498 | ovs_mutex_lock(&dev->mutex); | |
3499 | carrier_resets = dev->link_reset_cnt; | |
3500 | ovs_mutex_unlock(&dev->mutex); | |
3501 | ||
3502 | return carrier_resets; | |
3503 | } | |
3504 | ||
3505 | static int | |
d46285a2 | 3506 | netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED, |
8a9562d2 PS |
3507 | long long int interval OVS_UNUSED) |
3508 | { | |
ee32150e | 3509 | return EOPNOTSUPP; |
8a9562d2 PS |
3510 | } |
3511 | ||
3512 | static int | |
3513 | netdev_dpdk_update_flags__(struct netdev_dpdk *dev, | |
3514 | enum netdev_flags off, enum netdev_flags on, | |
64839cf4 WT |
3515 | enum netdev_flags *old_flagsp) |
3516 | OVS_REQUIRES(dev->mutex) | |
8a9562d2 | 3517 | { |
8a9562d2 PS |
3518 | if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) { |
3519 | return EINVAL; | |
3520 | } | |
3521 | ||
3522 | *old_flagsp = dev->flags; | |
3523 | dev->flags |= on; | |
3524 | dev->flags &= ~off; | |
3525 | ||
3526 | if (dev->flags == *old_flagsp) { | |
3527 | return 0; | |
3528 | } | |
3529 | ||
58397e6c | 3530 | if (dev->type == DPDK_DEV_ETH) { |
2d37de73 EC |
3531 | |
3532 | if ((dev->flags ^ *old_flagsp) & NETDEV_UP) { | |
3533 | int err; | |
3534 | ||
3535 | if (dev->flags & NETDEV_UP) { | |
3536 | err = rte_eth_dev_set_link_up(dev->port_id); | |
3537 | } else { | |
3538 | err = rte_eth_dev_set_link_down(dev->port_id); | |
3539 | } | |
3540 | if (err == -ENOTSUP) { | |
3541 | VLOG_INFO("Interface %s does not support link state " | |
3542 | "configuration", netdev_get_name(&dev->up)); | |
3543 | } else if (err < 0) { | |
3544 | VLOG_ERR("Interface %s link change error: %s", | |
3545 | netdev_get_name(&dev->up), rte_strerror(-err)); | |
3546 | dev->flags = *old_flagsp; | |
3547 | return -err; | |
3548 | } | |
3549 | } | |
3550 | ||
58397e6c KT |
3551 | if (dev->flags & NETDEV_PROMISC) { |
3552 | rte_eth_promiscuous_enable(dev->port_id); | |
3553 | } | |
8a9562d2 | 3554 | |
314fb5ad | 3555 | netdev_change_seq_changed(&dev->up); |
e543851d ZB |
3556 | } else { |
3557 | /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is | |
3558 | * running then change netdev's change_seq to trigger link state | |
3559 | * update. */ | |
e543851d ZB |
3560 | |
3561 | if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off))) | |
0a0f39df | 3562 | && is_vhost_running(dev)) { |
e543851d ZB |
3563 | netdev_change_seq_changed(&dev->up); |
3564 | ||
3565 | /* Clear statistics if device is getting up. */ | |
3566 | if (NETDEV_UP & on) { | |
3567 | rte_spinlock_lock(&dev->stats_lock); | |
58be5c0e | 3568 | memset(&dev->stats, 0, sizeof dev->stats); |
e543851d ZB |
3569 | rte_spinlock_unlock(&dev->stats_lock); |
3570 | } | |
3571 | } | |
8a9562d2 PS |
3572 | } |
3573 | ||
3574 | return 0; | |
3575 | } | |
3576 | ||
3577 | static int | |
d46285a2 | 3578 | netdev_dpdk_update_flags(struct netdev *netdev, |
8a9562d2 PS |
3579 | enum netdev_flags off, enum netdev_flags on, |
3580 | enum netdev_flags *old_flagsp) | |
3581 | { | |
d46285a2 | 3582 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 PS |
3583 | int error; |
3584 | ||
d46285a2 DDP |
3585 | ovs_mutex_lock(&dev->mutex); |
3586 | error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp); | |
3587 | ovs_mutex_unlock(&dev->mutex); | |
8a9562d2 PS |
3588 | |
3589 | return error; | |
3590 | } | |
3591 | ||
b2e8b12f FL |
3592 | static int |
3593 | netdev_dpdk_vhost_user_get_status(const struct netdev *netdev, | |
3594 | struct smap *args) | |
3595 | { | |
3596 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
3597 | ||
3598 | ovs_mutex_lock(&dev->mutex); | |
3599 | ||
3600 | bool client_mode = dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT; | |
3601 | smap_add_format(args, "mode", "%s", client_mode ? "client" : "server"); | |
3602 | ||
3603 | int vid = netdev_dpdk_get_vid(dev); | |
3604 | if (vid < 0) { | |
3605 | smap_add_format(args, "status", "disconnected"); | |
3606 | ovs_mutex_unlock(&dev->mutex); | |
3607 | return 0; | |
3608 | } else { | |
3609 | smap_add_format(args, "status", "connected"); | |
3610 | } | |
3611 | ||
3612 | char socket_name[PATH_MAX]; | |
3613 | if (!rte_vhost_get_ifname(vid, socket_name, PATH_MAX)) { | |
3614 | smap_add_format(args, "socket", "%s", socket_name); | |
3615 | } | |
3616 | ||
3617 | uint64_t features; | |
3618 | if (!rte_vhost_get_negotiated_features(vid, &features)) { | |
3619 | smap_add_format(args, "features", "0x%016"PRIx64, features); | |
3620 | } | |
3621 | ||
3622 | uint16_t mtu; | |
3623 | if (!rte_vhost_get_mtu(vid, &mtu)) { | |
3624 | smap_add_format(args, "mtu", "%d", mtu); | |
3625 | } | |
3626 | ||
3627 | int numa = rte_vhost_get_numa_node(vid); | |
3628 | if (numa >= 0) { | |
3629 | smap_add_format(args, "numa", "%d", numa); | |
3630 | } | |
3631 | ||
3632 | uint16_t vring_num = rte_vhost_get_vring_num(vid); | |
3633 | if (vring_num) { | |
3634 | smap_add_format(args, "num_of_vrings", "%d", vring_num); | |
3635 | } | |
3636 | ||
3637 | for (int i = 0; i < vring_num; i++) { | |
3638 | struct rte_vhost_vring vring; | |
b2e8b12f FL |
3639 | |
3640 | rte_vhost_get_vhost_vring(vid, i, &vring); | |
b9a3183d AC |
3641 | smap_add_nocopy(args, xasprintf("vring_%d_size", i), |
3642 | xasprintf("%d", vring.size)); | |
b2e8b12f FL |
3643 | } |
3644 | ||
3645 | ovs_mutex_unlock(&dev->mutex); | |
3646 | return 0; | |
3647 | } | |
3648 | ||
31154f95 IS |
3649 | /* |
3650 | * Convert a given uint32_t link speed defined in DPDK to a string | |
3651 | * equivalent. | |
3652 | */ | |
3653 | static const char * | |
3654 | netdev_dpdk_link_speed_to_str__(uint32_t link_speed) | |
3655 | { | |
3656 | switch (link_speed) { | |
3657 | case ETH_SPEED_NUM_10M: return "10Mbps"; | |
3658 | case ETH_SPEED_NUM_100M: return "100Mbps"; | |
3659 | case ETH_SPEED_NUM_1G: return "1Gbps"; | |
3660 | case ETH_SPEED_NUM_2_5G: return "2.5Gbps"; | |
3661 | case ETH_SPEED_NUM_5G: return "5Gbps"; | |
3662 | case ETH_SPEED_NUM_10G: return "10Gbps"; | |
3663 | case ETH_SPEED_NUM_20G: return "20Gbps"; | |
3664 | case ETH_SPEED_NUM_25G: return "25Gbps"; | |
3665 | case ETH_SPEED_NUM_40G: return "40Gbps"; | |
3666 | case ETH_SPEED_NUM_50G: return "50Gbps"; | |
3667 | case ETH_SPEED_NUM_56G: return "56Gbps"; | |
3668 | case ETH_SPEED_NUM_100G: return "100Gbps"; | |
3669 | default: return "Not Defined"; | |
3670 | } | |
3671 | } | |
3672 | ||
8a9562d2 | 3673 | static int |
d46285a2 | 3674 | netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args) |
8a9562d2 | 3675 | { |
d46285a2 | 3676 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 | 3677 | struct rte_eth_dev_info dev_info; |
31154f95 | 3678 | uint32_t link_speed; |
f4336f50 | 3679 | uint32_t dev_flags; |
8a9562d2 | 3680 | |
7cd1261d | 3681 | if (!rte_eth_dev_is_valid_port(dev->port_id)) { |
8a9562d2 | 3682 | return ENODEV; |
7cd1261d | 3683 | } |
8a9562d2 | 3684 | |
03f3f9c0 | 3685 | ovs_mutex_lock(&dpdk_mutex); |
8a9562d2 PS |
3686 | ovs_mutex_lock(&dev->mutex); |
3687 | rte_eth_dev_info_get(dev->port_id, &dev_info); | |
31154f95 | 3688 | link_speed = dev->link.link_speed; |
f4336f50 | 3689 | dev_flags = *dev_info.dev_flags; |
8a9562d2 | 3690 | ovs_mutex_unlock(&dev->mutex); |
03f3f9c0 OM |
3691 | const struct rte_bus *bus; |
3692 | const struct rte_pci_device *pci_dev; | |
3693 | uint16_t vendor_id = PCI_ANY_ID; | |
3694 | uint16_t device_id = PCI_ANY_ID; | |
3695 | bus = rte_bus_find_by_device(dev_info.device); | |
3696 | if (bus && !strcmp(bus->name, "pci")) { | |
3697 | pci_dev = RTE_DEV_TO_PCI(dev_info.device); | |
3698 | if (pci_dev) { | |
3699 | vendor_id = pci_dev->id.vendor_id; | |
3700 | device_id = pci_dev->id.device_id; | |
3701 | } | |
3702 | } | |
3703 | ovs_mutex_unlock(&dpdk_mutex); | |
8a9562d2 | 3704 | |
fa9f4eeb | 3705 | smap_add_format(args, "port_no", DPDK_PORT_ID_FMT, dev->port_id); |
58be5c0e MK |
3706 | smap_add_format(args, "numa_id", "%d", |
3707 | rte_eth_dev_socket_id(dev->port_id)); | |
8a9562d2 PS |
3708 | smap_add_format(args, "driver_name", "%s", dev_info.driver_name); |
3709 | smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize); | |
4be4d22c | 3710 | smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len); |
8a9562d2 PS |
3711 | smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues); |
3712 | smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues); | |
3713 | smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs); | |
58be5c0e MK |
3714 | smap_add_format(args, "max_hash_mac_addrs", "%u", |
3715 | dev_info.max_hash_mac_addrs); | |
8a9562d2 PS |
3716 | smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs); |
3717 | smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools); | |
3718 | ||
3eb8d4fa MW |
3719 | /* Querying the DPDK library for iftype may be done in future, pending |
3720 | * support; cf. RFC 3635 Section 3.2.4. */ | |
3721 | enum { IF_TYPE_ETHERNETCSMACD = 6 }; | |
3722 | ||
3723 | smap_add_format(args, "if_type", "%"PRIu32, IF_TYPE_ETHERNETCSMACD); | |
3724 | smap_add_format(args, "if_descr", "%s %s", rte_version(), | |
3725 | dev_info.driver_name); | |
03f3f9c0 OM |
3726 | smap_add_format(args, "pci-vendor_id", "0x%x", vendor_id); |
3727 | smap_add_format(args, "pci-device_id", "0x%x", device_id); | |
8a9562d2 | 3728 | |
31154f95 IS |
3729 | /* Not all link speeds are defined in the OpenFlow specs e.g. 25 Gbps. |
3730 | * In that case the speed will not be reported as part of the usual | |
3731 | * call to get_features(). Get the link speed of the device and add it | |
3732 | * to the device status in an easy to read string format. | |
3733 | */ | |
3734 | smap_add(args, "link_speed", | |
3735 | netdev_dpdk_link_speed_to_str__(link_speed)); | |
3736 | ||
f4336f50 GR |
3737 | if (dev_flags & RTE_ETH_DEV_REPRESENTOR) { |
3738 | smap_add_format(args, "dpdk-vf-mac", ETH_ADDR_FMT, | |
3739 | ETH_ADDR_ARGS(dev->hwaddr)); | |
3740 | } | |
3741 | ||
8a9562d2 PS |
3742 | return 0; |
3743 | } | |
3744 | ||
3745 | static void | |
3746 | netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state) | |
3747 | OVS_REQUIRES(dev->mutex) | |
3748 | { | |
3749 | enum netdev_flags old_flags; | |
3750 | ||
3751 | if (admin_state) { | |
3752 | netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags); | |
3753 | } else { | |
3754 | netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags); | |
3755 | } | |
3756 | } | |
3757 | ||
3758 | static void | |
3759 | netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc, | |
3760 | const char *argv[], void *aux OVS_UNUSED) | |
3761 | { | |
3762 | bool up; | |
3763 | ||
3764 | if (!strcasecmp(argv[argc - 1], "up")) { | |
3765 | up = true; | |
3766 | } else if ( !strcasecmp(argv[argc - 1], "down")) { | |
3767 | up = false; | |
3768 | } else { | |
3769 | unixctl_command_reply_error(conn, "Invalid Admin State"); | |
3770 | return; | |
3771 | } | |
3772 | ||
3773 | if (argc > 2) { | |
3774 | struct netdev *netdev = netdev_from_name(argv[1]); | |
3d0d5ab1 | 3775 | |
8a9562d2 | 3776 | if (netdev && is_dpdk_class(netdev->netdev_class)) { |
3d0d5ab1 | 3777 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 | 3778 | |
3d0d5ab1 IM |
3779 | ovs_mutex_lock(&dev->mutex); |
3780 | netdev_dpdk_set_admin_state__(dev, up); | |
3781 | ovs_mutex_unlock(&dev->mutex); | |
8a9562d2 PS |
3782 | |
3783 | netdev_close(netdev); | |
3784 | } else { | |
3785 | unixctl_command_reply_error(conn, "Not a DPDK Interface"); | |
3786 | netdev_close(netdev); | |
3787 | return; | |
3788 | } | |
3789 | } else { | |
3d0d5ab1 | 3790 | struct netdev_dpdk *dev; |
8a9562d2 PS |
3791 | |
3792 | ovs_mutex_lock(&dpdk_mutex); | |
3d0d5ab1 IM |
3793 | LIST_FOR_EACH (dev, list_node, &dpdk_list) { |
3794 | ovs_mutex_lock(&dev->mutex); | |
3795 | netdev_dpdk_set_admin_state__(dev, up); | |
3796 | ovs_mutex_unlock(&dev->mutex); | |
8a9562d2 PS |
3797 | } |
3798 | ovs_mutex_unlock(&dpdk_mutex); | |
3799 | } | |
3800 | unixctl_command_reply(conn, "OK"); | |
3801 | } | |
3802 | ||
0ee821c2 DB |
3803 | static void |
3804 | netdev_dpdk_detach(struct unixctl_conn *conn, int argc OVS_UNUSED, | |
3805 | const char *argv[], void *aux OVS_UNUSED) | |
3806 | { | |
0ee821c2 | 3807 | char *response; |
7ee94cba | 3808 | dpdk_port_t port_id; |
0ee821c2 | 3809 | struct netdev_dpdk *dev; |
40e940e4 OM |
3810 | struct rte_device *rte_dev; |
3811 | struct ds used_interfaces = DS_EMPTY_INITIALIZER; | |
3812 | bool used = false; | |
0ee821c2 DB |
3813 | |
3814 | ovs_mutex_lock(&dpdk_mutex); | |
3815 | ||
40e940e4 OM |
3816 | port_id = netdev_dpdk_get_port_by_devargs(argv[1]); |
3817 | if (!rte_eth_dev_is_valid_port(port_id)) { | |
0ee821c2 DB |
3818 | response = xasprintf("Device '%s' not found in DPDK", argv[1]); |
3819 | goto error; | |
3820 | } | |
3821 | ||
40e940e4 OM |
3822 | rte_dev = rte_eth_devices[port_id].device; |
3823 | ds_put_format(&used_interfaces, | |
3824 | "Device '%s' is being used by the following interfaces:", | |
3825 | argv[1]); | |
3826 | ||
3827 | LIST_FOR_EACH (dev, list_node, &dpdk_list) { | |
3828 | /* FIXME: avoid direct access to DPDK array rte_eth_devices. */ | |
3829 | if (rte_eth_devices[dev->port_id].device == rte_dev | |
3830 | && rte_eth_devices[dev->port_id].state != RTE_ETH_DEV_UNUSED) { | |
3831 | used = true; | |
3832 | ds_put_format(&used_interfaces, " %s", | |
3833 | netdev_get_name(&dev->up)); | |
3834 | } | |
3835 | } | |
3836 | ||
3837 | if (used) { | |
3838 | ds_put_cstr(&used_interfaces, ". Remove them before detaching."); | |
3839 | response = ds_steal_cstr(&used_interfaces); | |
3840 | ds_destroy(&used_interfaces); | |
0ee821c2 DB |
3841 | goto error; |
3842 | } | |
40e940e4 | 3843 | ds_destroy(&used_interfaces); |
0ee821c2 DB |
3844 | |
3845 | rte_eth_dev_close(port_id); | |
40e940e4 | 3846 | if (rte_dev_remove(rte_dev) < 0) { |
0ee821c2 DB |
3847 | response = xasprintf("Device '%s' can not be detached", argv[1]); |
3848 | goto error; | |
3849 | } | |
3850 | ||
40e940e4 OM |
3851 | response = xasprintf("All devices shared with device '%s' " |
3852 | "have been detached", argv[1]); | |
0ee821c2 DB |
3853 | |
3854 | ovs_mutex_unlock(&dpdk_mutex); | |
3855 | unixctl_command_reply(conn, response); | |
3856 | free(response); | |
3857 | return; | |
3858 | ||
3859 | error: | |
3860 | ovs_mutex_unlock(&dpdk_mutex); | |
3861 | unixctl_command_reply_error(conn, response); | |
3862 | free(response); | |
3863 | } | |
3864 | ||
be481733 IM |
3865 | static void |
3866 | netdev_dpdk_get_mempool_info(struct unixctl_conn *conn, | |
3867 | int argc, const char *argv[], | |
3868 | void *aux OVS_UNUSED) | |
3869 | { | |
3870 | size_t size; | |
3871 | FILE *stream; | |
3872 | char *response = NULL; | |
3873 | struct netdev *netdev = NULL; | |
3874 | ||
3875 | if (argc == 2) { | |
3876 | netdev = netdev_from_name(argv[1]); | |
3877 | if (!netdev || !is_dpdk_class(netdev->netdev_class)) { | |
3878 | unixctl_command_reply_error(conn, "Not a DPDK Interface"); | |
3879 | goto out; | |
3880 | } | |
3881 | } | |
3882 | ||
3883 | stream = open_memstream(&response, &size); | |
3884 | if (!stream) { | |
3885 | response = xasprintf("Unable to open memstream: %s.", | |
3886 | ovs_strerror(errno)); | |
3887 | unixctl_command_reply_error(conn, response); | |
3888 | goto out; | |
3889 | } | |
3890 | ||
3891 | if (netdev) { | |
3892 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
3893 | ||
3894 | ovs_mutex_lock(&dev->mutex); | |
3895 | ovs_mutex_lock(&dpdk_mp_mutex); | |
3896 | ||
43307ad0 | 3897 | rte_mempool_dump(stream, dev->dpdk_mp->mp); |
be481733 IM |
3898 | |
3899 | ovs_mutex_unlock(&dpdk_mp_mutex); | |
3900 | ovs_mutex_unlock(&dev->mutex); | |
3901 | } else { | |
3902 | ovs_mutex_lock(&dpdk_mp_mutex); | |
3903 | rte_mempool_list_dump(stream); | |
3904 | ovs_mutex_unlock(&dpdk_mp_mutex); | |
3905 | } | |
3906 | ||
3907 | fclose(stream); | |
3908 | ||
3909 | unixctl_command_reply(conn, response); | |
3910 | out: | |
3911 | free(response); | |
3912 | netdev_close(netdev); | |
3913 | } | |
3914 | ||
58397e6c KT |
3915 | /* |
3916 | * Set virtqueue flags so that we do not receive interrupts. | |
3917 | */ | |
3918 | static void | |
0a0f39df | 3919 | set_irq_status(int vid) |
58397e6c | 3920 | { |
4573fbd3 | 3921 | uint32_t i; |
4573fbd3 | 3922 | |
f3e7ec25 MW |
3923 | for (i = 0; i < rte_vhost_get_vring_num(vid); i++) { |
3924 | rte_vhost_enable_guest_notification(vid, i, 0); | |
4573fbd3 FL |
3925 | } |
3926 | } | |
3927 | ||
585a5bea IM |
3928 | /* |
3929 | * Fixes mapping for vhost-user tx queues. Must be called after each | |
81acebda | 3930 | * enabling/disabling of queues and n_txq modifications. |
585a5bea IM |
3931 | */ |
3932 | static void | |
d46285a2 DDP |
3933 | netdev_dpdk_remap_txqs(struct netdev_dpdk *dev) |
3934 | OVS_REQUIRES(dev->mutex) | |
585a5bea IM |
3935 | { |
3936 | int *enabled_queues, n_enabled = 0; | |
81acebda | 3937 | int i, k, total_txqs = dev->up.n_txq; |
585a5bea | 3938 | |
eff23640 | 3939 | enabled_queues = xcalloc(total_txqs, sizeof *enabled_queues); |
585a5bea IM |
3940 | |
3941 | for (i = 0; i < total_txqs; i++) { | |
3942 | /* Enabled queues are always mapped to themselves. */	| |
d46285a2 | 3943 | if (dev->tx_q[i].map == i) { |
585a5bea IM |
3944 | enabled_queues[n_enabled++] = i; |
3945 | } | |
3946 | } | |
3947 | ||
3948 | if (n_enabled == 0 && total_txqs != 0) { | |
f3ea2ad2 | 3949 | enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED; |
585a5bea IM |
3950 | n_enabled = 1; |
3951 | } | |
3952 | ||
3953 | k = 0; | |
3954 | for (i = 0; i < total_txqs; i++) { | |
d46285a2 DDP |
3955 | if (dev->tx_q[i].map != i) { |
3956 | dev->tx_q[i].map = enabled_queues[k]; | |
585a5bea IM |
3957 | k = (k + 1) % n_enabled; |
3958 | } | |
3959 | } | |
3960 | ||
170ef726 IM |
3961 | if (VLOG_IS_DBG_ENABLED()) { |
3962 | struct ds mapping = DS_EMPTY_INITIALIZER; | |
3963 | ||
3964 | ds_put_format(&mapping, "TX queue mapping for port '%s':\n", | |
3965 | netdev_get_name(&dev->up)); | |
3966 | for (i = 0; i < total_txqs; i++) { | |
3967 | ds_put_format(&mapping, "%2d --> %2d\n", i, dev->tx_q[i].map); | |
3968 | } | |
3969 | ||
3970 | VLOG_DBG("%s", ds_cstr(&mapping)); | |
3971 | ds_destroy(&mapping); | |
585a5bea IM |
3972 | } |
3973 | ||
eff23640 | 3974 | free(enabled_queues); |
585a5bea | 3975 | } |
4573fbd3 | 3976 | |
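/* Example of the remapping above: with dev->up.n_txq == 4 and only
 * queues 0 and 2 enabled by the guest, the resulting map is
 *
 *     tx_q[0].map = 0    (enabled, maps to itself)
 *     tx_q[1].map = 0    (disabled, round-robined to an enabled queue)
 *     tx_q[2].map = 2    (enabled, maps to itself)
 *     tx_q[3].map = 2    (disabled, round-robined to an enabled queue)
 *
 * so every OVS tx queue is backed by a queue the guest actually polls. */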
58397e6c KT |
3977 | /* |
3978 | * A new virtio-net device is added to a vhost port. | |
3979 | */ | |
3980 | static int | |
0a0f39df | 3981 | new_device(int vid) |
58397e6c | 3982 | { |
d46285a2 | 3983 | struct netdev_dpdk *dev; |
58397e6c | 3984 | bool exists = false; |
db8f13b0 | 3985 | int newnode = 0; |
0a0f39df CL |
3986 | char ifname[IF_NAME_SZ]; |
3987 | ||
58be5c0e | 3988 | rte_vhost_get_ifname(vid, ifname, sizeof ifname); |
58397e6c KT |
3989 | |
3990 | ovs_mutex_lock(&dpdk_mutex); | |
3991 | /* Add device to the vhost port with the same name as that passed down. */ | |
d46285a2 | 3992 | LIST_FOR_EACH(dev, list_node, &dpdk_list) { |
c1ff66ac | 3993 | ovs_mutex_lock(&dev->mutex); |
bb9d2623 IM |
3994 | if (nullable_string_is_equal(ifname, dev->vhost_id)) { |
3995 | uint32_t qp_num = rte_vhost_get_vring_num(vid) / VIRTIO_QNUM; | |
db8f13b0 CL |
3996 | |
3997 | /* Get NUMA information */ | |
0a0f39df CL |
3998 | newnode = rte_vhost_get_numa_node(vid); |
3999 | if (newnode == -1) { | |
5b9bf9e0 | 4000 | #ifdef VHOST_NUMA |
db8f13b0 | 4001 | VLOG_INFO("Error getting NUMA info for vHost Device '%s'", |
0a0f39df | 4002 | ifname); |
5b9bf9e0 | 4003 | #endif |
db8f13b0 | 4004 | newnode = dev->socket_id; |
db8f13b0 CL |
4005 | } |
4006 | ||
7235cd20 DM |
4007 | if (dev->requested_n_txq < qp_num |
4008 | || dev->requested_n_rxq < qp_num | |
7f5f2bd0 IM |
4009 | || dev->requested_socket_id != newnode) { |
4010 | dev->requested_socket_id = newnode; | |
4011 | dev->requested_n_rxq = qp_num; | |
4012 | dev->requested_n_txq = qp_num; | |
4013 | netdev_request_reconfigure(&dev->up); | |
4014 | } else { | |
4015 | /* Reconfiguration not required. */ | |
4016 | dev->vhost_reconfigured = true; | |
4017 | } | |
81acebda | 4018 | |
0a0f39df | 4019 | ovsrcu_index_set(&dev->vid, vid); |
81acebda IM |
4020 | exists = true; |
4021 | ||
58397e6c | 4022 | /* Disable notifications. */ |
0a0f39df | 4023 | set_irq_status(vid); |
e543851d | 4024 | netdev_change_seq_changed(&dev->up); |
d46285a2 | 4025 | ovs_mutex_unlock(&dev->mutex); |
58397e6c KT |
4026 | break; |
4027 | } | |
c1ff66ac | 4028 | ovs_mutex_unlock(&dev->mutex); |
58397e6c KT |
4029 | } |
4030 | ovs_mutex_unlock(&dpdk_mutex); | |
4031 | ||
4032 | if (!exists) { | |
0a0f39df | 4033 | VLOG_INFO("vHost Device '%s' can't be added - name not found", ifname); |
58397e6c KT |
4034 | |
4035 | return -1; | |
4036 | } | |
4037 | ||
0a0f39df CL |
4038 | VLOG_INFO("vHost Device '%s' has been added on numa node %i", |
4039 | ifname, newnode); | |
4040 | ||
58397e6c KT |
4041 | return 0; |
4042 | } | |
4043 | ||
f3ea2ad2 IM |
4044 | /* Clears mapping for all available queues of vhost interface. */ |
4045 | static void | |
4046 | netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev) | |
4047 | OVS_REQUIRES(dev->mutex) | |
4048 | { | |
4049 | int i; | |
4050 | ||
81acebda | 4051 | for (i = 0; i < dev->up.n_txq; i++) { |
f3ea2ad2 IM |
4052 | dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN; |
4053 | } | |
4054 | } | |
4055 | ||
58397e6c KT |
4056 | /* |
4057 | * Remove a virtio-net device from the specific vhost port. Clearing the	| |
4058 | * device's vid stops any more packets from being sent or received to/from	| |
4059 | * a VM, and the RCU synchronization in this function ensures all currently	| |
4060 | * queued packets have been sent/received before removing the device.	| |
4061 | */ | |
4062 | static void | |
0a0f39df | 4063 | destroy_device(int vid) |
58397e6c | 4064 | { |
d46285a2 | 4065 | struct netdev_dpdk *dev; |
afee281f | 4066 | bool exists = false; |
0a0f39df CL |
4067 | char ifname[IF_NAME_SZ]; |
4068 | ||
58be5c0e | 4069 | rte_vhost_get_ifname(vid, ifname, sizeof ifname); |
58397e6c KT |
4070 | |
4071 | ovs_mutex_lock(&dpdk_mutex); | |
d46285a2 | 4072 | LIST_FOR_EACH (dev, list_node, &dpdk_list) { |
0a0f39df | 4073 | if (netdev_dpdk_get_vid(dev) == vid) { |
58397e6c | 4074 | |
d46285a2 | 4075 | ovs_mutex_lock(&dev->mutex); |
0a0f39df CL |
4076 | dev->vhost_reconfigured = false; |
4077 | ovsrcu_index_set(&dev->vid, -1); | |
35c91567 DM |
4078 | memset(dev->vhost_rxq_enabled, 0, |
4079 | dev->up.n_rxq * sizeof *dev->vhost_rxq_enabled); | |
d46285a2 | 4080 | netdev_dpdk_txq_map_clear(dev); |
81acebda | 4081 | |
e543851d | 4082 | netdev_change_seq_changed(&dev->up); |
d46285a2 | 4083 | ovs_mutex_unlock(&dev->mutex); |
81acebda | 4084 | exists = true; |
afee281f | 4085 | break; |
58397e6c KT |
4086 | } |
4087 | } | |
afee281f | 4088 | |
58397e6c KT |
4089 | ovs_mutex_unlock(&dpdk_mutex); |
4090 | ||
0a0f39df | 4091 | if (exists) { |
afee281f KT |
4092 | /* |
4093 | * Wait for other threads to quiesce after setting the 'virtio_dev' | |
4094 | * to NULL, before returning. | |
4095 | */ | |
4096 | ovsrcu_synchronize(); | |
4097 | /* | |
4098 | * As call to ovsrcu_synchronize() will end the quiescent state, | |
4099 | * put thread back into quiescent state before returning. | |
4100 | */ | |
4101 | ovsrcu_quiesce_start(); | |
0a0f39df | 4102 | VLOG_INFO("vHost Device '%s' has been removed", ifname); |
afee281f | 4103 | } else { |
0a0f39df | 4104 | VLOG_INFO("vHost Device '%s' not found", ifname); |
afee281f | 4105 | } |
58397e6c KT |
4106 | } |
4107 | ||
585a5bea | 4108 | static int |
0a0f39df | 4109 | vring_state_changed(int vid, uint16_t queue_id, int enable) |
585a5bea | 4110 | { |
d46285a2 | 4111 | struct netdev_dpdk *dev; |
585a5bea IM |
4112 | bool exists = false; |
4113 | int qid = queue_id / VIRTIO_QNUM; | |
35c91567 | 4114 | bool is_rx = (queue_id % VIRTIO_QNUM) == VIRTIO_TXQ; |
0a0f39df CL |
4115 | char ifname[IF_NAME_SZ]; |
4116 | ||
58be5c0e | 4117 | rte_vhost_get_ifname(vid, ifname, sizeof ifname); |
585a5bea | 4118 | |
585a5bea | 4119 | ovs_mutex_lock(&dpdk_mutex); |
d46285a2 | 4120 | LIST_FOR_EACH (dev, list_node, &dpdk_list) { |
c1ff66ac | 4121 | ovs_mutex_lock(&dev->mutex); |
bb9d2623 | 4122 | if (nullable_string_is_equal(ifname, dev->vhost_id)) { |
35c91567 DM |
4123 | if (is_rx) { |
4124 | bool old_state = dev->vhost_rxq_enabled[qid]; | |
4125 | ||
4126 | dev->vhost_rxq_enabled[qid] = enable != 0; | |
4127 | if (old_state != dev->vhost_rxq_enabled[qid]) { | |
4128 | netdev_change_seq_changed(&dev->up); | |
4129 | } | |
585a5bea | 4130 | } else { |
35c91567 DM |
4131 | if (enable) { |
4132 | dev->tx_q[qid].map = qid; | |
4133 | } else { | |
4134 | dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED; | |
4135 | } | |
4136 | netdev_dpdk_remap_txqs(dev); | |
585a5bea | 4137 | } |
585a5bea | 4138 | exists = true; |
d46285a2 | 4139 | ovs_mutex_unlock(&dev->mutex); |
585a5bea IM |
4140 | break; |
4141 | } | |
c1ff66ac | 4142 | ovs_mutex_unlock(&dev->mutex); |
585a5bea IM |
4143 | } |
4144 | ovs_mutex_unlock(&dpdk_mutex); | |
4145 | ||
4146 | if (exists) { | |
35c91567 DM |
4147 | VLOG_INFO("State of queue %d ( %s_qid %d ) of vhost device '%s' " |
4148 | "changed to \'%s\'", queue_id, is_rx == true ? "rx" : "tx", | |
4149 | qid, ifname, (enable == 1) ? "enabled" : "disabled"); | |
585a5bea | 4150 | } else { |
0a0f39df | 4151 | VLOG_INFO("vHost Device '%s' not found", ifname); |
585a5bea IM |
4152 | return -1; |
4153 | } | |
4154 | ||
4155 | return 0; | |
4156 | } | |
4157 | ||
61473a0e DM |
4158 | static void |
4159 | destroy_connection(int vid) | |
4160 | { | |
4161 | struct netdev_dpdk *dev; | |
4162 | char ifname[IF_NAME_SZ]; | |
4163 | bool exists = false; | |
4164 | ||
4165 | rte_vhost_get_ifname(vid, ifname, sizeof ifname); | |
4166 | ||
4167 | ovs_mutex_lock(&dpdk_mutex); | |
4168 | LIST_FOR_EACH (dev, list_node, &dpdk_list) { | |
4169 | ovs_mutex_lock(&dev->mutex); | |
4170 | if (nullable_string_is_equal(ifname, dev->vhost_id)) { | |
4171 | uint32_t qp_num = NR_QUEUE; | |
4172 | ||
4173 | if (netdev_dpdk_get_vid(dev) >= 0) { | |
4174 | VLOG_ERR("Connection on socket '%s' destroyed while vhost " | |
4175 | "device still attached.", dev->vhost_id); | |
4176 | } | |
4177 | ||
4178 | /* Restore the number of queue pairs to default. */ | |
4179 | if (dev->requested_n_txq != qp_num | |
4180 | || dev->requested_n_rxq != qp_num) { | |
4181 | dev->requested_n_rxq = qp_num; | |
4182 | dev->requested_n_txq = qp_num; | |
4183 | netdev_request_reconfigure(&dev->up); | |
4184 | } | |
4185 | ovs_mutex_unlock(&dev->mutex); | |
4186 | exists = true; | |
4187 | break; | |
4188 | } | |
4189 | ovs_mutex_unlock(&dev->mutex); | |
4190 | } | |
4191 | ovs_mutex_unlock(&dpdk_mutex); | |
4192 | ||
4193 | if (exists) { | |
4194 | VLOG_INFO("vHost Device '%s' connection has been destroyed", ifname); | |
4195 | } else { | |
4196 | VLOG_INFO("vHost Device '%s' not found", ifname); | |
4197 | } | |
4198 | } | |
4199 | ||
3d56e4ac EC |
4200 | static void	| |
4201 | vhost_guest_notified(int vid OVS_UNUSED)	| |
4202 | { | |
4203 | COVERAGE_INC(vhost_notification); | |
4204 | } | |
4205 | ||
8492adc2 JS |
4206 | /* |
4207 | * Retrieve the DPDK virtio device ID (vid) associated with a vhostuser | |
4208 | * or vhostuserclient netdev. | |
4209 | * | |
4210 | * Returns a value greater than or equal to zero for a valid vid, or '-1'	| |
4211 | * if there is no valid vid associated. A vid of '-1' must not be used in	| |
4212 | * rte_vhost_ API calls.	| |
4213 | * | |
4214 | * Once obtained and validated, a vid can be used by a PMD for multiple | |
4215 | * subsequent rte_vhost API calls until the PMD quiesces. A PMD should | |
4216 | * not fetch the vid again for each of a series of API calls. | |
4217 | */ | |
4218 | ||
0a0f39df CL |
4219 | int |
4220 | netdev_dpdk_get_vid(const struct netdev_dpdk *dev) | |
58397e6c | 4221 | { |
0a0f39df | 4222 | return ovsrcu_index_get(&dev->vid); |
58397e6c KT |
4223 | } |
4224 | ||
9509913a IS |
4225 | struct ingress_policer * |
4226 | netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev) | |
4227 | { | |
4228 | return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer); | |
4229 | } | |
4230 | ||
58397e6c | 4231 | static int |
ecc1a34e | 4232 | netdev_dpdk_class_init(void) |
7d1ced01 | 4233 | { |
ecc1a34e DDP |
4234 | static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER; |
4235 | ||
4236 | /* This function can be called for different classes. The initialization | |
4237 | * needs to be done only once. */	| |
4238 | if (ovsthread_once_start(&once)) { | |
988fd463 EC |
4239 | int ret; |
4240 | ||
ecc1a34e DDP |
4241 | ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL); |
4242 | unixctl_command_register("netdev-dpdk/set-admin-state", | |
4243 | "[netdev] up|down", 1, 2, | |
4244 | netdev_dpdk_set_admin_state, NULL); | |
4245 | ||
0ee821c2 DB |
4246 | unixctl_command_register("netdev-dpdk/detach", |
4247 | "pci address of device", 1, 1, | |
4248 | netdev_dpdk_detach, NULL); | |
4249 | ||
be481733 IM |
4250 | unixctl_command_register("netdev-dpdk/get-mempool-info", |
4251 | "[netdev]", 0, 1, | |
4252 | netdev_dpdk_get_mempool_info, NULL); | |
4253 | ||
988fd463 EC |
4254 | ret = rte_eth_dev_callback_register(RTE_ETH_ALL, |
4255 | RTE_ETH_EVENT_INTR_RESET, | |
4256 | dpdk_eth_event_callback, NULL); | |
4257 | if (ret != 0) { | |
4258 | VLOG_ERR("Ethernet device callback register error: %s", | |
4259 | rte_strerror(-ret)); | |
4260 | } | |
4261 | ||
ecc1a34e DDP |
4262 | ovsthread_once_done(&once); |
4263 | } | |
362ca396 | 4264 | |
7d1ced01 CL |
4265 | return 0; |
4266 | } | |
4267 | ||
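/* The unixctl commands registered above can be driven through
 * ovs-appctl; illustrative invocations (the port name and PCI address
 * are example values):
 *
 *   ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
 *   ovs-appctl netdev-dpdk/detach 0000:01:00.0
 *   ovs-appctl netdev-dpdk/get-mempool-info dpdk0
 */
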
0bf765f7 IS |
4268 | /* QoS Functions */ |
4269 | ||
4270 | /* | |
4271 | * Initialize QoS configuration operations. | |
4272 | */ | |
4273 | static void | |
4274 | qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops) | |
4275 | { | |
4276 | conf->ops = ops; | |
78bd47cf | 4277 | rte_spinlock_init(&conf->lock); |
0bf765f7 IS |
4278 | } |
4279 | ||
4280 | /* | |
4281 | * Search existing QoS operations in qos_ops and compare each set of | |
4282 | * operations' qos_name to 'name'. Return a dpdk_qos_ops pointer to a | |
4283 | * match, otherwise return NULL. | |
4284 | */ | |
4285 | static const struct dpdk_qos_ops * | |
4286 | qos_lookup_name(const char *name) | |
4287 | { | |
4288 | const struct dpdk_qos_ops *const *opsp; | |
4289 | ||
4290 | for (opsp = qos_confs; *opsp != NULL; opsp++) { | |
4291 | const struct dpdk_qos_ops *ops = *opsp; | |
4292 | if (!strcmp(name, ops->qos_name)) { | |
4293 | return ops; | |
4294 | } | |
4295 | } | |
4296 | return NULL; | |
4297 | } | |
4298 | ||
0bf765f7 IS |
4299 | static int |
4300 | netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED, | |
4301 | struct sset *types) | |
4302 | { | |
4303 | const struct dpdk_qos_ops *const *opsp; | |
4304 | ||
4305 | for (opsp = qos_confs; *opsp != NULL; opsp++) { | |
4306 | const struct dpdk_qos_ops *ops = *opsp; | |
4307 | if (ops->qos_construct && ops->qos_name[0] != '\0') { | |
4308 | sset_add(types, ops->qos_name); | |
4309 | } | |
4310 | } | |
4311 | return 0; | |
4312 | } | |
4313 | ||
4314 | static int | |
d46285a2 | 4315 | netdev_dpdk_get_qos(const struct netdev *netdev, |
0bf765f7 IS |
4316 | const char **typep, struct smap *details) |
4317 | { | |
d46285a2 | 4318 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
78bd47cf | 4319 | struct qos_conf *qos_conf; |
0bf765f7 IS |
4320 | int error = 0; |
4321 | ||
d46285a2 | 4322 | ovs_mutex_lock(&dev->mutex); |
78bd47cf DDP |
4323 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); |
4324 | if (qos_conf) { | |
4325 | *typep = qos_conf->ops->qos_name; | |
4326 | error = (qos_conf->ops->qos_get | |
4327 | ? qos_conf->ops->qos_get(qos_conf, details) : 0); | |
d03603c4 MC |
4328 | } else { |
4329 | /* No QoS configuration set, return an empty string. */ | |
4330 | *typep = ""; | |
0bf765f7 | 4331 | } |
d46285a2 | 4332 | ovs_mutex_unlock(&dev->mutex); |
0bf765f7 IS |
4333 | |
4334 | return error; | |
4335 | } | |
4336 | ||
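/* Note on synchronization: the transmit path reads dev->qos_conf
 * locklessly via ovsrcu_get(), so the setter below swaps the pointer
 * under dev->mutex and destroys the old configuration with
 * ovsrcu_postpone() only after all readers have quiesced. */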
4337 | static int | |
78bd47cf DDP |
4338 | netdev_dpdk_set_qos(struct netdev *netdev, const char *type, |
4339 | const struct smap *details) | |
0bf765f7 | 4340 | { |
d46285a2 | 4341 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
0bf765f7 | 4342 | const struct dpdk_qos_ops *new_ops = NULL; |
78bd47cf | 4343 | struct qos_conf *qos_conf, *new_qos_conf = NULL; |
0bf765f7 IS |
4344 | int error = 0; |
4345 | ||
d46285a2 | 4346 | ovs_mutex_lock(&dev->mutex); |
0bf765f7 | 4347 | |
78bd47cf | 4348 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); |
0bf765f7 | 4349 | |
78bd47cf DDP |
4350 | new_ops = qos_lookup_name(type); |
4351 | ||
4352 | if (!new_ops || !new_ops->qos_construct) { | |
4353 | new_qos_conf = NULL; | |
4354 | if (type && type[0]) { | |
4355 | error = EOPNOTSUPP; | |
0bf765f7 | 4356 | } |
44975bb0 | 4357 | } else if (qos_conf && qos_conf->ops == new_ops |
78bd47cf DDP |
4358 | && qos_conf->ops->qos_is_equal(qos_conf, details)) { |
4359 | new_qos_conf = qos_conf; | |
0bf765f7 | 4360 | } else { |
78bd47cf | 4361 | error = new_ops->qos_construct(details, &new_qos_conf); |
7ea266e9 IS |
4362 | } |
4363 | ||
7ea266e9 | 4364 | if (error) { |
78bd47cf DDP |
4365 | VLOG_ERR("Failed to set QoS type %s on port %s: %s", |
4366 | type, netdev->name, rte_strerror(error)); | |
4367 | } | |
4368 | ||
4369 | if (new_qos_conf != qos_conf) { | |
4370 | ovsrcu_set(&dev->qos_conf, new_qos_conf); | |
4371 | if (qos_conf) { | |
4372 | ovsrcu_postpone(qos_conf->ops->qos_destruct, qos_conf); | |
4373 | } | |
0bf765f7 IS |
4374 | } |
4375 | ||
d46285a2 | 4376 | ovs_mutex_unlock(&dev->mutex); |
78bd47cf | 4377 | |
0bf765f7 IS |
4378 | return error; |
4379 | } | |
4380 | ||
23c01b19 EC |
4381 | static int |
4382 | netdev_dpdk_get_queue(const struct netdev *netdev, uint32_t queue_id, | |
4383 | struct smap *details) | |
4384 | { | |
4385 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
4386 | struct qos_conf *qos_conf; | |
4387 | int error = 0; | |
4388 | ||
4389 | ovs_mutex_lock(&dev->mutex); | |
4390 | ||
4391 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); | |
4392 | if (!qos_conf || !qos_conf->ops || !qos_conf->ops->qos_queue_get) { | |
4393 | error = EOPNOTSUPP; | |
4394 | } else { | |
4395 | error = qos_conf->ops->qos_queue_get(details, queue_id, qos_conf); | |
4396 | } | |
4397 | ||
4398 | ovs_mutex_unlock(&dev->mutex); | |
4399 | ||
4400 | return error; | |
4401 | } | |
4402 | ||
4403 | static int | |
4404 | netdev_dpdk_set_queue(struct netdev *netdev, uint32_t queue_id, | |
4405 | const struct smap *details) | |
4406 | { | |
4407 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
4408 | struct qos_conf *qos_conf; | |
4409 | int error = 0; | |
4410 | ||
4411 | ovs_mutex_lock(&dev->mutex); | |
4412 | ||
4413 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); | |
4414 | if (!qos_conf || !qos_conf->ops || !qos_conf->ops->qos_queue_construct) { | |
4415 | error = EOPNOTSUPP; | |
4416 | } else { | |
4417 | error = qos_conf->ops->qos_queue_construct(details, queue_id, | |
4418 | qos_conf); | |
4419 | } | |
4420 | ||
4421 | if (error && error != EOPNOTSUPP) { | |
4422 | VLOG_ERR("Failed to set QoS queue %d on port %s: %s", | |
4423 | queue_id, netdev_get_name(netdev), rte_strerror(error)); | |
4424 | } | |
4425 | ||
4426 | ovs_mutex_unlock(&dev->mutex); | |
4427 | ||
4428 | return error; | |
4429 | } | |
4430 | ||
4431 | static int | |
4432 | netdev_dpdk_delete_queue(struct netdev *netdev, uint32_t queue_id) | |
4433 | { | |
4434 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
4435 | struct qos_conf *qos_conf; | |
4436 | int error = 0; | |
4437 | ||
4438 | ovs_mutex_lock(&dev->mutex); | |
4439 | ||
4440 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); | |
4441 | if (qos_conf && qos_conf->ops && qos_conf->ops->qos_queue_destruct) { | |
4442 | qos_conf->ops->qos_queue_destruct(qos_conf, queue_id); | |
4443 | } else { | |
4444 | error = EOPNOTSUPP; | |
4445 | } | |
4446 | ||
4447 | ovs_mutex_unlock(&dev->mutex); | |
4448 | ||
4449 | return error; | |
4450 | } | |
4451 | ||
4452 | static int | |
4453 | netdev_dpdk_get_queue_stats(const struct netdev *netdev, uint32_t queue_id, | |
4454 | struct netdev_queue_stats *stats) | |
4455 | { | |
4456 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
4457 | struct qos_conf *qos_conf; | |
4458 | int error = 0; | |
4459 | ||
4460 | ovs_mutex_lock(&dev->mutex); | |
4461 | ||
4462 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); | |
4463 | if (qos_conf && qos_conf->ops && qos_conf->ops->qos_queue_get_stats) { | |
4464 | qos_conf->ops->qos_queue_get_stats(qos_conf, queue_id, stats); | |
4465 | } else { | |
4466 | error = EOPNOTSUPP; | |
4467 | } | |
4468 | ||
4469 | ovs_mutex_unlock(&dev->mutex); | |
4470 | ||
4471 | return error; | |
4472 | } | |
4473 | ||
4474 | static int | |
4475 | netdev_dpdk_queue_dump_start(const struct netdev *netdev, void **statep) | |
4476 | { | |
4477 | int error = 0; | |
4478 | struct qos_conf *qos_conf; | |
4479 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
4480 | ||
4481 | ovs_mutex_lock(&dev->mutex); | |
4482 | ||
4483 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); | |
4484 | if (qos_conf && qos_conf->ops | |
4485 | && qos_conf->ops->qos_queue_dump_state_init) { | |
4486 | struct netdev_dpdk_queue_state *state; | |
4487 | ||
4488 | *statep = state = xmalloc(sizeof *state); | |
4489 | error = qos_conf->ops->qos_queue_dump_state_init(qos_conf, state); | |
4490 | } else { | |
4491 | error = EOPNOTSUPP; | |
4492 | } | |
4493 | ||
4494 | ovs_mutex_unlock(&dev->mutex); | |
4495 | ||
4496 | return error; | |
4497 | } | |
4498 | ||
4499 | static int | |
4500 | netdev_dpdk_queue_dump_next(const struct netdev *netdev, void *state_, | |
4501 | uint32_t *queue_idp, struct smap *details) | |
4502 | { | |
4503 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
4504 | struct netdev_dpdk_queue_state *state = state_; | |
4505 | struct qos_conf *qos_conf; | |
4506 | int error = EOF; | |
4507 | ||
4508 | ovs_mutex_lock(&dev->mutex); | |
4509 | ||
4510 | while (state->cur_queue < state->n_queues) { | |
4511 | uint32_t queue_id = state->queues[state->cur_queue++]; | |
4512 | ||
4513 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); | |
4514 | if (qos_conf && qos_conf->ops && qos_conf->ops->qos_queue_get) { | |
4515 | *queue_idp = queue_id; | |
4516 | error = qos_conf->ops->qos_queue_get(details, queue_id, qos_conf); | |
4517 | break; | |
4518 | } | |
4519 | } | |
4520 | ||
4521 | ovs_mutex_unlock(&dev->mutex); | |
4522 | ||
4523 | return error; | |
4524 | } | |
4525 | ||
4526 | static int | |
4527 | netdev_dpdk_queue_dump_done(const struct netdev *netdev OVS_UNUSED, | |
4528 | void *state_) | |
4529 | { | |
4530 | struct netdev_dpdk_queue_state *state = state_; | |
4531 | ||
4532 | free(state->queues); | |
4533 | free(state); | |
4534 | return 0; | |
4535 | } | |
4536 | ||
4537 | ||
4538 | ||
0bf765f7 IS |
4539 | /* egress-policer details */ |
4540 | ||
4541 | struct egress_policer { | |
4542 | struct qos_conf qos_conf; | |
4543 | struct rte_meter_srtcm_params app_srtcm_params; | |
4544 | struct rte_meter_srtcm egress_meter; | |
03f3f9c0 | 4545 | struct rte_meter_srtcm_profile egress_prof; |
0bf765f7 IS |
4546 | }; |
4547 | ||
78bd47cf DDP |
4548 | static void |
4549 | egress_policer_details_to_param(const struct smap *details, | |
4550 | struct rte_meter_srtcm_params *params) | |
0bf765f7 | 4551 | { |
78bd47cf DDP |
4552 | memset(params, 0, sizeof *params); |
4553 | params->cir = smap_get_ullong(details, "cir", 0); | |
4554 | params->cbs = smap_get_ullong(details, "cbs", 0); | |
4555 | params->ebs = 0; | |
0bf765f7 IS |
4556 | } |
4557 | ||
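/* With "ebs" forced to zero the srTCM (RFC 2697) meter effectively
 * becomes a two-color marker: packets are either green or red.
 * Illustrative configuration (example port name and rates; cir is in
 * bytes/s, cbs in bytes):
 *
 *   ovs-vsctl set port dpdk0 qos=@eg -- --id=@eg create qos \
 *       type=egress-policer other-config:cir=46000000 \
 *       other-config:cbs=2048
 */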
4558 | static int | |
78bd47cf DDP |
4559 | egress_policer_qos_construct(const struct smap *details, |
4560 | struct qos_conf **conf) | |
0bf765f7 | 4561 | { |
0bf765f7 | 4562 | struct egress_policer *policer; |
0bf765f7 IS |
4563 | int err = 0; |
4564 | ||
0bf765f7 IS |
4565 | policer = xmalloc(sizeof *policer); |
4566 | qos_conf_init(&policer->qos_conf, &egress_policer_ops); | |
78bd47cf | 4567 | egress_policer_details_to_param(details, &policer->app_srtcm_params); |
03f3f9c0 OM |
4568 | err = rte_meter_srtcm_profile_config(&policer->egress_prof, |
4569 | &policer->app_srtcm_params); | |
4570 | if (!err) { | |
4571 | err = rte_meter_srtcm_config(&policer->egress_meter, | |
4572 | &policer->egress_prof); | |
4573 | } | |
4574 | ||
78bd47cf DDP |
4575 | if (!err) { |
4576 | *conf = &policer->qos_conf; | |
4577 | } else { | |
03f3f9c0 | 4578 | VLOG_ERR("Could not create rte meter for egress policer"); |
7ea266e9 | 4579 | free(policer); |
78bd47cf | 4580 | *conf = NULL; |
7ea266e9 IS |
4581 | err = -err; |
4582 | } | |
0bf765f7 IS |
4583 | |
4584 | return err; | |
4585 | } | |
4586 | ||
4587 | static void | |
78bd47cf | 4588 | egress_policer_qos_destruct(struct qos_conf *conf) |
0bf765f7 IS |
4589 | { |
4590 | struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer, | |
78bd47cf | 4591 | qos_conf); |
0bf765f7 IS |
4592 | free(policer); |
4593 | } | |
4594 | ||
4595 | static int | |
78bd47cf | 4596 | egress_policer_qos_get(const struct qos_conf *conf, struct smap *details) |
0bf765f7 | 4597 | { |
78bd47cf DDP |
4598 | struct egress_policer *policer = |
4599 | CONTAINER_OF(conf, struct egress_policer, qos_conf); | |
4600 | ||
4601 | smap_add_format(details, "cir", "%"PRIu64, policer->app_srtcm_params.cir); | |
4602 | smap_add_format(details, "cbs", "%"PRIu64, policer->app_srtcm_params.cbs); | |
050c60bf | 4603 | |
0bf765f7 IS |
4604 | return 0; |
4605 | } | |
4606 | ||
78bd47cf | 4607 | static bool |
47a45d86 KT |
4608 | egress_policer_qos_is_equal(const struct qos_conf *conf, |
4609 | const struct smap *details) | |
0bf765f7 | 4610 | { |
78bd47cf DDP |
4611 | struct egress_policer *policer = |
4612 | CONTAINER_OF(conf, struct egress_policer, qos_conf); | |
4613 | struct rte_meter_srtcm_params params; | |
0bf765f7 | 4614 | |
78bd47cf | 4615 | egress_policer_details_to_param(details, ¶ms); |
7ea266e9 | 4616 | |
78bd47cf | 4617 | return !memcmp(¶ms, &policer->app_srtcm_params, sizeof params); |
0bf765f7 IS |
4618 | } |
4619 | ||
0bf765f7 | 4620 | static int |
3e90f7d7 | 4621 | egress_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts, int pkt_cnt, |
7d7ded7a | 4622 | bool should_steal) |
0bf765f7 | 4623 | { |
0bf765f7 | 4624 | int cnt = 0; |
78bd47cf DDP |
4625 | struct egress_policer *policer = |
4626 | CONTAINER_OF(conf, struct egress_policer, qos_conf); | |
0bf765f7 | 4627 | |
e61bdffc EC |
4628 | cnt = srtcm_policer_run_single_packet(&policer->egress_meter, |
4629 | &policer->egress_prof, pkts, | |
4630 | pkt_cnt, should_steal); | |
0bf765f7 IS |
4631 | |
4632 | return cnt; | |
4633 | } | |
4634 | ||
4635 | static const struct dpdk_qos_ops egress_policer_ops = { | |
23c01b19 EC |
4636 | .qos_name = "egress-policer", /* qos_name */ |
4637 | .qos_construct = egress_policer_qos_construct, | |
4638 | .qos_destruct = egress_policer_qos_destruct, | |
4639 | .qos_get = egress_policer_qos_get, | |
4640 | .qos_is_equal = egress_policer_qos_is_equal, | |
4641 | .qos_run = egress_policer_run | |
0bf765f7 IS |
4642 | }; |
4643 | ||
e61bdffc EC |
4644 | /* trtcm-policer details */ |
4645 | ||
4646 | struct trtcm_policer { | |
4647 | struct qos_conf qos_conf; | |
4648 | struct rte_meter_trtcm_rfc4115_params meter_params; | |
4649 | struct rte_meter_trtcm_rfc4115_profile meter_profile; | |
4650 | struct rte_meter_trtcm_rfc4115 meter; | |
4651 | struct netdev_queue_stats stats; | |
4652 | struct hmap queues; | |
4653 | }; | |
4654 | ||
4655 | struct trtcm_policer_queue { | |
4656 | struct hmap_node hmap_node; | |
4657 | uint32_t queue_id; | |
4658 | struct rte_meter_trtcm_rfc4115_params meter_params; | |
4659 | struct rte_meter_trtcm_rfc4115_profile meter_profile; | |
4660 | struct rte_meter_trtcm_rfc4115 meter; | |
4661 | struct netdev_queue_stats stats; | |
4662 | }; | |
4663 | ||
4664 | static void | |
4665 | trtcm_policer_details_to_param(const struct smap *details, | |
4666 | struct rte_meter_trtcm_rfc4115_params *params) | |
4667 | { | |
4668 | memset(params, 0, sizeof *params); | |
4669 | params->cir = smap_get_ullong(details, "cir", 0); | |
4670 | params->eir = smap_get_ullong(details, "eir", 0); | |
4671 | params->cbs = smap_get_ullong(details, "cbs", 0); | |
4672 | params->ebs = smap_get_ullong(details, "ebs", 0); | |
4673 | } | |
4674 | ||
4675 | static void | |
4676 | trtcm_policer_param_to_detail( | |
4677 | const struct rte_meter_trtcm_rfc4115_params *params, | |
4678 | struct smap *details) | |
4679 | { | |
4680 | smap_add_format(details, "cir", "%"PRIu64, params->cir); | |
4681 | smap_add_format(details, "eir", "%"PRIu64, params->eir); | |
4682 | smap_add_format(details, "cbs", "%"PRIu64, params->cbs); | |
4683 | smap_add_format(details, "ebs", "%"PRIu64, params->ebs); | |
4684 | } | |
4685 | ||
4686 | ||
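/* Illustrative configuration for the two-rate policer (example port
 * name and rates; queue 0 acts as the default queue for packets whose
 * skb_priority matches no other queue):
 *
 *   ovs-vsctl set port dpdk0 qos=@qos -- \
 *       --id=@qos create qos type=trtcm-policer \
 *       other-config:cir=52000 other-config:cbs=2100 \
 *       other-config:eir=52000 other-config:ebs=2100 \
 *       queues:0=@q0 -- \
 *       --id=@q0 create queue other-config:cir=41600000 \
 *       other-config:cbs=2100
 */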
4687 | static int | |
4688 | trtcm_policer_qos_construct(const struct smap *details, | |
4689 | struct qos_conf **conf) | |
4690 | { | |
4691 | struct trtcm_policer *policer; | |
4692 | int err = 0; | |
4693 | ||
4694 | policer = xmalloc(sizeof *policer); | |
4695 | qos_conf_init(&policer->qos_conf, &trtcm_policer_ops); | |
4696 | trtcm_policer_details_to_param(details, &policer->meter_params); | |
4697 | err = rte_meter_trtcm_rfc4115_profile_config(&policer->meter_profile, | |
4698 | &policer->meter_params); | |
4699 | if (!err) { | |
4700 | err = rte_meter_trtcm_rfc4115_config(&policer->meter, | |
4701 | &policer->meter_profile); | |
4702 | } | |
4703 | ||
4704 | if (!err) { | |
4705 | *conf = &policer->qos_conf; | |
4706 | memset(&policer->stats, 0, sizeof policer->stats); | |
4707 | hmap_init(&policer->queues); | |
4708 | } else { | |
4709 | free(policer); | |
4710 | *conf = NULL; | |
4711 | err = -err; | |
4712 | } | |
4713 | ||
4714 | return err; | |
4715 | } | |
4716 | ||
4717 | static void | |
4718 | trtcm_policer_qos_destruct(struct qos_conf *conf) | |
4719 | { | |
4720 | struct trtcm_policer_queue *queue, *next_queue; | |
4721 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4722 | qos_conf); | |
4723 | ||
4724 | HMAP_FOR_EACH_SAFE (queue, next_queue, hmap_node, &policer->queues) { | |
4725 | hmap_remove(&policer->queues, &queue->hmap_node); | |
4726 | free(queue); | |
4727 | } | |
4728 | hmap_destroy(&policer->queues); | |
4729 | free(policer); | |
4730 | } | |
4731 | ||
4732 | static int | |
4733 | trtcm_policer_qos_get(const struct qos_conf *conf, struct smap *details) | |
4734 | { | |
4735 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4736 | qos_conf); | |
4737 | ||
4738 | trtcm_policer_param_to_detail(&policer->meter_params, details); | |
4739 | return 0; | |
4740 | } | |
4741 | ||
4742 | static bool | |
4743 | trtcm_policer_qos_is_equal(const struct qos_conf *conf, | |
4744 | const struct smap *details) | |
4745 | { | |
4746 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4747 | qos_conf); | |
4748 | struct rte_meter_trtcm_rfc4115_params params; | |
4749 | ||
4750 | trtcm_policer_details_to_param(details, ¶ms); | |
4751 | ||
4752 | return !memcmp(¶ms, &policer->meter_params, sizeof params); | |
4753 | } | |
4754 | ||
4755 | static struct trtcm_policer_queue * | |
4756 | trtcm_policer_qos_find_queue(struct trtcm_policer *policer, uint32_t queue_id) | |
4757 | { | |
4758 | struct trtcm_policer_queue *queue; | |
4759 | HMAP_FOR_EACH_WITH_HASH (queue, hmap_node, hash_2words(queue_id, 0), | |
4760 | &policer->queues) { | |
4761 | if (queue->queue_id == queue_id) { | |
4762 | return queue; | |
4763 | } | |
4764 | } | |
4765 | return NULL; | |
4766 | } | |
4767 | ||
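/* Each packet is metered twice: first against its queue's meter in
 * color-blind mode, then against the port-level meter in color-aware
 * mode, so a packet already marked red by its queue cannot be promoted
 * back to green at the port level. */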
4768 | static inline bool | |
4769 | trtcm_policer_run_single_packet(struct trtcm_policer *policer, | |
4770 | struct rte_mbuf *pkt, uint64_t time) | |
4771 | { | |
4772 | enum rte_color pkt_color; | |
4773 | struct trtcm_policer_queue *queue; | |
4774 | uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct rte_ether_hdr); | |
4775 | struct dp_packet *dpkt = CONTAINER_OF(pkt, struct dp_packet, mbuf); | |
4776 | ||
4777 | queue = trtcm_policer_qos_find_queue(policer, dpkt->md.skb_priority); | |
4778 | if (!queue) { | |
4779 | /* If no queue is found, use the default queue, which MUST exist. */ | |
4780 | queue = trtcm_policer_qos_find_queue(policer, 0); | |
4781 | if (!queue) { | |
4782 | return false; | |
4783 | } | |
4784 | } | |
4785 | ||
4786 | pkt_color = rte_meter_trtcm_rfc4115_color_blind_check(&queue->meter, | |
4787 | &queue->meter_profile, | |
4788 | time, | |
4789 | pkt_len); | |
4790 | ||
4791 | if (pkt_color == RTE_COLOR_RED) { | |
4792 | queue->stats.tx_errors++; | |
4793 | } else { | |
4794 | queue->stats.tx_bytes += pkt_len; | |
4795 | queue->stats.tx_packets++; | |
4796 | } | |
4797 | ||
4798 | pkt_color = rte_meter_trtcm_rfc4115_color_aware_check(&policer->meter, | |
4799 | &policer->meter_profile, | |
4800 | time, pkt_len, | |
4801 | pkt_color); | |
4802 | ||
4803 | if (pkt_color == RTE_COLOR_RED) { | |
4804 | policer->stats.tx_errors++; | |
4805 | return false; | |
4806 | } | |
4807 | ||
4808 | policer->stats.tx_bytes += pkt_len; | |
4809 | policer->stats.tx_packets++; | |
4810 | return true; | |
4811 | } | |
4812 | ||
4813 | static int | |
4814 | trtcm_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts, int pkt_cnt, | |
4815 | bool should_steal) | |
4816 | { | |
4817 | int i = 0; | |
4818 | int cnt = 0; | |
4819 | struct rte_mbuf *pkt = NULL; | |
4820 | uint64_t current_time = rte_rdtsc(); | |
4821 | ||
4822 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4823 | qos_conf); | |
4824 | ||
4825 | for (i = 0; i < pkt_cnt; i++) { | |
4826 | pkt = pkts[i]; | |
4827 | ||
4828 | if (trtcm_policer_run_single_packet(policer, pkt, current_time)) { | |
4829 | if (cnt != i) { | |
4830 | pkts[cnt] = pkt; | |
4831 | } | |
4832 | cnt++; | |
4833 | } else { | |
4834 | if (should_steal) { | |
4835 | rte_pktmbuf_free(pkt); | |
4836 | } | |
4837 | } | |
4838 | } | |
4839 | return cnt; | |
4840 | } | |
4841 | ||
4842 | static int | |
4843 | trtcm_policer_qos_queue_construct(const struct smap *details, | |
4844 | uint32_t queue_id, struct qos_conf *conf) | |
4845 | { | |
4846 | int err = 0; | |
4847 | struct trtcm_policer_queue *queue; | |
4848 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4849 | qos_conf); | |
4850 | ||
4851 | queue = trtcm_policer_qos_find_queue(policer, queue_id); | |
4852 | if (!queue) { | |
4853 | queue = xmalloc(sizeof *queue); | |
4854 | queue->queue_id = queue_id; | |
4855 | memset(&queue->stats, 0, sizeof queue->stats); | |
4856 | queue->stats.created = time_msec(); | |
4857 | hmap_insert(&policer->queues, &queue->hmap_node, | |
4858 | hash_2words(queue_id, 0)); | |
4859 | } | |
4860 | if (queue_id == 0 && smap_is_empty(details)) { | |
4861 | /* No default queue configured, use the port values. */ | |
4862 | memcpy(&queue->meter_params, &policer->meter_params, | |
4863 | sizeof queue->meter_params); | |
4864 | } else { | |
4865 | trtcm_policer_details_to_param(details, &queue->meter_params); | |
4866 | } | |
4867 | ||
4868 | err = rte_meter_trtcm_rfc4115_profile_config(&queue->meter_profile, | |
4869 | &queue->meter_params); | |
4870 | ||
4871 | if (!err) { | |
4872 | err = rte_meter_trtcm_rfc4115_config(&queue->meter, | |
4873 | &queue->meter_profile); | |
4874 | } | |
4875 | if (err) { | |
4876 | hmap_remove(&policer->queues, &queue->hmap_node); | |
4877 | free(queue); | |
4878 | err = -err; | |
4879 | } | |
4880 | return err; | |
4881 | } | |
4882 | ||
4883 | static void | |
4884 | trtcm_policer_qos_queue_destruct(struct qos_conf *conf, uint32_t queue_id) | |
4885 | { | |
4886 | struct trtcm_policer_queue *queue; | |
4887 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4888 | qos_conf); | |
4889 | ||
4890 | queue = trtcm_policer_qos_find_queue(policer, queue_id); | |
4891 | if (queue) { | |
4892 | hmap_remove(&policer->queues, &queue->hmap_node); | |
4893 | free(queue); | |
4894 | } | |
4895 | } | |
4896 | ||
4897 | static int | |
4898 | trtcm_policer_qos_queue_get(struct smap *details, uint32_t queue_id, | |
4899 | const struct qos_conf *conf) | |
4900 | { | |
4901 | struct trtcm_policer_queue *queue; | |
4902 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4903 | qos_conf); | |
4904 | ||
4905 | queue = trtcm_policer_qos_find_queue(policer, queue_id); | |
4906 | if (!queue) { | |
4907 | return EINVAL; | |
4908 | } | |
4909 | ||
4910 | trtcm_policer_param_to_detail(&queue->meter_params, details); | |
4911 | return 0; | |
4912 | } | |
4913 | ||
4914 | static int | |
4915 | trtcm_policer_qos_queue_get_stats(const struct qos_conf *conf, | |
4916 | uint32_t queue_id, | |
4917 | struct netdev_queue_stats *stats) | |
4918 | { | |
4919 | struct trtcm_policer_queue *queue; | |
4920 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4921 | qos_conf); | |
4922 | ||
4923 | queue = trtcm_policer_qos_find_queue(policer, queue_id); | |
4924 | if (!queue) { | |
4925 | return EINVAL; | |
4926 | } | |
4927 | memcpy(stats, &queue->stats, sizeof *stats); | |
4928 | return 0; | |
4929 | } | |
4930 | ||
4931 | static int | |
4932 | trtcm_policer_qos_queue_dump_state_init(const struct qos_conf *conf, | |
4933 | struct netdev_dpdk_queue_state *state) | |
4934 | { | |
4935 | uint32_t i = 0; | |
4936 | struct trtcm_policer_queue *queue; | |
4937 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4938 | qos_conf); | |
4939 | ||
4940 | state->n_queues = hmap_count(&policer->queues); | |
4941 | state->cur_queue = 0; | |
4942 | state->queues = xmalloc(state->n_queues * sizeof *state->queues); | |
4943 | ||
4944 | HMAP_FOR_EACH (queue, hmap_node, &policer->queues) { | |
4945 | state->queues[i++] = queue->queue_id; | |
4946 | } | |
4947 | return 0; | |
4948 | } | |
4949 | ||
4950 | static const struct dpdk_qos_ops trtcm_policer_ops = { | |
4951 | .qos_name = "trtcm-policer", | |
4952 | .qos_construct = trtcm_policer_qos_construct, | |
4953 | .qos_destruct = trtcm_policer_qos_destruct, | |
4954 | .qos_get = trtcm_policer_qos_get, | |
4955 | .qos_is_equal = trtcm_policer_qos_is_equal, | |
4956 | .qos_run = trtcm_policer_run, | |
4957 | .qos_queue_construct = trtcm_policer_qos_queue_construct, | |
4958 | .qos_queue_destruct = trtcm_policer_qos_queue_destruct, | |
4959 | .qos_queue_get = trtcm_policer_qos_queue_get, | |
4960 | .qos_queue_get_stats = trtcm_policer_qos_queue_get_stats, | |
4961 | .qos_queue_dump_state_init = trtcm_policer_qos_queue_dump_state_init | |
4962 | }; | |
4963 | ||
050c60bf DDP |
4964 | static int |
4965 | netdev_dpdk_reconfigure(struct netdev *netdev) | |
4966 | { | |
4967 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
4968 | int err = 0; | |
4969 | ||
050c60bf DDP |
4970 | ovs_mutex_lock(&dev->mutex); |
4971 | ||
4972 | if (netdev->n_txq == dev->requested_n_txq | |
0072e931 | 4973 | && netdev->n_rxq == dev->requested_n_rxq |
b685696b | 4974 | && dev->mtu == dev->requested_mtu |
f8b64a61 | 4975 | && dev->lsc_interrupt_mode == dev->requested_lsc_interrupt_mode |
b685696b | 4976 | && dev->rxq_size == dev->requested_rxq_size |
bd4e172b | 4977 | && dev->txq_size == dev->requested_txq_size |
f4336f50 | 4978 | && eth_addr_equals(dev->hwaddr, dev->requested_hwaddr) |
606f6650 | 4979 | && dev->socket_id == dev->requested_socket_id |
988fd463 | 4980 | && dev->started && !dev->reset_needed) { |
050c60bf DDP |
4981 | /* Reconfiguration is unnecessary. */ | |
4982 | ||
4983 | goto out; | |
4984 | } | |
4985 | ||
988fd463 EC |
4986 | if (dev->reset_needed) { |
4987 | rte_eth_dev_reset(dev->port_id); | |
4988 | if_notifier_manual_report(); | |
4989 | dev->reset_needed = false; | |
4990 | } else { | |
4991 | rte_eth_dev_stop(dev->port_id); | |
4992 | } | |
4993 | ||
606f6650 | 4994 | dev->started = false; |
050c60bf | 4995 | |
d555d9bd | 4996 | err = netdev_dpdk_mempool_configure(dev); |
b6b26021 | 4997 | if (err && err != EEXIST) { |
d555d9bd | 4998 | goto out; |
0072e931 MK |
4999 | } |
5000 | ||
f8b64a61 RM |
5001 | dev->lsc_interrupt_mode = dev->requested_lsc_interrupt_mode; |
5002 | ||
050c60bf DDP |
5003 | netdev->n_txq = dev->requested_n_txq; |
5004 | netdev->n_rxq = dev->requested_n_rxq; | |
5005 | ||
b685696b CL |
5006 | dev->rxq_size = dev->requested_rxq_size; |
5007 | dev->txq_size = dev->requested_txq_size; | |
5008 | ||
050c60bf | 5009 | rte_free(dev->tx_q); |
f4336f50 GR |
5010 | |
5011 | if (!eth_addr_equals(dev->hwaddr, dev->requested_hwaddr)) { | |
5012 | err = netdev_dpdk_set_etheraddr__(dev, dev->requested_hwaddr); | |
5013 | if (err) { | |
5014 | goto out; | |
5015 | } | |
5016 | } | |
5017 | ||
050c60bf | 5018 | err = dpdk_eth_dev_init(dev); |
29cf9c1b FL |
5019 | if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) { |
5020 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_TSO; | |
5021 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_CKSUM; | |
8c5163fe | 5022 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_UDP_CKSUM; |
29cf9c1b | 5023 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_IPV4_CKSUM; |
35b5586b FL |
5024 | if (dev->hw_ol_features & NETDEV_TX_SCTP_CHECKSUM_OFFLOAD) { |
5025 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_SCTP_CKSUM; | |
5026 | } | |
29cf9c1b FL |
5027 | } |
5028 | ||
f4336f50 GR |
5029 | /* If both requested and actual hwaddr were previously |
5030 | * unset (initialized to 0), then first device init above | |
5031 | * will have set actual hwaddr to something new. | |
5032 | * This would trigger spurious MAC reconfiguration unless | |
5033 | * the requested MAC is kept in sync. | |
5034 | * | |
5035 | * This is harmless in case requested_hwaddr was | |
5036 | * configured by the user, as netdev_dpdk_set_etheraddr__() | |
5038 | * must have succeeded for us to get to this point. | |
5038 | */ | |
5039 | dev->requested_hwaddr = dev->hwaddr; | |
5040 | ||
eff23640 DDP |
5041 | dev->tx_q = netdev_dpdk_alloc_txq(netdev->n_txq); |
5042 | if (!dev->tx_q) { | |
5043 | err = ENOMEM; | |
5044 | } | |
050c60bf | 5045 | |
0072e931 MK |
5046 | netdev_change_seq_changed(netdev); |
5047 | ||
050c60bf | 5048 | out: |
050c60bf | 5049 | ovs_mutex_unlock(&dev->mutex); |
050c60bf DDP |
5050 | return err; |
5051 | } | |
5052 | ||
7f381c2e | 5053 | static int |
2d24d165 | 5054 | dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev) |
2d24d165 | 5055 | OVS_REQUIRES(dev->mutex) |
050c60bf | 5056 | { |
2d24d165 CL |
5057 | dev->up.n_txq = dev->requested_n_txq; |
5058 | dev->up.n_rxq = dev->requested_n_rxq; | |
96e9b168 | 5059 | int err; |
050c60bf | 5060 | |
35c91567 DM |
5061 | /* Always keep RX queue 0 enabled for implementations that won't |
5062 | * report vring states. */ | |
5063 | dev->vhost_rxq_enabled[0] = true; | |
5064 | ||
81acebda IM |
5065 | /* Enable TX queue 0 by default if it wasn't disabled. */ |
5066 | if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) { | |
5067 | dev->tx_q[0].map = 0; | |
5068 | } | |
5069 | ||
29cf9c1b FL |
5070 | if (userspace_tso_enabled()) { |
5071 | dev->hw_ol_features |= NETDEV_TX_TSO_OFFLOAD; | |
5072 | VLOG_DBG("%s: TSO enabled on vhost port", netdev_get_name(&dev->up)); | |
5073 | } | |
5074 | ||
81acebda IM |
5075 | netdev_dpdk_remap_txqs(dev); |
5076 | ||
d555d9bd | 5077 | err = netdev_dpdk_mempool_configure(dev); |
b6b26021 | 5078 | if (!err) { |
43307ad0 | 5079 | /* A new mempool was created or re-used. */ |
d555d9bd | 5080 | netdev_change_seq_changed(&dev->up); |
03f3f9c0 | 5081 | } else if (err != EEXIST) { |
b6b26021 | 5082 | return err; |
db8f13b0 | 5083 | } |
0a0f39df | 5084 | if (netdev_dpdk_get_vid(dev) >= 0) { |
894af647 | 5085 | if (dev->vhost_reconfigured == false) { |
5086 | dev->vhost_reconfigured = true; | |
5087 | /* Carrier status may need updating. */ | |
5088 | netdev_change_seq_changed(&dev->up); | |
5089 | } | |
81acebda | 5090 | } |
7f381c2e DDP |
5091 | |
5092 | return 0; | |
2d24d165 CL |
5093 | } |
5094 | ||
5095 | static int | |
5096 | netdev_dpdk_vhost_reconfigure(struct netdev *netdev) | |
5097 | { | |
5098 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
7f381c2e | 5099 | int err; |
2d24d165 | 5100 | |
2d24d165 | 5101 | ovs_mutex_lock(&dev->mutex); |
7f381c2e | 5102 | err = dpdk_vhost_reconfigure_helper(dev); |
2d24d165 | 5103 | ovs_mutex_unlock(&dev->mutex); |
7f381c2e DDP |
5104 | |
5105 | return err; | |
2d24d165 CL |
5106 | } |
5107 | ||
5108 | static int | |
5109 | netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev) | |
5110 | { | |
5111 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
7f381c2e | 5112 | int err; |
a14d1cc8 | 5113 | uint64_t vhost_flags = 0; |
514950d3 | 5114 | uint64_t vhost_unsup_flags; |
2d24d165 | 5115 | |
2d24d165 CL |
5116 | ovs_mutex_lock(&dev->mutex); |
5117 | ||
c1ff66ac CL |
5118 | /* Configure vHost client mode if requested and if the following criteria |
5119 | * are met: | |
2d24d165 CL |
5120 | * 1. Device hasn't been registered yet. |
5121 | * 2. A path has been specified. | |
c1ff66ac | 5122 | */ |
bb9d2623 | 5123 | if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT) && dev->vhost_id) { |
a14d1cc8 MK |
5124 | /* Register client-mode device. */ |
5125 | vhost_flags |= RTE_VHOST_USER_CLIENT; | |
5126 | ||
e666e8e0 FL |
5127 | /* There is no support for multi-segment buffers. */ | |
5128 | vhost_flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT; | |
5129 | ||
a14d1cc8 MK |
5130 | /* Enable IOMMU support, if explicitly requested. */ |
5131 | if (dpdk_vhost_iommu_enabled()) { | |
5132 | vhost_flags |= RTE_VHOST_USER_IOMMU_SUPPORT; | |
5133 | } | |
10087cba | 5134 | |
30e834dc LB |
5135 | /* Enable POSTCOPY support, if explicitly requested. */ |
5136 | if (dpdk_vhost_postcopy_enabled()) { | |
5137 | vhost_flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT; | |
5138 | } | |
5139 | ||
29cf9c1b FL |
5140 | /* Enable External Buffers if TCP Segmentation Offload is enabled. */ |
5141 | if (userspace_tso_enabled()) { | |
5142 | vhost_flags |= RTE_VHOST_USER_EXTBUF_SUPPORT; | |
5143 | } | |
5144 | ||
a14d1cc8 | 5145 | err = rte_vhost_driver_register(dev->vhost_id, vhost_flags); |
c1ff66ac | 5146 | if (err) { |
2d24d165 CL |
5147 | VLOG_ERR("vhost-user device setup failure for device %s\n", |
5148 | dev->vhost_id); | |
7f381c2e | 5149 | goto unlock; |
c1ff66ac | 5150 | } else { |
2d24d165 | 5151 | /* Configuration successful. */ |
a14d1cc8 | 5152 | dev->vhost_driver_flags |= vhost_flags; |
2d24d165 CL |
5153 | VLOG_INFO("vHost User device '%s' created in 'client' mode, " |
5154 | "using client socket '%s'", | |
5155 | dev->up.name, dev->vhost_id); | |
c1ff66ac | 5156 | } |
f3e7ec25 MW |
5157 | |
5158 | err = rte_vhost_driver_callback_register(dev->vhost_id, | |
5159 | &virtio_net_device_ops); | |
5160 | if (err) { | |
5161 | VLOG_ERR("rte_vhost_driver_callback_register failed for " | |
5162 | "vhost user client port: %s\n", dev->up.name); | |
5163 | goto unlock; | |
5164 | } | |
5165 | ||
29cf9c1b FL |
5166 | if (userspace_tso_enabled()) { |
5167 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_TSO; | |
5168 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_CKSUM; | |
8c5163fe | 5169 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_UDP_CKSUM; |
35b5586b | 5170 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_SCTP_CKSUM; |
29cf9c1b | 5171 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_IPV4_CKSUM; |
514950d3 FL |
5172 | vhost_unsup_flags = 1ULL << VIRTIO_NET_F_HOST_ECN |
5173 | | 1ULL << VIRTIO_NET_F_HOST_UFO; | |
29cf9c1b | 5174 | } else { |
514950d3 FL |
5175 | /* This disables checksum offloading and all the features |
5176 | * that depend on it (TSO, UFO, ECN) according to the virtio | |
5177 | * specification. */ | |
5178 | vhost_unsup_flags = 1ULL << VIRTIO_NET_F_CSUM; | |
5179 | } | |
5180 | ||
5181 | err = rte_vhost_driver_disable_features(dev->vhost_id, | |
5182 | vhost_unsup_flags); | |
5183 | if (err) { | |
5184 | VLOG_ERR("rte_vhost_driver_disable_features failed for " | |
5185 | "vhost user client port: %s\n", dev->up.name); | |
5186 | goto unlock; | |
f3e7ec25 MW |
5187 | } |
5188 | ||
5189 | err = rte_vhost_driver_start(dev->vhost_id); | |
5190 | if (err) { | |
5191 | VLOG_ERR("rte_vhost_driver_start failed for vhost user " | |
5192 | "client port: %s\n", dev->up.name); | |
5193 | goto unlock; | |
5194 | } | |
c1ff66ac CL |
5195 | } |
5196 | ||
7f381c2e DDP |
5197 | err = dpdk_vhost_reconfigure_helper(dev); |
5198 | ||
5199 | unlock: | |
050c60bf | 5200 | ovs_mutex_unlock(&dev->mutex); |
050c60bf | 5201 | |
7f381c2e | 5202 | return err; |
050c60bf DDP |
5203 | } |
5204 | ||
2f7f9284 EB |
5205 | int |
5206 | netdev_dpdk_get_port_id(struct netdev *netdev) | |
5207 | { | |
5208 | struct netdev_dpdk *dev; | |
5209 | int ret = -1; | |
5210 | ||
5211 | if (!is_dpdk_class(netdev->netdev_class)) { | |
5212 | goto out; | |
5213 | } | |
5214 | ||
5215 | dev = netdev_dpdk_cast(netdev); | |
5216 | ovs_mutex_lock(&dev->mutex); | |
5217 | ret = dev->port_id; | |
5218 | ovs_mutex_unlock(&dev->mutex); | |
5219 | out: | |
5220 | return ret; | |
5221 | } | |
5222 | ||
5fc5c50f IM |
5223 | bool |
5224 | netdev_dpdk_flow_api_supported(struct netdev *netdev) | |
5225 | { | |
5226 | struct netdev_dpdk *dev; | |
5227 | bool ret = false; | |
5228 | ||
5229 | if (!is_dpdk_class(netdev->netdev_class)) { | |
5230 | goto out; | |
5231 | } | |
5232 | ||
5233 | dev = netdev_dpdk_cast(netdev); | |
5234 | ovs_mutex_lock(&dev->mutex); | |
5235 | if (dev->type == DPDK_DEV_ETH) { | |
5236 | /* TODO: Check if we are able to offload some minimal flow. */ | |
5237 | ret = true; | |
5238 | } | |
5239 | ovs_mutex_unlock(&dev->mutex); | |
5240 | out: | |
5241 | return ret; | |
5242 | } | |
5243 | ||
6775bdfc RBY |
5244 | int |
5245 | netdev_dpdk_rte_flow_destroy(struct netdev *netdev, | |
5246 | struct rte_flow *rte_flow, | |
5247 | struct rte_flow_error *error) | |
5248 | { | |
5249 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
5250 | int ret; | |
5251 | ||
5252 | ovs_mutex_lock(&dev->mutex); | |
5253 | ret = rte_flow_destroy(dev->port_id, rte_flow, error); | |
5254 | ovs_mutex_unlock(&dev->mutex); | |
5255 | return ret; | |
5256 | } | |
5257 | ||
5258 | struct rte_flow * | |
5259 | netdev_dpdk_rte_flow_create(struct netdev *netdev, | |
5260 | const struct rte_flow_attr *attr, | |
5261 | const struct rte_flow_item *items, | |
5262 | const struct rte_flow_action *actions, | |
5263 | struct rte_flow_error *error) | |
5264 | { | |
5265 | struct rte_flow *flow; | |
5266 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
5267 | ||
5268 | ovs_mutex_lock(&dev->mutex); | |
5269 | flow = rte_flow_create(dev->port_id, attr, items, actions, error); | |
5270 | ovs_mutex_unlock(&dev->mutex); | |
5271 | return flow; | |
5272 | } | |
e8a2b5bf | 5273 | |
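
/* Illustrative sketch (not part of this file): building a minimal
 * ingress rule with the wrapper above.  The helper name is
 * hypothetical and error handling is elided. */
#if 0
static struct rte_flow *
example_create_drop_flow(struct netdev *netdev)
{
    const struct rte_flow_attr attr = { .ingress = 1 };
    const struct rte_flow_item items[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    const struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_DROP },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error error;

    return netdev_dpdk_rte_flow_create(netdev, &attr, items, actions,
                                       &error);
}
#endif
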
63556d85 EB |
5274 | int |
5275 | netdev_dpdk_rte_flow_query_count(struct netdev *netdev, | |
5276 | struct rte_flow *rte_flow, | |
5277 | struct rte_flow_query_count *query, | |
5278 | struct rte_flow_error *error) | |
5279 | { | |
5280 | struct rte_flow_action_count count = { .shared = 0, .id = 0 }; | |
5281 | const struct rte_flow_action actions[] = { | |
5282 | { | |
5283 | .type = RTE_FLOW_ACTION_TYPE_COUNT, | |
5284 | .conf = &count, | |
5285 | }, | |
5286 | { | |
5287 | .type = RTE_FLOW_ACTION_TYPE_END, | |
5288 | }, | |
5289 | }; | |
5290 | struct netdev_dpdk *dev; | |
5291 | int ret; | |
5292 | ||
5293 | if (!is_dpdk_class(netdev->netdev_class)) { | |
5294 | return -1; | |
5295 | } | |
5296 | ||
5297 | dev = netdev_dpdk_cast(netdev); | |
5298 | ovs_mutex_lock(&dev->mutex); | |
5299 | ret = rte_flow_query(dev->port_id, rte_flow, actions, query, error); | |
5300 | ovs_mutex_unlock(&dev->mutex); | |
5301 | return ret; | |
5302 | } | |
5303 | ||
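/* The netdev classes below are composed from two macros:
 * NETDEV_DPDK_CLASS_COMMON carries the callbacks shared by physical
 * and vhost ports, while NETDEV_DPDK_CLASS_BASE adds the ones that
 * only apply to real DPDK ethdevs (link state, HW stats, features,
 * reconfiguration). */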
89c09c1c BP |
5304 | #define NETDEV_DPDK_CLASS_COMMON \ |
5305 | .is_pmd = true, \ | |
5306 | .alloc = netdev_dpdk_alloc, \ | |
5307 | .dealloc = netdev_dpdk_dealloc, \ | |
5308 | .get_config = netdev_dpdk_get_config, \ | |
5309 | .get_numa_id = netdev_dpdk_get_numa_id, \ | |
5310 | .set_etheraddr = netdev_dpdk_set_etheraddr, \ | |
5311 | .get_etheraddr = netdev_dpdk_get_etheraddr, \ | |
5312 | .get_mtu = netdev_dpdk_get_mtu, \ | |
5313 | .set_mtu = netdev_dpdk_set_mtu, \ | |
5314 | .get_ifindex = netdev_dpdk_get_ifindex, \ | |
5315 | .get_carrier_resets = netdev_dpdk_get_carrier_resets, \ | |
5316 | .set_miimon_interval = netdev_dpdk_set_miimon, \ | |
5317 | .set_policing = netdev_dpdk_set_policing, \ | |
5318 | .get_qos_types = netdev_dpdk_get_qos_types, \ | |
5319 | .get_qos = netdev_dpdk_get_qos, \ | |
5320 | .set_qos = netdev_dpdk_set_qos, \ | |
23c01b19 EC |
5321 | .get_queue = netdev_dpdk_get_queue, \ |
5322 | .set_queue = netdev_dpdk_set_queue, \ | |
5323 | .delete_queue = netdev_dpdk_delete_queue, \ | |
5324 | .get_queue_stats = netdev_dpdk_get_queue_stats, \ | |
5325 | .queue_dump_start = netdev_dpdk_queue_dump_start, \ | |
5326 | .queue_dump_next = netdev_dpdk_queue_dump_next, \ | |
5327 | .queue_dump_done = netdev_dpdk_queue_dump_done, \ | |
89c09c1c BP |
5328 | .update_flags = netdev_dpdk_update_flags, \ |
5329 | .rxq_alloc = netdev_dpdk_rxq_alloc, \ | |
5330 | .rxq_construct = netdev_dpdk_rxq_construct, \ | |
5331 | .rxq_destruct = netdev_dpdk_rxq_destruct, \ | |
c0af6425 | 5332 | .rxq_dealloc = netdev_dpdk_rxq_dealloc |
89c09c1c BP |
5333 | |
5334 | #define NETDEV_DPDK_CLASS_BASE \ | |
5335 | NETDEV_DPDK_CLASS_COMMON, \ | |
5336 | .init = netdev_dpdk_class_init, \ | |
5337 | .destruct = netdev_dpdk_destruct, \ | |
5338 | .set_tx_multiq = netdev_dpdk_set_tx_multiq, \ | |
5339 | .get_carrier = netdev_dpdk_get_carrier, \ | |
5340 | .get_stats = netdev_dpdk_get_stats, \ | |
5341 | .get_custom_stats = netdev_dpdk_get_custom_stats, \ | |
5342 | .get_features = netdev_dpdk_get_features, \ | |
5343 | .get_status = netdev_dpdk_get_status, \ | |
5344 | .reconfigure = netdev_dpdk_reconfigure, \ | |
5fc5c50f | 5345 | .rxq_recv = netdev_dpdk_rxq_recv |
89c09c1c BP |
5346 | |
5347 | static const struct netdev_class dpdk_class = { | |
5348 | .type = "dpdk", | |
5349 | NETDEV_DPDK_CLASS_BASE, | |
5350 | .construct = netdev_dpdk_construct, | |
5351 | .set_config = netdev_dpdk_set_config, | |
5352 | .send = netdev_dpdk_eth_send, | |
5353 | }; | |
5354 | ||
89c09c1c BP |
5355 | static const struct netdev_class dpdk_vhost_class = { |
5356 | .type = "dpdkvhostuser", | |
5357 | NETDEV_DPDK_CLASS_COMMON, | |
5358 | .construct = netdev_dpdk_vhost_construct, | |
5359 | .destruct = netdev_dpdk_vhost_destruct, | |
5360 | .send = netdev_dpdk_vhost_send, | |
5361 | .get_carrier = netdev_dpdk_vhost_get_carrier, | |
5362 | .get_stats = netdev_dpdk_vhost_get_stats, | |
b99ab8aa | 5363 | .get_custom_stats = netdev_dpdk_get_sw_custom_stats, |
89c09c1c BP |
5364 | .get_status = netdev_dpdk_vhost_user_get_status, |
5365 | .reconfigure = netdev_dpdk_vhost_reconfigure, | |
35c91567 DM |
5366 | .rxq_recv = netdev_dpdk_vhost_rxq_recv, |
5367 | .rxq_enabled = netdev_dpdk_vhost_rxq_enabled, | |
89c09c1c BP |
5368 | }; |
5369 | ||
5370 | static const struct netdev_class dpdk_vhost_client_class = { | |
5371 | .type = "dpdkvhostuserclient", | |
5372 | NETDEV_DPDK_CLASS_COMMON, | |
5373 | .construct = netdev_dpdk_vhost_client_construct, | |
5374 | .destruct = netdev_dpdk_vhost_destruct, | |
5375 | .set_config = netdev_dpdk_vhost_client_set_config, | |
5376 | .send = netdev_dpdk_vhost_send, | |
5377 | .get_carrier = netdev_dpdk_vhost_get_carrier, | |
5378 | .get_stats = netdev_dpdk_vhost_get_stats, | |
b99ab8aa | 5379 | .get_custom_stats = netdev_dpdk_get_sw_custom_stats, |
89c09c1c BP |
5380 | .get_status = netdev_dpdk_vhost_user_get_status, |
5381 | .reconfigure = netdev_dpdk_vhost_client_reconfigure, | |
35c91567 DM |
5382 | .rxq_recv = netdev_dpdk_vhost_rxq_recv, |
5383 | .rxq_enabled = netdev_dpdk_vhost_rxq_enabled, | |
89c09c1c | 5384 | }; |
95fb793a | 5385 | |
8a9562d2 PS |
5386 | void |
5387 | netdev_dpdk_register(void) | |
5388 | { | |
bab69409 | 5389 | netdev_register_provider(&dpdk_class); |
53f50d24 | 5390 | netdev_register_provider(&dpdk_vhost_class); |
2d24d165 | 5391 | netdev_register_provider(&dpdk_vhost_client_class); |
8a9562d2 | 5392 | } |