Commit | Line | Data |
---|---|---|
8a9562d2 | 1 | /* |
12d0d124 | 2 | * Copyright (c) 2014, 2015, 2016, 2017 Nicira, Inc. |
8a9562d2 PS |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
5 | * you may not use this file except in compliance with the License. | |
6 | * You may obtain a copy of the License at: | |
7 | * | |
8 | * http://www.apache.org/licenses/LICENSE-2.0 | |
9 | * | |
10 | * Unless required by applicable law or agreed to in writing, software | |
11 | * distributed under the License is distributed on an "AS IS" BASIS, | |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
13 | * See the License for the specific language governing permissions and | |
14 | * limitations under the License. | |
15 | */ | |
16 | ||
17 | #include <config.h> | |
01961bbd | 18 | #include "netdev-dpdk.h" |
8a9562d2 | 19 | |
6ebc4b09 | 20 | #include <errno.h> |
8a9562d2 PS |
21 | #include <signal.h> |
22 | #include <stdlib.h> | |
6ebc4b09 | 23 | #include <string.h> |
8a9562d2 | 24 | #include <unistd.h> |
f3e7ec25 MW |
25 | #include <linux/virtio_net.h> |
26 | #include <sys/socket.h> | |
27 | #include <linux/if.h> | |
01961bbd | 28 | |
e61bdffc EC |
29 | /* Include rte_compat.h first to allow experimental APIs needed for the |
30 | * rte_meter.h rfc4115 functions. Once they are no longer marked as | |
31 | * experimental the #define and rte_compat.h include can be removed. | |
32 | */ | |
33 | #define ALLOW_EXPERIMENTAL_API | |
34 | #include <rte_compat.h> | |
5e925ccc | 35 | #include <rte_bus_pci.h> |
01961bbd DDP |
36 | #include <rte_config.h> |
37 | #include <rte_cycles.h> | |
38 | #include <rte_errno.h> | |
01961bbd | 39 | #include <rte_ethdev.h> |
6ebc4b09 | 40 | #include <rte_flow.h> |
01961bbd DDP |
41 | #include <rte_malloc.h> |
42 | #include <rte_mbuf.h> | |
43 | #include <rte_meter.h> | |
fc56f5e0 | 44 | #include <rte_pci.h> |
3eb8d4fa | 45 | #include <rte_version.h> |
6ebc4b09 | 46 | #include <rte_vhost.h> |
8a9562d2 | 47 | |
e8a2b5bf | 48 | #include "cmap.h" |
9ff24b9c | 49 | #include "coverage.h" |
7d1ced01 | 50 | #include "dirs.h" |
e14deea0 | 51 | #include "dp-packet.h" |
01961bbd | 52 | #include "dpdk.h" |
8a9562d2 | 53 | #include "dpif-netdev.h" |
e5c0f5a4 | 54 | #include "fatal-signal.h" |
988fd463 | 55 | #include "if-notifier.h" |
8a9562d2 PS |
56 | #include "netdev-provider.h" |
57 | #include "netdev-vport.h" | |
58 | #include "odp-util.h" | |
eac84432 | 59 | #include "openvswitch/dynamic-string.h" |
25d436fb | 60 | #include "openvswitch/list.h" |
6ebc4b09 | 61 | #include "openvswitch/match.h" |
25d436fb | 62 | #include "openvswitch/ofp-print.h" |
6ebc4b09 | 63 | #include "openvswitch/shash.h" |
25d436fb | 64 | #include "openvswitch/vlog.h" |
94143fc4 | 65 | #include "ovs-numa.h" |
8a9562d2 | 66 | #include "ovs-rcu.h" |
6ebc4b09 | 67 | #include "ovs-thread.h" |
8a9562d2 | 68 | #include "packets.h" |
0bf765f7 | 69 | #include "smap.h" |
8a9562d2 | 70 | #include "sset.h" |
8a9562d2 | 71 | #include "timeval.h" |
6ebc4b09 | 72 | #include "unaligned.h" |
8a9562d2 | 73 | #include "unixctl.h" |
29cf9c1b | 74 | #include "userspace-tso.h" |
6ebc4b09 IM |
75 | #include "util.h" |
76 | #include "uuid.h" | |
8a9562d2 | 77 | |
f3e7ec25 MW |
78 | enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM}; |
79 | ||
05b49df6 | 80 | VLOG_DEFINE_THIS_MODULE(netdev_dpdk); |
8a9562d2 PS |
81 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20); |
82 | ||
9ff24b9c | 83 | COVERAGE_DEFINE(vhost_tx_contention); |
3d56e4ac | 84 | COVERAGE_DEFINE(vhost_notification); |
9ff24b9c | 85 | |
8a9562d2 PS |
86 | #define DPDK_PORT_WATCHDOG_INTERVAL 5 |
87 | ||
88 | #define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE | |
89 | #define OVS_VPORT_DPDK "ovs_dpdk" | |
90 | ||
91 | /* | |
92 | * need to reserve tons of extra space in the mbufs so we can align the | |
93 | * DMA addresses to 4KB. | |
18f777b2 TP |
94 | * The minimum mbuf size is limited to avoid scatter behaviour and drop in |
95 | * performance for standard Ethernet MTU. | |
8a9562d2 | 96 | */ |
127b6a6e | 97 | #define ETHER_HDR_MAX_LEN (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN \ |
58be5c0e | 98 | + (2 * VLAN_HEADER_LEN)) |
127b6a6e IS |
99 | #define MTU_TO_FRAME_LEN(mtu) ((mtu) + RTE_ETHER_HDR_LEN + \ |
100 | RTE_ETHER_CRC_LEN) | |
4be4d22c | 101 | #define MTU_TO_MAX_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_MAX_LEN) |
58be5c0e | 102 | #define FRAME_LEN_TO_MTU(frame_len) ((frame_len) \ |
127b6a6e | 103 | - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN) |
4be4d22c | 104 | #define NETDEV_DPDK_MBUF_ALIGN 1024 |
0072e931 | 105 | #define NETDEV_DPDK_MAX_PKT_LEN 9728 |
8a9562d2 | 106 | |
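For quick reference, here is a worked example of the frame-length macros above for the standard 1500-byte Ethernet MTU. The numeric values of RTE_ETHER_HDR_LEN (14), RTE_ETHER_CRC_LEN (4) and VLAN_HEADER_LEN (4) are assumptions taken from the DPDK/OVS headers, not definitions from this file:

```c
/* Worked example (illustration only, not part of netdev-dpdk.c). */
ETHER_HDR_MAX_LEN            /* 14 + 4 + 2 * 4 = 26                      */
MTU_TO_FRAME_LEN(1500)       /* 1500 + 14 + 4  = 1518                    */
MTU_TO_MAX_FRAME_LEN(1500)   /* 1500 + 26      = 1526                    */
FRAME_LEN_TO_MTU(9728)       /* 9728 - 14 - 4  = 9710, the largest MTU   */
                             /* allowed by NETDEV_DPDK_MAX_PKT_LEN       */
```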
43307ad0 IS |
107 | /* Max and min number of packets in the mempool. OVS tries to allocate a |
108 | * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have | |
109 | * enough hugepages) we keep halving the number until the allocation succeeds | |
110 | * or we reach MIN_NB_MBUF */ | |
111 | ||
112 | #define MAX_NB_MBUF (4096 * 64) | |
da79ce2b DDP |
113 | #define MIN_NB_MBUF (4096 * 4) |
114 | #define MP_CACHE_SZ RTE_MEMPOOL_CACHE_MAX_SIZE | |
115 | ||
43307ad0 IS |
116 | /* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */ |
117 | BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF) | |
118 | == 0); | |
119 | ||
120 | /* The smallest possible NB_MBUF that we're going to try should be a multiple | |
121 | * of MP_CACHE_SZ. This is advised by DPDK documentation. */ | |
122 | BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)) | |
123 | % MP_CACHE_SZ == 0); | |
124 | ||
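As a concrete illustration of what these asserts guarantee (the numbers below follow from the definitions above plus DPDK's default RTE_MEMPOOL_CACHE_MAX_SIZE of 512, and are not part of the original file):

```c
/* MAX_NB_MBUF = 4096 * 64 = 262144, MIN_NB_MBUF = 4096 * 4 = 16384.
 * The allocation loop in dpdk_mp_create() may therefore try
 *   262144 -> 131072 -> 65536 -> 32768 -> 16384
 * mbufs.  262144 / 16384 = 16 is a power of two (first assert), and
 * 262144 / 16 = 16384 is a multiple of MP_CACHE_SZ (second assert). */
```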
8a9562d2 PS |
125 | #define SOCKET0 0 |
126 | ||
b685696b CL |
127 | /* Default size of Physical NIC RXQ */ |
128 | #define NIC_PORT_DEFAULT_RXQ_SIZE 2048 | |
129 | /* Default size of Physical NIC TXQ */ | |
130 | #define NIC_PORT_DEFAULT_TXQ_SIZE 2048 | |
131 | /* Maximum size of Physical NIC Queues */ | |
132 | #define NIC_PORT_MAX_Q_SIZE 4096 | |
79f5354c | 133 | |
585a5bea | 134 | #define OVS_VHOST_MAX_QUEUE_NUM 1024 /* Maximum number of vHost TX queues. */ |
f3ea2ad2 IM |
135 | #define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */ |
136 | #define OVS_VHOST_QUEUE_DISABLED (-2) /* Queue was disabled by guest and not | |
137 | * yet mapped to another queue. */ | |
585a5bea | 138 | |
bb37956a IM |
139 | #define DPDK_ETH_PORT_ID_INVALID RTE_MAX_ETHPORTS |
140 | ||
5e925ccc MK |
141 | /* DPDK library uses uint16_t for port_id. */ |
142 | typedef uint16_t dpdk_port_t; | |
fa9f4eeb | 143 | #define DPDK_PORT_ID_FMT "%"PRIu16 |
bb37956a | 144 | |
080f080c KT |
145 | /* Minimum amount of vhost tx retries, effectively a disable. */ |
146 | #define VHOST_ENQ_RETRY_MIN 0 | |
147 | /* Maximum amount of vhost tx retries. */ | |
148 | #define VHOST_ENQ_RETRY_MAX 32 | |
149 | /* Legacy default value for vhost tx retries. */ | |
150 | #define VHOST_ENQ_RETRY_DEF 8 | |
151 | ||
0a0f39df | 152 | #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ) |
95e9881f | 153 | |
35b5586b FL |
154 | /* List of required flags advertised by the hardware that will be used |
155 | * if TSO is enabled. Ideally this should include DEV_TX_OFFLOAD_SCTP_CKSUM. | |
156 | * However, very few drivers support it at the moment, and SCTP is not as
157 | * widely used a protocol as TCP and UDP, so it's optional. */
8c5163fe FL |
158 | #define DPDK_TX_TSO_OFFLOAD_FLAGS (DEV_TX_OFFLOAD_TCP_TSO \ |
159 | | DEV_TX_OFFLOAD_TCP_CKSUM \ | |
160 | | DEV_TX_OFFLOAD_UDP_CKSUM \ | |
161 | | DEV_TX_OFFLOAD_IPV4_CKSUM) | |
162 | ||
163 | ||
8a9562d2 | 164 | static const struct rte_eth_conf port_conf = { |
a28ddd11 | 165 | .rxmode = { |
a28ddd11 | 166 | .split_hdr_size = 0, |
03f3f9c0 | 167 | .offloads = 0, |
a28ddd11 DDP |
168 | }, |
169 | .rx_adv_conf = { | |
170 | .rss_conf = { | |
171 | .rss_key = NULL, | |
543342a4 | 172 | .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP, |
8a9562d2 | 173 | }, |
a28ddd11 DDP |
174 | }, |
175 | .txmode = { | |
176 | .mq_mode = ETH_MQ_TX_NONE, | |
177 | }, | |
8a9562d2 PS |
178 | }; |
179 | ||
f3e7ec25 MW |
180 | /* |
181 | * These callbacks allow virtio-net devices to be added to vhost ports when | |
182 | * configuration has been fully completed. | |
183 | */ | |
184 | static int new_device(int vid); | |
185 | static void destroy_device(int vid); | |
186 | static int vring_state_changed(int vid, uint16_t queue_id, int enable); | |
61473a0e | 187 | static void destroy_connection(int vid); |
3d56e4ac EC |
188 | static void vhost_guest_notified(int vid); |
189 | ||
f3e7ec25 MW |
190 | static const struct vhost_device_ops virtio_net_device_ops = |
191 | { | |
192 | .new_device = new_device, | |
193 | .destroy_device = destroy_device, | |
194 | .vring_state_changed = vring_state_changed, | |
61473a0e DM |
195 | .features_changed = NULL, |
196 | .new_connection = NULL, | |
197 | .destroy_connection = destroy_connection, | |
3d56e4ac | 198 | .guest_notified = vhost_guest_notified, |
f3e7ec25 MW |
199 | }; |
200 | ||
2f862c71 SV |
201 | /* Custom software stats for dpdk ports */ |
202 | struct netdev_dpdk_sw_stats { | |
203 | /* No. of retries when unable to transmit. */ | |
204 | uint64_t tx_retries; | |
205 | /* Packet drops when unable to transmit; Probably Tx queue is full. */ | |
206 | uint64_t tx_failure_drops; | |
207 | /* Packet length greater than device MTU. */ | |
208 | uint64_t tx_mtu_exceeded_drops; | |
209 | /* Packet drops in egress policer processing. */ | |
210 | uint64_t tx_qos_drops; | |
211 | /* Packet drops in ingress policer processing. */ | |
212 | uint64_t rx_qos_drops; | |
29cf9c1b FL |
213 | /* Packet drops in HWOL processing. */ |
214 | uint64_t tx_invalid_hwol_drops; | |
2f862c71 SV |
215 | }; |
216 | ||
58397e6c KT |
217 | enum dpdk_dev_type { |
218 | DPDK_DEV_ETH = 0, | |
7d1ced01 | 219 | DPDK_DEV_VHOST = 1, |
58397e6c KT |
220 | }; |
221 | ||
0bf765f7 IS |
222 | /* Quality of Service */ |
223 | ||
224 | /* An instance of a QoS configuration. Always associated with a particular | |
225 | * network device. | |
226 | * | |
227 | * Each QoS implementation subclasses this with whatever additional data it | |
228 | * needs. | |
229 | */ | |
230 | struct qos_conf { | |
231 | const struct dpdk_qos_ops *ops; | |
78bd47cf | 232 | rte_spinlock_t lock; |
0bf765f7 IS |
233 | }; |
234 | ||
23c01b19 EC |
235 | /* QoS queue information used by the netdev queue dump functions. */ |
236 | struct netdev_dpdk_queue_state { | |
237 | uint32_t *queues; | |
238 | size_t cur_queue; | |
239 | size_t n_queues; | |
240 | }; | |
241 | ||
0bf765f7 IS |
242 | /* A particular implementation of dpdk QoS operations. |
243 | * | |
244 | * The functions below return 0 if successful or a positive errno value on | |
245 | * failure, except where otherwise noted. All of them must be provided, except | |
246 | * where otherwise noted. | |
247 | */ | |
248 | struct dpdk_qos_ops { | |
249 | ||
250 | /* Name of the QoS type */ | |
251 | const char *qos_name; | |
252 | ||
78bd47cf DDP |
253 | /* Called to construct a qos_conf object. The implementation should make |
254 | * the appropriate calls to configure QoS according to 'details'. | |
0bf765f7 IS |
255 | * |
256 | * The contents of 'details' should be documented as valid for 'ovs_name' | |
257 | * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml | |
258 | * (which is built as ovs-vswitchd.conf.db(8)). | |
259 | * | |
78bd47cf DDP |
260 | * This function must return 0 if and only if it sets '*conf' to an |
261 | * initialized 'struct qos_conf'. | |
0bf765f7 IS |
262 | * |
263 | * For all QoS implementations it should always be non-null. | |
264 | */ | |
78bd47cf | 265 | int (*qos_construct)(const struct smap *details, struct qos_conf **conf); |
0bf765f7 IS |
266 | |
267 | /* Destroys the data structures allocated by the implementation as part of | |
78bd47cf | 268 | * 'qos_conf'. |
0bf765f7 IS |
269 | * |
270 | * For all QoS implementations it should always be non-null. | |
271 | */ | |
78bd47cf | 272 | void (*qos_destruct)(struct qos_conf *conf); |
0bf765f7 | 273 | |
78bd47cf | 274 | /* Retrieves details of 'conf' configuration into 'details'. |
0bf765f7 IS |
275 | * |
276 | * The contents of 'details' should be documented as valid for 'ovs_name' | |
277 | * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml | |
278 | * (which is built as ovs-vswitchd.conf.db(8)). | |
279 | */ | |
78bd47cf | 280 | int (*qos_get)(const struct qos_conf *conf, struct smap *details); |
0bf765f7 | 281 | |
78bd47cf | 282 | /* Returns true if 'conf' is already configured according to 'details'. |
0bf765f7 IS |
283 | * |
284 | * The contents of 'details' should be documented as valid for 'ovs_name' | |
285 | * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml | |
286 | * (which is built as ovs-vswitchd.conf.db(8)). | |
287 | * | |
78bd47cf | 288 | * For all QoS implementations it should always be non-null. |
0bf765f7 | 289 | */ |
78bd47cf DDP |
290 | bool (*qos_is_equal)(const struct qos_conf *conf, |
291 | const struct smap *details); | |
0bf765f7 IS |
292 | |
293 | /* Modify an array of rte_mbufs. The modification is specific to | |
294 | * each qos implementation. | |
295 | * | |
296 | * The function should take an array of mbufs and an int representing
297 | * the current number of mbufs present in the array. | |
298 | * | |
299 | * After the function has performed a qos modification to the array of | |
300 | * mbufs it returns an int representing the number of mbufs now present in | |
301 | * the array. This value can then be passed to the port send function
302 | * along with the modified array for transmission. | |
303 | * | |
304 | * For all QoS implementations it should always be non-null. | |
305 | */ | |
78bd47cf | 306 | int (*qos_run)(struct qos_conf *qos_conf, struct rte_mbuf **pkts, |
7d7ded7a | 307 | int pkt_cnt, bool should_steal); |
23c01b19 EC |
308 | |
309 | /* Called to construct a QoS Queue. The implementation should make | |
310 | * the appropriate calls to configure QoS Queue according to 'details'. | |
311 | * | |
312 | * The contents of 'details' should be documented as valid for 'ovs_name' | |
313 | * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml | |
314 | * (which is built as ovs-vswitchd.conf.db(8)). | |
315 | * | |
316 | * This function must return 0 if and only if it constructs | |
317 | * the QoS queue successfully.
318 | */ | |
319 | int (*qos_queue_construct)(const struct smap *details, | |
320 | uint32_t queue_id, struct qos_conf *conf); | |
321 | ||
322 | /* Destroys the QoS Queue. */ | |
323 | void (*qos_queue_destruct)(struct qos_conf *conf, uint32_t queue_id); | |
324 | ||
325 | /* Retrieves details of QoS Queue configuration into 'details'. | |
326 | * | |
327 | * The contents of 'details' should be documented as valid for 'ovs_name' | |
328 | * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml | |
329 | * (which is built as ovs-vswitchd.conf.db(8)). | |
330 | */ | |
331 | int (*qos_queue_get)(struct smap *details, uint32_t queue_id, | |
332 | const struct qos_conf *conf); | |
333 | ||
334 | /* Retrieves statistics of QoS Queue configuration into 'stats'. */ | |
335 | int (*qos_queue_get_stats)(const struct qos_conf *conf, uint32_t queue_id, | |
336 | struct netdev_queue_stats *stats); | |
337 | ||
338 | /* Setup the 'netdev_dpdk_queue_state' structure used by the dpdk queue | |
339 | * dump functions. | |
340 | */ | |
341 | int (*qos_queue_dump_state_init)(const struct qos_conf *conf, | |
342 | struct netdev_dpdk_queue_state *state); | |
0bf765f7 IS |
343 | }; |
344 | ||
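To make the callback contract above concrete, the sketch below shows the rough shape of a QoS implementation plugged into this interface. The `noop_*` names are hypothetical and exist only for illustration; the real implementations in this file are `egress_policer_ops` and `trtcm_policer_ops`, and a complete implementation must also provide `qos_get()` and `qos_is_equal()` and initialize the embedded `struct qos_conf` (ops pointer and spinlock) in its constructor:

```c
/* Hypothetical, illustration-only QoS implementation sketch. */
static int
noop_qos_construct(const struct smap *details OVS_UNUSED,
                   struct qos_conf **conf)
{
    *conf = xzalloc(sizeof **conf);   /* A real impl parses 'details'. */
    return 0;
}

static void
noop_qos_destruct(struct qos_conf *conf)
{
    free(conf);
}

static int
noop_qos_run(struct qos_conf *conf OVS_UNUSED,
             struct rte_mbuf **pkts OVS_UNUSED,
             int pkt_cnt, bool should_steal OVS_UNUSED)
{
    return pkt_cnt;                   /* Admit every packet unchanged. */
}

static const struct dpdk_qos_ops noop_qos_ops = {
    .qos_name = "noop",
    .qos_construct = noop_qos_construct,
    .qos_destruct = noop_qos_destruct,
    .qos_run = noop_qos_run,
};
```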
e61bdffc | 345 | /* dpdk_qos_ops for each type of user space QoS implementation. */ |
0bf765f7 | 346 | static const struct dpdk_qos_ops egress_policer_ops; |
e61bdffc | 347 | static const struct dpdk_qos_ops trtcm_policer_ops; |
0bf765f7 IS |
348 | |
349 | /* | |
350 | * Array of dpdk_qos_ops, contains pointer to all supported QoS | |
351 | * operations. | |
352 | */ | |
353 | static const struct dpdk_qos_ops *const qos_confs[] = { | |
354 | &egress_policer_ops, | |
e61bdffc | 355 | &trtcm_policer_ops, |
0bf765f7 IS |
356 | NULL |
357 | }; | |
358 | ||
c2adb102 IM |
359 | static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER; |
360 | ||
8a9562d2 | 361 | /* Contains all 'struct dpdk_dev's. */ |
ca6ba700 | 362 | static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex) |
55951e15 | 363 | = OVS_LIST_INITIALIZER(&dpdk_list); |
8a9562d2 | 364 | |
c2adb102 IM |
365 | static struct ovs_mutex dpdk_mp_mutex OVS_ACQ_AFTER(dpdk_mutex) |
366 | = OVS_MUTEX_INITIALIZER; | |
367 | ||
91fccdad | 368 | /* Contains all 'struct dpdk_mp's. */ |
43307ad0 IS |
369 | static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mp_mutex) |
370 | = OVS_LIST_INITIALIZER(&dpdk_mp_list); | |
91fccdad | 371 | |
91fccdad KT |
372 | struct dpdk_mp { |
373 | struct rte_mempool *mp; | |
43307ad0 IS |
374 | int mtu; |
375 | int socket_id; | |
376 | int refcount; | |
91fccdad KT |
377 | struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex); |
378 | }; | |
379 | ||
5a034064 | 380 | /* There should be one 'struct dpdk_tx_queue' created for |
15ba075d | 381 | * each netdev tx queue. */ |
8a9562d2 | 382 | struct dpdk_tx_queue { |
15ba075d IM |
383 | /* Padding to make dpdk_tx_queue exactly one cache line long. */ |
384 | PADDED_MEMBERS(CACHE_LINE_SIZE, | |
385 | /* Protects the members and the NIC queue from concurrent access. | |
386 | * It is used only if the queue is shared among different pmd threads | |
387 | * (see 'concurrent_txq'). */ | |
388 | rte_spinlock_t tx_lock; | |
389 | /* Mapping of configured vhost-user queue to enabled by guest. */ | |
390 | int map; | |
391 | ); | |
8a9562d2 PS |
392 | }; |
393 | ||
9509913a IS |
394 | struct ingress_policer { |
395 | struct rte_meter_srtcm_params app_srtcm_params; | |
396 | struct rte_meter_srtcm in_policer; | |
03f3f9c0 | 397 | struct rte_meter_srtcm_profile in_prof; |
9509913a IS |
398 | rte_spinlock_t policer_lock; |
399 | }; | |
400 | ||
1a2bb118 SC |
401 | enum dpdk_hw_ol_features { |
402 | NETDEV_RX_CHECKSUM_OFFLOAD = 1 << 0, | |
e10ca8b9 | 403 | NETDEV_RX_HW_CRC_STRIP = 1 << 1, |
29cf9c1b FL |
404 | NETDEV_RX_HW_SCATTER = 1 << 2, |
405 | NETDEV_TX_TSO_OFFLOAD = 1 << 3, | |
35b5586b | 406 | NETDEV_TX_SCTP_CHECKSUM_OFFLOAD = 1 << 4, |
1a2bb118 SC |
407 | }; |
408 | ||
b2e72a9c IM |
409 | /* |
410 | * In order to avoid confusion in variables names, following naming convention | |
411 | * should be used, if possible: | |
412 | * | |
413 | * 'struct netdev' : 'netdev' | |
414 | * 'struct netdev_dpdk' : 'dev' | |
415 | * 'struct netdev_rxq' : 'rxq' | |
416 | * 'struct netdev_rxq_dpdk' : 'rx' | |
417 | * | |
418 | * Example: | |
419 | * struct netdev *netdev = netdev_from_name(name); | |
420 | * struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
421 | * | |
422 | * Also, 'netdev' should be used instead of 'dev->up', where 'netdev' was | |
423 | * already defined. | |
424 | */ | |
425 | ||
8a9562d2 | 426 | struct netdev_dpdk { |
23d4d53f BB |
427 | PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline0, |
428 | dpdk_port_t port_id; | |
429 | ||
430 | /* If true, device was attached by rte_eth_dev_attach(). */ | |
431 | bool attached; | |
606f6650 EC |
432 | /* If true, rte_eth_dev_start() was successfully called */ |
433 | bool started; | |
988fd463 EC |
434 | bool reset_needed; |
435 | /* 1 pad byte here. */ | |
23d4d53f BB |
436 | struct eth_addr hwaddr; |
437 | int mtu; | |
438 | int socket_id; | |
439 | int buf_size; | |
440 | int max_packet_len; | |
441 | enum dpdk_dev_type type; | |
442 | enum netdev_flags flags; | |
eaa43581 | 443 | int link_reset_cnt; |
bb9d2623 IM |
444 | union { |
445 | /* Device arguments for dpdk ports. */ | |
446 | char *devargs; | |
447 | /* Identifier used to distinguish vhost devices from each other. */ | |
448 | char *vhost_id; | |
449 | }; | |
23d4d53f BB |
450 | struct dpdk_tx_queue *tx_q; |
451 | struct rte_eth_link link; | |
23d4d53f BB |
452 | ); |
453 | ||
454 | PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline1, | |
455 | struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex); | |
43307ad0 | 456 | struct dpdk_mp *dpdk_mp; |
23d4d53f BB |
457 | |
458 | /* virtio identifier for vhost devices */ | |
459 | ovsrcu_index vid; | |
460 | ||
461 | /* True if vHost device is 'up' and has been reconfigured at least once */ | |
462 | bool vhost_reconfigured; | |
080f080c KT |
463 | |
464 | atomic_uint8_t vhost_tx_retries_max; | |
465 | /* 2 pad bytes here. */ | |
23d4d53f BB |
466 | ); |
467 | ||
23d4d53f BB |
468 | PADDED_MEMBERS(CACHE_LINE_SIZE, |
469 | struct netdev up; | |
470 | /* In dpdk_list. */ | |
471 | struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex); | |
472 | ||
473 | /* QoS configuration and lock for the device */ | |
474 | OVSRCU_TYPE(struct qos_conf *) qos_conf; | |
475 | ||
476 | /* Ingress Policer */ | |
477 | OVSRCU_TYPE(struct ingress_policer *) ingress_policer; | |
478 | uint32_t policer_rate; | |
479 | uint32_t policer_burst; | |
35c91567 DM |
480 | |
481 | /* Array of vhost rxq states, see vring_state_changed. */ | |
482 | bool *vhost_rxq_enabled; | |
23d4d53f BB |
483 | ); |
484 | ||
485 | PADDED_MEMBERS(CACHE_LINE_SIZE, | |
486 | struct netdev_stats stats; | |
2f862c71 | 487 | struct netdev_dpdk_sw_stats *sw_stats; |
23d4d53f BB |
488 | /* Protects stats */ |
489 | rte_spinlock_t stats_lock; | |
2f862c71 | 490 | /* 36 pad bytes here. */ |
23d4d53f BB |
491 | ); |
492 | ||
493 | PADDED_MEMBERS(CACHE_LINE_SIZE, | |
494 | /* The following properties cannot be changed when a device is running, | |
495 | * so we remember the request and update them next time | |
496 | * netdev_dpdk*_reconfigure() is called */ | |
497 | int requested_mtu; | |
498 | int requested_n_txq; | |
499 | int requested_n_rxq; | |
500 | int requested_rxq_size; | |
501 | int requested_txq_size; | |
502 | ||
503 | /* Number of rx/tx descriptors for physical devices */ | |
504 | int rxq_size; | |
505 | int txq_size; | |
506 | ||
507 | /* Socket ID detected when vHost device is brought up */ | |
508 | int requested_socket_id; | |
509 | ||
510 | /* Denotes whether vHost port is client/server mode */ | |
511 | uint64_t vhost_driver_flags; | |
512 | ||
513 | /* DPDK-ETH Flow control */ | |
514 | struct rte_eth_fc_conf fc_conf; | |
515 | ||
516 | /* DPDK-ETH hardware offload features, | |
517 | * from the enum set 'dpdk_hw_ol_features' */ | |
518 | uint32_t hw_ol_features; | |
f8b64a61 RM |
519 | |
520 | /* Properties for link state change detection mode. | |
521 | * If lsc_interrupt_mode is set to false, poll mode is used, | |
522 | * otherwise interrupt mode is used. */ | |
523 | bool requested_lsc_interrupt_mode; | |
524 | bool lsc_interrupt_mode; | |
23d4d53f | 525 | ); |
971f4b39 MW |
526 | |
527 | PADDED_MEMBERS(CACHE_LINE_SIZE, | |
528 | /* Names of all XSTATS counters */ | |
529 | struct rte_eth_xstat_name *rte_xstats_names; | |
530 | int rte_xstats_names_size; | |
531 | int rte_xstats_ids_size; | |
532 | uint64_t *rte_xstats_ids; | |
533 | ); | |
8a9562d2 PS |
534 | }; |
535 | ||
536 | struct netdev_rxq_dpdk { | |
537 | struct netdev_rxq up; | |
bb37956a | 538 | dpdk_port_t port_id; |
8a9562d2 PS |
539 | }; |
540 | ||
f3e7ec25 MW |
541 | static void netdev_dpdk_destruct(struct netdev *netdev); |
542 | static void netdev_dpdk_vhost_destruct(struct netdev *netdev); | |
8a9562d2 | 543 | |
b99ab8aa IM |
544 | static int netdev_dpdk_get_sw_custom_stats(const struct netdev *, |
545 | struct netdev_custom_stats *); | |
ac1a9bb9 IM |
546 | static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev); |
547 | ||
0a0f39df | 548 | int netdev_dpdk_get_vid(const struct netdev_dpdk *dev); |
58397e6c | 549 | |
9509913a IS |
550 | struct ingress_policer * |
551 | netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev); | |
552 | ||
8a9562d2 PS |
553 | static bool |
554 | is_dpdk_class(const struct netdev_class *class) | |
555 | { | |
f3e7ec25 MW |
556 | return class->destruct == netdev_dpdk_destruct |
557 | || class->destruct == netdev_dpdk_vhost_destruct; | |
8a9562d2 PS |
558 | } |
559 | ||
4be4d22c MK |
560 | /* DPDK NIC drivers allocate RX buffers at a particular granularity, typically |
561 | * aligned at 1k or less. If a declared mbuf size is not a multiple of this | |
562 | * value, insufficient buffers are allocated to accommodate the packet in its
563 | * entirety. Furthermore, certain drivers need to ensure that there is also | |
564 | * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ | |
565 | * frames). If the RX buffer is too small, then the driver enables scatter RX | |
58be5c0e MK |
566 | * behaviour, which reduces performance. To prevent this, use a buffer size |
567 | * that is closest to 'mtu', but which satisfies the aforementioned criteria. | |
4be4d22c MK |
568 | */ |
569 | static uint32_t | |
570 | dpdk_buf_size(int mtu) | |
571 | { | |
a32bab26 TL |
572 | return ROUND_UP(MTU_TO_MAX_FRAME_LEN(mtu), NETDEV_DPDK_MBUF_ALIGN) |
573 | + RTE_PKTMBUF_HEADROOM; | |
4be4d22c MK |
574 | } |
575 | ||
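A worked example of the calculation above, assuming DPDK's default RTE_PKTMBUF_HEADROOM of 128 bytes (the headroom value is an assumption, it is not defined in this file):

```c
/* dpdk_buf_size(1500), illustration only:
 *   MTU_TO_MAX_FRAME_LEN(1500)             = 1526
 *   ROUND_UP(1526, NETDEV_DPDK_MBUF_ALIGN) = 2048
 *   2048 + RTE_PKTMBUF_HEADROOM (128)      = 2176 bytes of buffer per mbuf,
 * i.e. the declared data room stays a multiple of the 1 KiB granularity the
 * NIC drivers expect, plus headroom. */
```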
eff23640 DDP |
576 | /* Allocates an area of 'sz' bytes from DPDK. The memory is zero'ed. |
577 | * | |
578 | * Unlike xmalloc(), this function can return NULL on failure. */ | |
8a9562d2 PS |
579 | static void * |
580 | dpdk_rte_mzalloc(size_t sz) | |
581 | { | |
eff23640 | 582 | return rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE); |
8a9562d2 PS |
583 | } |
584 | ||
585 | void | |
e14deea0 | 586 | free_dpdk_buf(struct dp_packet *p) |
8a9562d2 | 587 | { |
db73f716 | 588 | struct rte_mbuf *pkt = (struct rte_mbuf *) p; |
8a9562d2 | 589 | |
b00b4a81 | 590 | rte_pktmbuf_free(pkt); |
8a9562d2 PS |
591 | } |
592 | ||
b3cd9f9d | 593 | static void |
401b70d6 | 594 | ovs_rte_pktmbuf_init(struct rte_mempool *mp OVS_UNUSED, |
b3cd9f9d | 595 | void *opaque_arg OVS_UNUSED, |
2391135c | 596 | void *_p, |
b3cd9f9d PS |
597 | unsigned i OVS_UNUSED) |
598 | { | |
2391135c | 599 | struct rte_mbuf *pkt = _p; |
b3cd9f9d | 600 | |
3aaa6201 | 601 | dp_packet_init_dpdk((struct dp_packet *) pkt); |
b3cd9f9d PS |
602 | } |
603 | ||
91fccdad KT |
604 | static int |
605 | dpdk_mp_full(const struct rte_mempool *mp) OVS_REQUIRES(dpdk_mp_mutex) | |
606 | { | |
1f84a2d5 KT |
607 | /* At this point we want to know if all the mbufs are back |
608 | * in the mempool. rte_mempool_full() is not atomic but it's | |
609 | * the best available and as we are no longer requesting mbufs | |
610 | * from the mempool, it means mbufs will not move from | |
611 | * 'mempool ring' --> 'mempool cache'. In rte_mempool_full() | |
612 | * the ring is counted before caches, so we won't get false | |
613 | * positives in this use case and we handle false negatives. | |
614 | * | |
615 | * If future implementations of rte_mempool_full() were to change | |
616 | * it could be possible for a false positive. Even that would | |
617 | * likely be ok, as there are additional checks during mempool | |
618 | * freeing but it would make things racey. | |
91fccdad | 619 | */ |
1f84a2d5 | 620 | return rte_mempool_full(mp); |
91fccdad KT |
621 | } |
622 | ||
623 | /* Free unused mempools. */ | |
624 | static void | |
43307ad0 | 625 | dpdk_mp_sweep(void) OVS_REQUIRES(dpdk_mp_mutex) |
91fccdad KT |
626 | { |
627 | struct dpdk_mp *dmp, *next; | |
628 | ||
43307ad0 IS |
629 | LIST_FOR_EACH_SAFE (dmp, next, list_node, &dpdk_mp_list) { |
630 | if (!dmp->refcount && dpdk_mp_full(dmp->mp)) { | |
91fccdad KT |
631 | VLOG_DBG("Freeing mempool \"%s\"", dmp->mp->name); |
632 | ovs_list_remove(&dmp->list_node); | |
633 | rte_mempool_free(dmp->mp); | |
634 | rte_free(dmp); | |
635 | } | |
636 | } | |
91fccdad KT |
637 | } |
638 | ||
43307ad0 IS |
639 | /* Calculating the required number of mbufs differs depending on the |
640 | * mempool model being used. Check if per port memory is in use before | |
641 | * calculating. | |
642 | */ | |
643 | static uint32_t | |
644 | dpdk_calculate_mbufs(struct netdev_dpdk *dev, int mtu, bool per_port_mp) | |
91fccdad | 645 | { |
43307ad0 | 646 | uint32_t n_mbufs; |
91fccdad | 647 | |
43307ad0 IS |
648 | if (!per_port_mp) { |
649 | /* Shared memory is being used.
650 | * XXX: this is a really rough method of provisioning memory.
651 | * It's impossible to determine what the exact memory requirements are
652 | * when the number of ports and rxqs that utilize a particular mempool
653 | * can change dynamically at runtime. For now, use this rough
654 | * heuristic.
655 | */ | |
127b6a6e | 656 | if (mtu >= RTE_ETHER_MTU) { |
43307ad0 IS |
657 | n_mbufs = MAX_NB_MBUF; |
658 | } else { | |
659 | n_mbufs = MIN_NB_MBUF; | |
91fccdad | 660 | } |
43307ad0 IS |
661 | } else { |
662 | /* Per port memory is being used. | |
663 | * XXX: rough estimation of number of mbufs required for this port: | |
664 | * <packets required to fill the device rxqs> | |
665 | * + <packets that could be stuck on other ports txqs> | |
666 | * + <packets in the pmd threads> | |
667 | * + <additional memory for corner cases> | |
668 | */ | |
669 | n_mbufs = dev->requested_n_rxq * dev->requested_rxq_size | |
670 | + dev->requested_n_txq * dev->requested_txq_size | |
671 | + MIN(RTE_MAX_LCORE, dev->requested_n_rxq) * NETDEV_MAX_BURST | |
672 | + MIN_NB_MBUF; | |
91fccdad | 673 | } |
43307ad0 IS |
674 | |
675 | return n_mbufs; | |
91fccdad KT |
676 | } |
677 | ||
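For the per-port model, the estimate above works out as follows for an example physical port configured with 2 rx and 2 tx queues at the default descriptor count of 2048 (NIC_PORT_DEFAULT_RXQ_SIZE / NIC_PORT_DEFAULT_TXQ_SIZE) and OVS's NETDEV_MAX_BURST of 32; the concrete numbers are an illustration, not part of the original file:

```c
/* Illustration of dpdk_calculate_mbufs() with per-port memory:
 *     2 * 2048                     packets to fill the device rxqs
 *   + 2 * 2048                     packets possibly stuck in other txqs
 *   + MIN(RTE_MAX_LCORE, 2) * 32   packets in flight in the pmd threads
 *   + 16384 (MIN_NB_MBUF)          headroom for corner cases
 *   = 24640 mbufs requested for this port's mempool. */
```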
43307ad0 IS |
678 | static struct dpdk_mp * |
679 | dpdk_mp_create(struct netdev_dpdk *dev, int mtu, bool per_port_mp) | |
8a9562d2 | 680 | { |
24e78f93 IM |
681 | char mp_name[RTE_MEMPOOL_NAMESIZE]; |
682 | const char *netdev_name = netdev_get_name(&dev->up); | |
683 | int socket_id = dev->requested_socket_id; | |
dfaf00e8 MK |
684 | uint32_t n_mbufs = 0; |
685 | uint32_t mbuf_size = 0; | |
686 | uint32_t aligned_mbuf_size = 0; | |
687 | uint32_t mbuf_priv_data_len = 0; | |
688 | uint32_t pkt_size = 0; | |
24e78f93 | 689 | uint32_t hash = hash_string(netdev_name, 0); |
43307ad0 IS |
690 | struct dpdk_mp *dmp = NULL; |
691 | int ret; | |
692 | ||
693 | dmp = dpdk_rte_mzalloc(sizeof *dmp); | |
694 | if (!dmp) { | |
695 | return NULL; | |
696 | } | |
697 | dmp->socket_id = socket_id; | |
698 | dmp->mtu = mtu; | |
699 | dmp->refcount = 1; | |
700 | ||
dfaf00e8 MK |
701 | /* Get the size of each mbuf, based on the MTU */ |
702 | mbuf_size = MTU_TO_FRAME_LEN(mtu); | |
703 | ||
43307ad0 | 704 | n_mbufs = dpdk_calculate_mbufs(dev, mtu, per_port_mp); |
d555d9bd | 705 | |
da79ce2b | 706 | do { |
24e78f93 | 707 | /* Full DPDK memory pool name must be unique and cannot be |
43307ad0 IS |
708 | * longer than RTE_MEMPOOL_NAMESIZE. Note that for the shared |
709 | * mempool case this can result in one device using a mempool | |
710 | * which references a different device in its name. However, as
711 | * mempool names are hashed, the device name will not be readable | |
712 | * so this is not an issue for tasks such as debugging. | |
713 | */ | |
714 | ret = snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, | |
dfaf00e8 MK |
715 | "ovs%08x%02d%05d%07u", |
716 | hash, socket_id, mtu, n_mbufs); | |
24e78f93 IM |
717 | if (ret < 0 || ret >= RTE_MEMPOOL_NAMESIZE) { |
718 | VLOG_DBG("snprintf returned %d. " | |
719 | "Failed to generate a mempool name for \"%s\". " | |
720 | "Hash:0x%x, socket_id: %d, mtu:%d, mbufs:%u.", | |
721 | ret, netdev_name, hash, socket_id, mtu, n_mbufs); | |
722 | break; | |
65056fd7 | 723 | } |
95fb793a | 724 | |
dfaf00e8 MK |
725 | VLOG_DBG("Port %s: Requesting a mempool of %u mbufs of size %u " |
726 | "on socket %d for %d Rx and %d Tx queues, " | |
727 | "cache line size of %u", | |
728 | netdev_name, n_mbufs, mbuf_size, socket_id, | |
729 | dev->requested_n_rxq, dev->requested_n_txq, | |
730 | RTE_CACHE_LINE_SIZE); | |
731 | ||
a32bab26 TL |
732 | /* The size of the mbuf's private area (i.e. area that holds OvS' |
733 | * dp_packet data). */
dfaf00e8 MK |
734 | mbuf_priv_data_len = sizeof(struct dp_packet) - |
735 | sizeof(struct rte_mbuf); | |
736 | /* The size of the entire dp_packet. */ | |
737 | pkt_size = sizeof(struct dp_packet) + mbuf_size; | |
738 | /* mbuf size, rounded up to cacheline size. */ | |
739 | aligned_mbuf_size = ROUND_UP(pkt_size, RTE_CACHE_LINE_SIZE); | |
740 | /* If there is a size discrepancy, add padding to mbuf_priv_data_len. | |
741 | * This maintains mbuf size cache alignment, while also honoring RX | |
742 | * buffer alignment in the data portion of the mbuf. If this adjustment | |
743 | * is not made, there is a possibility later on that for an element of
744 | * the mempool, buf, buf->data_len < (buf->buf_len - buf->data_off).
745 | * This is problematic in the case of multi-segment mbufs, particularly
746 | * when an mbuf segment needs to be resized (when [push|popp]ing a VLAN
747 | * header, for example).
748 | */ | |
749 | mbuf_priv_data_len += (aligned_mbuf_size - pkt_size); | |
750 | ||
751 | dmp->mp = rte_pktmbuf_pool_create(mp_name, n_mbufs, MP_CACHE_SZ, | |
752 | mbuf_priv_data_len, | |
753 | mbuf_size, | |
43307ad0 | 754 | socket_id); |
24e78f93 | 755 | |
43307ad0 | 756 | if (dmp->mp) { |
24e78f93 IM |
757 | VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", |
758 | mp_name, n_mbufs); | |
837c1761 | 759 | /* rte_pktmbuf_pool_create has done some initialization of the |
43307ad0 IS |
760 | * rte_mbuf part of each dp_packet, while ovs_rte_pktmbuf_init |
761 | * initializes some OVS specific fields of dp_packet. | |
762 | */ | |
763 | rte_mempool_obj_iter(dmp->mp, ovs_rte_pktmbuf_init, NULL); | |
764 | return dmp; | |
d555d9bd RW |
765 | } else if (rte_errno == EEXIST) { |
766 | /* A mempool with the same name already exists. We just | |
767 | * retrieve its pointer to be returned to the caller. */ | |
43307ad0 | 768 | dmp->mp = rte_mempool_lookup(mp_name); |
d555d9bd RW |
769 | /* As the mempool create returned EEXIST we can expect the |
770 | * lookup has returned a valid pointer. If for some reason | |
771 | * that's not the case we keep track of it. */ | |
24e78f93 | 772 | VLOG_DBG("A mempool with name \"%s\" already exists at %p.", |
43307ad0 IS |
773 | mp_name, dmp->mp); |
774 | return dmp; | |
d555d9bd | 775 | } else { |
43307ad0 IS |
776 | VLOG_DBG("Failed to create mempool \"%s\" with a request of " |
777 | "%u mbufs, retrying with %u mbufs", | |
778 | mp_name, n_mbufs, n_mbufs / 2); | |
0c6f39e5 | 779 | } |
43307ad0 | 780 | } while (!dmp->mp && rte_errno == ENOMEM && (n_mbufs /= 2) >= MIN_NB_MBUF); |
2ae3d542 | 781 | |
43307ad0 IS |
782 | VLOG_ERR("Failed to create mempool \"%s\" with a request of %u mbufs", |
783 | mp_name, n_mbufs); | |
784 | ||
785 | rte_free(dmp); | |
786 | return NULL; | |
8a9562d2 PS |
787 | } |
788 | ||
43307ad0 IS |
789 | static struct dpdk_mp * |
790 | dpdk_mp_get(struct netdev_dpdk *dev, int mtu, bool per_port_mp) | |
8a9562d2 | 791 | { |
43307ad0 IS |
792 | struct dpdk_mp *dmp, *next; |
793 | bool reuse = false; | |
8a9562d2 | 794 | |
c2adb102 | 795 | ovs_mutex_lock(&dpdk_mp_mutex); |
43307ad0 IS |
796 | /* Check if shared memory is being used, if so check existing mempools |
797 | * to see if reuse is possible. */ | |
798 | if (!per_port_mp) { | |
799 | LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) { | |
800 | if (dmp->socket_id == dev->requested_socket_id | |
801 | && dmp->mtu == mtu) { | |
802 | VLOG_DBG("Reusing mempool \"%s\"", dmp->mp->name); | |
803 | dmp->refcount++; | |
804 | reuse = true; | |
805 | break; | |
806 | } | |
807 | } | |
808 | } | |
809 | /* Sweep mempools after reuse or before create. */ | |
810 | dpdk_mp_sweep(); | |
91fccdad | 811 | |
43307ad0 IS |
812 | if (!reuse) { |
813 | dmp = dpdk_mp_create(dev, mtu, per_port_mp); | |
91fccdad | 814 | if (dmp) { |
43307ad0 IS |
815 | /* Shared memory will hit the reuse case above so will not |
816 | * request a mempool that already exists but we need to check | |
817 | * for the EEXIST case for per port memory case. Compare the | |
818 | * mempool returned by dmp to each entry in dpdk_mp_list. If a | |
819 | * match is found, free dmp as a new entry is not required, set | |
820 | * dmp to point to the existing entry and increment the refcount | |
821 | * to avoid being freed at a later stage. | |
822 | */ | |
823 | if (per_port_mp && rte_errno == EEXIST) { | |
824 | LIST_FOR_EACH (next, list_node, &dpdk_mp_list) { | |
825 | if (dmp->mp == next->mp) { | |
826 | rte_free(dmp); | |
827 | dmp = next; | |
828 | dmp->refcount++; | |
829 | } | |
830 | } | |
831 | } else { | |
832 | ovs_list_push_back(&dpdk_mp_list, &dmp->list_node); | |
833 | } | |
91fccdad KT |
834 | } |
835 | } | |
43307ad0 | 836 | |
43307ad0 IS |
837 | ovs_mutex_unlock(&dpdk_mp_mutex); |
838 | ||
839 | return dmp; | |
840 | } | |
841 | ||
842 | /* Decrement reference to a mempool. */ | |
843 | static void | |
844 | dpdk_mp_put(struct dpdk_mp *dmp) | |
845 | { | |
846 | if (!dmp) { | |
847 | return; | |
848 | } | |
849 | ||
850 | ovs_mutex_lock(&dpdk_mp_mutex); | |
851 | ovs_assert(dmp->refcount); | |
852 | dmp->refcount--; | |
c2adb102 | 853 | ovs_mutex_unlock(&dpdk_mp_mutex); |
8a9562d2 PS |
854 | } |
855 | ||
43307ad0 IS |
856 | /* Depending on the memory model being used this function tries to |
857 | * identify and reuse an existing mempool or tries to allocate a new | |
858 | * mempool on requested_socket_id with mbuf size corresponding to the | |
859 | * requested_mtu. On success, a new configuration will be applied. | |
0072e931 MK |
860 | * On error, device will be left unchanged. */ |
861 | static int | |
862 | netdev_dpdk_mempool_configure(struct netdev_dpdk *dev) | |
0072e931 MK |
863 | OVS_REQUIRES(dev->mutex) |
864 | { | |
865 | uint32_t buf_size = dpdk_buf_size(dev->requested_mtu); | |
43307ad0 | 866 | struct dpdk_mp *dmp; |
24e78f93 | 867 | int ret = 0; |
43307ad0 | 868 | bool per_port_mp = dpdk_per_port_memory(); |
0072e931 | 869 | |
43307ad0 IS |
870 | /* With shared memory we do not need to configure a mempool if the MTU |
871 | * and socket ID have not changed, the previous configuration is still | |
872 | * valid so return 0 */ | |
873 | if (!per_port_mp && dev->mtu == dev->requested_mtu | |
874 | && dev->socket_id == dev->requested_socket_id) { | |
875 | return ret; | |
876 | } | |
91fccdad | 877 | |
43307ad0 IS |
878 | dmp = dpdk_mp_get(dev, FRAME_LEN_TO_MTU(buf_size), per_port_mp); |
879 | if (!dmp) { | |
c67e46c0 MK |
880 | VLOG_ERR("Failed to create memory pool for netdev " |
881 | "%s, with MTU %d on socket %d: %s\n", | |
882 | dev->up.name, dev->requested_mtu, dev->requested_socket_id, | |
883 | rte_strerror(rte_errno)); | |
24e78f93 | 884 | ret = rte_errno; |
0072e931 | 885 | } else { |
43307ad0 IS |
886 | /* Check for any pre-existing dpdk_mp for the device before accessing |
887 | * the associated mempool. | |
888 | */ | |
889 | if (dev->dpdk_mp != NULL) { | |
890 | /* A new MTU was requested, decrement the reference count for the | |
891 | * devices current dpdk_mp. This is required even if a pointer to | |
892 | * same dpdk_mp is returned by dpdk_mp_get. The refcount for dmp | |
893 | * has already been incremented by dpdk_mp_get at this stage so it | |
894 | * must be decremented to keep an accurate refcount for the | |
895 | * dpdk_mp. | |
896 | */ | |
897 | dpdk_mp_put(dev->dpdk_mp); | |
898 | } | |
899 | dev->dpdk_mp = dmp; | |
0072e931 MK |
900 | dev->mtu = dev->requested_mtu; |
901 | dev->socket_id = dev->requested_socket_id; | |
902 | dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu); | |
903 | } | |
904 | ||
24e78f93 | 905 | return ret; |
0072e931 MK |
906 | } |
907 | ||
8a9562d2 PS |
908 | static void |
909 | check_link_status(struct netdev_dpdk *dev) | |
910 | { | |
911 | struct rte_eth_link link; | |
912 | ||
913 | rte_eth_link_get_nowait(dev->port_id, &link); | |
914 | ||
915 | if (dev->link.link_status != link.link_status) { | |
3e912ffc | 916 | netdev_change_seq_changed(&dev->up); |
8a9562d2 PS |
917 | |
918 | dev->link_reset_cnt++; | |
919 | dev->link = link; | |
920 | if (dev->link.link_status) { | |
fa9f4eeb IM |
921 | VLOG_DBG_RL(&rl, |
922 | "Port "DPDK_PORT_ID_FMT" Link Up - speed %u Mbps - %s", | |
58be5c0e | 923 | dev->port_id, (unsigned) dev->link.link_speed, |
fa9f4eeb IM |
924 | (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) |
925 | ? "full-duplex" : "half-duplex"); | |
8a9562d2 | 926 | } else { |
fa9f4eeb IM |
927 | VLOG_DBG_RL(&rl, "Port "DPDK_PORT_ID_FMT" Link Down", |
928 | dev->port_id); | |
8a9562d2 PS |
929 | } |
930 | } | |
931 | } | |
932 | ||
933 | static void * | |
934 | dpdk_watchdog(void *dummy OVS_UNUSED) | |
935 | { | |
936 | struct netdev_dpdk *dev; | |
937 | ||
938 | pthread_detach(pthread_self()); | |
939 | ||
940 | for (;;) { | |
941 | ovs_mutex_lock(&dpdk_mutex); | |
942 | LIST_FOR_EACH (dev, list_node, &dpdk_list) { | |
943 | ovs_mutex_lock(&dev->mutex); | |
1f5b157e IM |
944 | if (dev->type == DPDK_DEV_ETH) { |
945 | check_link_status(dev); | |
946 | } | |
8a9562d2 PS |
947 | ovs_mutex_unlock(&dev->mutex); |
948 | } | |
949 | ovs_mutex_unlock(&dpdk_mutex); | |
950 | xsleep(DPDK_PORT_WATCHDOG_INTERVAL); | |
951 | } | |
952 | ||
953 | return NULL; | |
954 | } | |
955 | ||
b98d7669 | 956 | static int |
f8b64a61 | 957 | dpdk_eth_dev_port_config(struct netdev_dpdk *dev, int n_rxq, int n_txq) |
b98d7669 DDP |
958 | { |
959 | int diag = 0; | |
960 | int i; | |
0072e931 | 961 | struct rte_eth_conf conf = port_conf; |
65a87968 | 962 | struct rte_eth_dev_info info; |
4dd16ca0 | 963 | uint16_t conf_mtu; |
65a87968 | 964 | |
03f3f9c0 OM |
965 | rte_eth_dev_info_get(dev->port_id, &info); |
966 | ||
db704171 JCR |
967 | /* As of DPDK 19.11, it is not allowed to set a mq_mode for |
968 | * virtio PMD driver. */ | |
969 | if (!strcmp(info.driver_name, "net_virtio")) { | |
970 | conf.rxmode.mq_mode = ETH_MQ_RX_NONE; | |
971 | } else { | |
972 | conf.rxmode.mq_mode = ETH_MQ_RX_RSS; | |
973 | } | |
974 | ||
65a87968 | 975 | /* As of DPDK 17.11.1 a few PMDs require scatter to be explicitly
03f3f9c0 OM |
976 | * enabled in order to support jumbo RX.
977 | * Setting scatter for the device is done after checking for
978 | * scatter support in the device capabilities. */
127b6a6e | 979 | if (dev->mtu > RTE_ETHER_MTU) { |
03f3f9c0 OM |
980 | if (dev->hw_ol_features & NETDEV_RX_HW_SCATTER) { |
981 | conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER; | |
65a87968 | 982 | } |
0072e931 | 983 | } |
67fe6d63 | 984 | |
f8b64a61 | 985 | conf.intr_conf.lsc = dev->lsc_interrupt_mode; |
e10ca8b9 | 986 | |
03f3f9c0 OM |
987 | if (dev->hw_ol_features & NETDEV_RX_CHECKSUM_OFFLOAD) { |
988 | conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM; | |
989 | } | |
990 | ||
991 | if (!(dev->hw_ol_features & NETDEV_RX_HW_CRC_STRIP) | |
992 | && info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) { | |
993 | conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; | |
e10ca8b9 MW |
994 | } |
995 | ||
29cf9c1b | 996 | if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) { |
8c5163fe | 997 | conf.txmode.offloads |= DPDK_TX_TSO_OFFLOAD_FLAGS; |
35b5586b FL |
998 | if (dev->hw_ol_features & NETDEV_TX_SCTP_CHECKSUM_OFFLOAD) { |
999 | conf.txmode.offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM; | |
1000 | } | |
29cf9c1b FL |
1001 | } |
1002 | ||
03f3f9c0 OM |
1003 | /* Limit configured rss hash functions to only those supported |
1004 | * by the eth device. */ | |
1005 | conf.rx_adv_conf.rss_conf.rss_hf &= info.flow_type_rss_offloads; | |
1006 | ||
b98d7669 DDP |
1007 | /* A device may report more queues than it makes available (this has |
1008 | * been observed for Intel xl710, which reserves some of them for | |
1009 | * SRIOV): rte_eth_*_queue_setup will fail if a queue is not | |
1010 | * available. When this happens we can retry the configuration | |
1011 | * and request less queues */ | |
1012 | while (n_rxq && n_txq) { | |
1013 | if (diag) { | |
1014 | VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq); | |
1015 | } | |
1016 | ||
0072e931 | 1017 | diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &conf); |
b98d7669 | 1018 | if (diag) { |
0072e931 MK |
1019 | VLOG_WARN("Interface %s eth_dev setup error %s\n", |
1020 | dev->up.name, rte_strerror(-diag)); | |
b98d7669 DDP |
1021 | break; |
1022 | } | |
1023 | ||
67fe6d63 MK |
1024 | diag = rte_eth_dev_set_mtu(dev->port_id, dev->mtu); |
1025 | if (diag) { | |
4dd16ca0 IS |
1026 | /* A device may not support rte_eth_dev_set_mtu; in this case,
1027 | * flag a warning to the user and include the device's configured
1028 | * MTU value that will be used instead. */
1029 | if (-ENOTSUP == diag) { | |
1030 | rte_eth_dev_get_mtu(dev->port_id, &conf_mtu); | |
1031 | VLOG_WARN("Interface %s does not support MTU configuration, " | |
1032 | "max packet size supported is %"PRIu16".", | |
1033 | dev->up.name, conf_mtu); | |
1034 | } else { | |
1035 | VLOG_ERR("Interface %s MTU (%d) setup error: %s", | |
1036 | dev->up.name, dev->mtu, rte_strerror(-diag)); | |
1037 | break; | |
1038 | } | |
67fe6d63 MK |
1039 | } |
1040 | ||
b98d7669 | 1041 | for (i = 0; i < n_txq; i++) { |
b685696b | 1042 | diag = rte_eth_tx_queue_setup(dev->port_id, i, dev->txq_size, |
b98d7669 DDP |
1043 | dev->socket_id, NULL); |
1044 | if (diag) { | |
1dfebee9 | 1045 | VLOG_INFO("Interface %s unable to setup txq(%d): %s", |
b98d7669 DDP |
1046 | dev->up.name, i, rte_strerror(-diag)); |
1047 | break; | |
1048 | } | |
1049 | } | |
1050 | ||
1051 | if (i != n_txq) { | |
1052 | /* Retry with less tx queues */ | |
1053 | n_txq = i; | |
1054 | continue; | |
1055 | } | |
1056 | ||
1057 | for (i = 0; i < n_rxq; i++) { | |
b685696b | 1058 | diag = rte_eth_rx_queue_setup(dev->port_id, i, dev->rxq_size, |
43307ad0 IS |
1059 | dev->socket_id, NULL, |
1060 | dev->dpdk_mp->mp); | |
b98d7669 | 1061 | if (diag) { |
1dfebee9 | 1062 | VLOG_INFO("Interface %s unable to setup rxq(%d): %s", |
b98d7669 DDP |
1063 | dev->up.name, i, rte_strerror(-diag)); |
1064 | break; | |
1065 | } | |
1066 | } | |
1067 | ||
1068 | if (i != n_rxq) { | |
1069 | /* Retry with less rx queues */ | |
1070 | n_rxq = i; | |
1071 | continue; | |
1072 | } | |
1073 | ||
1074 | dev->up.n_rxq = n_rxq; | |
81acebda | 1075 | dev->up.n_txq = n_txq; |
b98d7669 DDP |
1076 | |
1077 | return 0; | |
1078 | } | |
1079 | ||
1080 | return diag; | |
1081 | } | |
1082 | ||
9fd39370 SC |
1083 | static void |
1084 | dpdk_eth_flow_ctrl_setup(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex) | |
1085 | { | |
1086 | if (rte_eth_dev_flow_ctrl_set(dev->port_id, &dev->fc_conf)) { | |
fa9f4eeb | 1087 | VLOG_WARN("Failed to enable flow control on device "DPDK_PORT_ID_FMT, |
bb37956a | 1088 | dev->port_id); |
9fd39370 SC |
1089 | } |
1090 | } | |
b98d7669 | 1091 | |
8a9562d2 | 1092 | static int |
c2adb102 IM |
1093 | dpdk_eth_dev_init(struct netdev_dpdk *dev) |
1094 | OVS_REQUIRES(dev->mutex) | |
8a9562d2 PS |
1095 | { |
1096 | struct rte_pktmbuf_pool_private *mbp_priv; | |
a0cb2d66 | 1097 | struct rte_eth_dev_info info; |
127b6a6e | 1098 | struct rte_ether_addr eth_addr; |
8a9562d2 | 1099 | int diag; |
b98d7669 | 1100 | int n_rxq, n_txq; |
8c5163fe | 1101 | uint32_t tx_tso_offload_capa = DPDK_TX_TSO_OFFLOAD_FLAGS; |
d4f5282c KT |
1102 | uint32_t rx_chksm_offload_capa = DEV_RX_OFFLOAD_UDP_CKSUM | |
1103 | DEV_RX_OFFLOAD_TCP_CKSUM | | |
1104 | DEV_RX_OFFLOAD_IPV4_CKSUM; | |
8a9562d2 | 1105 | |
a0cb2d66 | 1106 | rte_eth_dev_info_get(dev->port_id, &info); |
a0cb2d66 | 1107 | |
e10ca8b9 MW |
1108 | if (strstr(info.driver_name, "vf") != NULL) { |
1109 | VLOG_INFO("Virtual function detected, HW_CRC_STRIP will be enabled"); | |
1110 | dev->hw_ol_features |= NETDEV_RX_HW_CRC_STRIP; | |
1111 | } else { | |
1112 | dev->hw_ol_features &= ~NETDEV_RX_HW_CRC_STRIP; | |
1113 | } | |
1114 | ||
d4f5282c KT |
1115 | if ((info.rx_offload_capa & rx_chksm_offload_capa) != |
1116 | rx_chksm_offload_capa) { | |
fa9f4eeb IM |
1117 | VLOG_WARN("Rx checksum offload is not supported on port " |
1118 | DPDK_PORT_ID_FMT, dev->port_id); | |
d4f5282c KT |
1119 | dev->hw_ol_features &= ~NETDEV_RX_CHECKSUM_OFFLOAD; |
1120 | } else { | |
1121 | dev->hw_ol_features |= NETDEV_RX_CHECKSUM_OFFLOAD; | |
1122 | } | |
1123 | ||
03f3f9c0 OM |
1124 | if (info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER) { |
1125 | dev->hw_ol_features |= NETDEV_RX_HW_SCATTER; | |
1126 | } else { | |
1127 | /* Do not warn on lack of scatter support */ | |
1128 | dev->hw_ol_features &= ~NETDEV_RX_HW_SCATTER; | |
1129 | } | |
1130 | ||
1223cf12 IM |
1131 | dev->hw_ol_features &= ~NETDEV_TX_TSO_OFFLOAD; |
1132 | if (userspace_tso_enabled()) { | |
1133 | if ((info.tx_offload_capa & tx_tso_offload_capa) | |
1134 | == tx_tso_offload_capa) { | |
1135 | dev->hw_ol_features |= NETDEV_TX_TSO_OFFLOAD; | |
35b5586b FL |
1136 | if (info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { |
1137 | dev->hw_ol_features |= NETDEV_TX_SCTP_CHECKSUM_OFFLOAD; | |
1138 | } else { | |
1139 | VLOG_WARN("%s: Tx SCTP checksum offload is not supported, " | |
1140 | "SCTP packets sent to this device will be dropped", | |
1141 | netdev_get_name(&dev->up)); | |
1142 | } | |
1223cf12 IM |
1143 | } else { |
1144 | VLOG_WARN("%s: Tx TSO offload is not supported.", | |
1145 | netdev_get_name(&dev->up)); | |
1146 | } | |
29cf9c1b FL |
1147 | } |
1148 | ||
b98d7669 DDP |
1149 | n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq); |
1150 | n_txq = MIN(info.max_tx_queues, dev->up.n_txq); | |
1151 | ||
f8b64a61 | 1152 | diag = dpdk_eth_dev_port_config(dev, n_rxq, n_txq); |
8a9562d2 | 1153 | if (diag) { |
f8b64a61 RM |
1154 | VLOG_ERR("Interface %s(rxq:%d txq:%d lsc interrupt mode:%s) " |
1155 | "configure error: %s", | |
1156 | dev->up.name, n_rxq, n_txq, | |
1157 | dev->lsc_interrupt_mode ? "true" : "false", | |
1158 | rte_strerror(-diag)); | |
95fb793a | 1159 | return -diag; |
8a9562d2 PS |
1160 | } |
1161 | ||
8a9562d2 PS |
1162 | diag = rte_eth_dev_start(dev->port_id); |
1163 | if (diag) { | |
b98d7669 DDP |
1164 | VLOG_ERR("Interface %s start error: %s", dev->up.name, |
1165 | rte_strerror(-diag)); | |
95fb793a | 1166 | return -diag; |
8a9562d2 | 1167 | } |
606f6650 | 1168 | dev->started = true; |
8a9562d2 PS |
1169 | |
1170 | rte_eth_promiscuous_enable(dev->port_id); | |
1171 | rte_eth_allmulticast_enable(dev->port_id); | |
1172 | ||
1173 | memset(ð_addr, 0x0, sizeof(eth_addr)); | |
1174 | rte_eth_macaddr_get(dev->port_id, ð_addr); | |
fa9f4eeb IM |
1175 | VLOG_INFO_RL(&rl, "Port "DPDK_PORT_ID_FMT": "ETH_ADDR_FMT, |
1176 | dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes)); | |
8a9562d2 | 1177 | |
ca92d173 | 1178 | memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN); |
8a9562d2 PS |
1179 | rte_eth_link_get_nowait(dev->port_id, &dev->link); |
1180 | ||
43307ad0 | 1181 | mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp); |
8a9562d2 | 1182 | dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; |
8a9562d2 PS |
1183 | return 0; |
1184 | } | |
1185 | ||
1186 | static struct netdev_dpdk * | |
1187 | netdev_dpdk_cast(const struct netdev *netdev) | |
1188 | { | |
1189 | return CONTAINER_OF(netdev, struct netdev_dpdk, up); | |
1190 | } | |
1191 | ||
1192 | static struct netdev * | |
1193 | netdev_dpdk_alloc(void) | |
1194 | { | |
bab69409 AC |
1195 | struct netdev_dpdk *dev; |
1196 | ||
65e19e70 DDP |
1197 | dev = dpdk_rte_mzalloc(sizeof *dev); |
1198 | if (dev) { | |
1199 | return &dev->up; | |
bab69409 | 1200 | } |
65e19e70 | 1201 | |
bab69409 | 1202 | return NULL; |
8a9562d2 PS |
1203 | } |
1204 | ||
eff23640 DDP |
1205 | static struct dpdk_tx_queue * |
1206 | netdev_dpdk_alloc_txq(unsigned int n_txqs) | |
5a034064 | 1207 | { |
eff23640 | 1208 | struct dpdk_tx_queue *txqs; |
bd5131ba | 1209 | unsigned i; |
5a034064 | 1210 | |
eff23640 DDP |
1211 | txqs = dpdk_rte_mzalloc(n_txqs * sizeof *txqs); |
1212 | if (txqs) { | |
1213 | for (i = 0; i < n_txqs; i++) { | |
1214 | /* Initialize map for vhost devices. */ | |
1215 | txqs[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN; | |
1216 | rte_spinlock_init(&txqs[i].tx_lock); | |
1217 | } | |
5a034064 | 1218 | } |
eff23640 DDP |
1219 | |
1220 | return txqs; | |
5a034064 AW |
1221 | } |
1222 | ||
8a9562d2 | 1223 | static int |
bb37956a | 1224 | common_construct(struct netdev *netdev, dpdk_port_t port_no, |
1ce30dfd | 1225 | enum dpdk_dev_type type, int socket_id) |
5a034064 | 1226 | OVS_REQUIRES(dpdk_mutex) |
8a9562d2 | 1227 | { |
d46285a2 | 1228 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 | 1229 | |
d46285a2 | 1230 | ovs_mutex_init(&dev->mutex); |
8a9562d2 | 1231 | |
d46285a2 | 1232 | rte_spinlock_init(&dev->stats_lock); |
45d947c4 | 1233 | |
1b7a04e0 AW |
1234 | /* If the 'sid' is negative, it means that the kernel fails |
1235 | * to obtain the pci numa info. In that situation, always | |
1236 | * use 'SOCKET0'. */ | |
1ce30dfd | 1237 | dev->socket_id = socket_id < 0 ? SOCKET0 : socket_id; |
db8f13b0 | 1238 | dev->requested_socket_id = dev->socket_id; |
d46285a2 DDP |
1239 | dev->port_id = port_no; |
1240 | dev->type = type; | |
1241 | dev->flags = 0; | |
127b6a6e | 1242 | dev->requested_mtu = RTE_ETHER_MTU; |
d46285a2 | 1243 | dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu); |
f8b64a61 | 1244 | dev->requested_lsc_interrupt_mode = 0; |
0a0f39df CL |
1245 | ovsrcu_index_init(&dev->vid, -1); |
1246 | dev->vhost_reconfigured = false; | |
5dcde09c | 1247 | dev->attached = false; |
988fd463 EC |
1248 | dev->started = false; |
1249 | dev->reset_needed = false; | |
8a9562d2 | 1250 | |
78bd47cf | 1251 | ovsrcu_init(&dev->qos_conf, NULL); |
0bf765f7 | 1252 | |
9509913a IS |
1253 | ovsrcu_init(&dev->ingress_policer, NULL); |
1254 | dev->policer_rate = 0; | |
1255 | dev->policer_burst = 0; | |
1256 | ||
7f381c2e DDP |
1257 | netdev->n_rxq = 0; |
1258 | netdev->n_txq = 0; | |
1259 | dev->requested_n_rxq = NR_QUEUE; | |
1260 | dev->requested_n_txq = NR_QUEUE; | |
1261 | dev->requested_rxq_size = NIC_PORT_DEFAULT_RXQ_SIZE; | |
1262 | dev->requested_txq_size = NIC_PORT_DEFAULT_TXQ_SIZE; | |
58397e6c | 1263 | |
9fd39370 SC |
1264 | /* Initialize the flow control to NULL */ |
1265 | memset(&dev->fc_conf, 0, sizeof dev->fc_conf); | |
1a2bb118 SC |
1266 | |
1267 | /* Initilize the hardware offload flags to 0 */ | |
1268 | dev->hw_ol_features = 0; | |
3b1fb077 DDP |
1269 | |
1270 | dev->flags = NETDEV_UP | NETDEV_PROMISC; | |
1271 | ||
d46285a2 | 1272 | ovs_list_push_back(&dpdk_list, &dev->list_node); |
8a9562d2 | 1273 | |
7f381c2e DDP |
1274 | netdev_request_reconfigure(netdev); |
1275 | ||
971f4b39 MW |
1276 | dev->rte_xstats_names = NULL; |
1277 | dev->rte_xstats_names_size = 0; | |
1278 | ||
1279 | dev->rte_xstats_ids = NULL; | |
1280 | dev->rte_xstats_ids_size = 0; | |
1281 | ||
2f862c71 SV |
1282 | dev->sw_stats = xzalloc(sizeof *dev->sw_stats); |
1283 | dev->sw_stats->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX; | |
c161357d | 1284 | |
1ce30dfd | 1285 | return 0; |
95fb793a | 1286 | } |
1287 | ||
40e940e4 OM |
1288 | /* Get the number of OVS interfaces which have the same DPDK |
1289 | * rte device (e.g. same pci bus address). | |
1290 | * FIXME: avoid direct access to DPDK internal array rte_eth_devices. | |
1291 | */ | |
1292 | static int | |
1293 | netdev_dpdk_get_num_ports(struct rte_device *device) | |
1294 | OVS_REQUIRES(dpdk_mutex) | |
1295 | { | |
1296 | struct netdev_dpdk *dev; | |
1297 | int count = 0; | |
1298 | ||
1299 | LIST_FOR_EACH (dev, list_node, &dpdk_list) { | |
1300 | if (rte_eth_devices[dev->port_id].device == device | |
1301 | && rte_eth_devices[dev->port_id].state != RTE_ETH_DEV_UNUSED) { | |
1302 | count++; | |
1303 | } | |
1304 | } | |
1305 | return count; | |
1306 | } | |
1307 | ||
1ce30dfd DDP |
1308 | static int |
1309 | vhost_common_construct(struct netdev *netdev) | |
1310 | OVS_REQUIRES(dpdk_mutex) | |
1311 | { | |
1312 | int socket_id = rte_lcore_to_socket_id(rte_get_master_lcore()); | |
1313 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
1314 | ||
35c91567 DM |
1315 | dev->vhost_rxq_enabled = dpdk_rte_mzalloc(OVS_VHOST_MAX_QUEUE_NUM * |
1316 | sizeof *dev->vhost_rxq_enabled); | |
1317 | if (!dev->vhost_rxq_enabled) { | |
1318 | return ENOMEM; | |
1319 | } | |
1ce30dfd DDP |
1320 | dev->tx_q = netdev_dpdk_alloc_txq(OVS_VHOST_MAX_QUEUE_NUM); |
1321 | if (!dev->tx_q) { | |
35c91567 | 1322 | rte_free(dev->vhost_rxq_enabled); |
1ce30dfd DDP |
1323 | return ENOMEM; |
1324 | } | |
1325 | ||
080f080c KT |
1326 | atomic_init(&dev->vhost_tx_retries_max, VHOST_ENQ_RETRY_DEF); |
1327 | ||
bb37956a IM |
1328 | return common_construct(netdev, DPDK_ETH_PORT_ID_INVALID, |
1329 | DPDK_DEV_VHOST, socket_id); | |
1ce30dfd DDP |
1330 | } |
1331 | ||
7d1ced01 | 1332 | static int |
53f50d24 | 1333 | netdev_dpdk_vhost_construct(struct netdev *netdev) |
7d1ced01 | 1334 | { |
d46285a2 DDP |
1335 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
1336 | const char *name = netdev->name; | |
7d1ced01 | 1337 | int err; |
a0cb2d66 | 1338 | |
1af27e8a DDP |
1339 | /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in |
1340 | * the file system. '/' or '\' would traverse directories, so they're not | |
1341 | * acceptable in 'name'. */ | |
1342 | if (strchr(name, '/') || strchr(name, '\\')) { | |
1343 | VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. " | |
1344 | "A valid name must not include '/' or '\\'", | |
1345 | name); | |
1346 | return EINVAL; | |
1347 | } | |
1348 | ||
7d1ced01 CL |
1349 | ovs_mutex_lock(&dpdk_mutex); |
1350 | /* Take the name of the vhost-user port and append it to the location where | |
2d24d165 | 1351 | * the socket is to be created, then register the socket. |
7d1ced01 | 1352 | */ |
bb9d2623 | 1353 | dev->vhost_id = xasprintf("%s/%s", dpdk_get_vhost_sock_dir(), name); |
1af27e8a | 1354 | |
2d24d165 | 1355 | dev->vhost_driver_flags &= ~RTE_VHOST_USER_CLIENT; |
e666e8e0 FL |
1356 | |
1357 | /* There is no support for multi-segment buffers. */ |
1358 | dev->vhost_driver_flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT; | |
2d24d165 | 1359 | err = rte_vhost_driver_register(dev->vhost_id, dev->vhost_driver_flags); |
7d1ced01 CL |
1360 | if (err) { |
1361 | VLOG_ERR("vhost-user socket device setup failure for socket %s\n", | |
2d24d165 | 1362 | dev->vhost_id); |
f3e7ec25 | 1363 | goto out; |
e5c0f5a4 | 1364 | } else { |
2d24d165 CL |
1365 | fatal_signal_add_file_to_unlink(dev->vhost_id); |
1366 | VLOG_INFO("Socket %s created for vhost-user port %s\n", | |
1367 | dev->vhost_id, name); | |
1368 | } | |
f3e7ec25 MW |
1369 | |
1370 | err = rte_vhost_driver_callback_register(dev->vhost_id, | |
1371 | &virtio_net_device_ops); | |
1372 | if (err) { | |
1373 | VLOG_ERR("rte_vhost_driver_callback_register failed for vhost user " | |
1374 | "port: %s\n", name); | |
1375 | goto out; | |
1376 | } | |
1377 | ||
29cf9c1b FL |
1378 | if (!userspace_tso_enabled()) { |
1379 | err = rte_vhost_driver_disable_features(dev->vhost_id, | |
1380 | 1ULL << VIRTIO_NET_F_HOST_TSO4 | |
1381 | | 1ULL << VIRTIO_NET_F_HOST_TSO6 | |
1382 | | 1ULL << VIRTIO_NET_F_CSUM); | |
1383 | if (err) { | |
1384 | VLOG_ERR("rte_vhost_driver_disable_features failed for vhost user " | |
1385 | "port: %s\n", name); | |
1386 | goto out; | |
1387 | } | |
f3e7ec25 MW |
1388 | } |
1389 | ||
1390 | err = rte_vhost_driver_start(dev->vhost_id); | |
1391 | if (err) { | |
1392 | VLOG_ERR("rte_vhost_driver_start failed for vhost user " | |
1393 | "port: %s\n", name); | |
1394 | goto out; | |
1395 | } | |
1396 | ||
1ce30dfd | 1397 | err = vhost_common_construct(netdev); |
f3e7ec25 MW |
1398 | if (err) { |
1399 | VLOG_ERR("vhost_common_construct failed for vhost user " | |
1400 | "port: %s\n", name); | |
1401 | } | |
2d24d165 | 1402 | |
f3e7ec25 | 1403 | out: |
bb9d2623 IM |
1404 | if (err) { |
1405 | free(dev->vhost_id); | |
1406 | dev->vhost_id = NULL; | |
1407 | } | |
1408 | ||
2d24d165 | 1409 | ovs_mutex_unlock(&dpdk_mutex); |
28ca969e AC |
1410 | VLOG_WARN_ONCE("dpdkvhostuser ports are considered deprecated; " |
1411 | "please migrate to dpdkvhostuserclient ports."); | |
2d24d165 CL |
1412 | return err; |
1413 | } | |
1414 | ||
1415 | static int | |
1416 | netdev_dpdk_vhost_client_construct(struct netdev *netdev) | |
1417 | { | |
1418 | int err; | |
1419 | ||
2d24d165 | 1420 | ovs_mutex_lock(&dpdk_mutex); |
1ce30dfd | 1421 | err = vhost_common_construct(netdev); |
f3e7ec25 MW |
1422 | if (err) { |
1423 | VLOG_ERR("vhost_common_construct failed for vhost user client" | |
1424 | "port: %s\n", netdev->name); | |
1425 | } | |
7d1ced01 | 1426 | ovs_mutex_unlock(&dpdk_mutex); |
58397e6c KT |
1427 | return err; |
1428 | } | |
1429 | ||
95fb793a | 1430 | static int |
1431 | netdev_dpdk_construct(struct netdev *netdev) | |
1432 | { | |
95fb793a | 1433 | int err; |
1434 | ||
95fb793a | 1435 | ovs_mutex_lock(&dpdk_mutex); |
bb37956a IM |
1436 | err = common_construct(netdev, DPDK_ETH_PORT_ID_INVALID, |
1437 | DPDK_DEV_ETH, SOCKET0); | |
8a9562d2 PS |
1438 | ovs_mutex_unlock(&dpdk_mutex); |
1439 | return err; | |
1440 | } | |
1441 | ||
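/* Tear-down steps shared by all port types: free the tx queue array, drop the
 * mempool reference, unlink the device from the global dpdk_list and free the
 * ingress policer, the software stats and the device mutex. */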
1ce30dfd DDP |
1442 | static void |
1443 | common_destruct(struct netdev_dpdk *dev) | |
1444 | OVS_REQUIRES(dpdk_mutex) | |
1445 | OVS_EXCLUDED(dev->mutex) | |
1446 | { | |
1447 | rte_free(dev->tx_q); | |
43307ad0 | 1448 | dpdk_mp_put(dev->dpdk_mp); |
1ce30dfd DDP |
1449 | |
1450 | ovs_list_remove(&dev->list_node); | |
1451 | free(ovsrcu_get_protected(struct ingress_policer *, | |
1452 | &dev->ingress_policer)); | |
2f862c71 | 1453 | free(dev->sw_stats); |
1ce30dfd DDP |
1454 | ovs_mutex_destroy(&dev->mutex); |
1455 | } | |
1456 | ||
8a9562d2 | 1457 | static void |
d46285a2 | 1458 | netdev_dpdk_destruct(struct netdev *netdev) |
8a9562d2 | 1459 | { |
d46285a2 | 1460 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
40e940e4 OM |
1461 | struct rte_device *rte_dev; |
1462 | struct rte_eth_dev *eth_dev; | |
1463 | bool remove_on_close; | |
8a9562d2 | 1464 | |
8d38823b | 1465 | ovs_mutex_lock(&dpdk_mutex); |
8d38823b | 1466 | |
8a9562d2 | 1467 | rte_eth_dev_stop(dev->port_id); |
606f6650 | 1468 | dev->started = false; |
5dcde09c IM |
1469 | |
1470 | if (dev->attached) { | |
40e940e4 OM |
1471 | /* Retrieve eth device data before closing it. |
1472 | * FIXME: avoid direct access to DPDK internal array rte_eth_devices. | |
1473 | */ | |
1474 | eth_dev = &rte_eth_devices[dev->port_id]; | |
1475 | remove_on_close = | |
1476 | eth_dev->data && | |
1477 | (eth_dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE); | |
1478 | rte_dev = eth_dev->device; | |
1479 | ||
1480 | /* Remove the eth device. */ | |
5dcde09c | 1481 | rte_eth_dev_close(dev->port_id); |
40e940e4 OM |
1482 | |
1483 | /* Remove this rte device and all its eth devices if flag | |
1484 | * RTE_ETH_DEV_CLOSE_REMOVE is not supported (which means representors | |
1485 | * are not supported), or if all the eth devices belonging to the rte | |
1486 | * device are closed. | |
1487 | */ | |
1488 | if (!remove_on_close || !netdev_dpdk_get_num_ports(rte_dev)) { | |
595ce47c IM |
1489 | int ret = rte_dev_remove(rte_dev); |
1490 | ||
1491 | if (ret < 0) { | |
1492 | VLOG_ERR("Device '%s' can not be detached: %s.", | |
1493 | dev->devargs, rte_strerror(-ret)); | |
40e940e4 OM |
1494 | } else { |
1495 | /* Device was closed and detached. */ | |
1496 | VLOG_INFO("Device '%s' has been removed and detached", | |
1497 | dev->devargs); | |
1498 | } | |
5dcde09c | 1499 | } else { |
40e940e4 OM |
1500 | /* Device was only closed. rte_dev_remove() was not called. */ |
1501 | VLOG_INFO("Device '%s' has been removed", dev->devargs); | |
5dcde09c IM |
1502 | } |
1503 | } | |
1504 | ||
ac1a9bb9 | 1505 | netdev_dpdk_clear_xstats(dev); |
55e075e6 | 1506 | free(dev->devargs); |
1ce30dfd | 1507 | common_destruct(dev); |
8d38823b | 1508 | |
8a9562d2 | 1509 | ovs_mutex_unlock(&dpdk_mutex); |
58397e6c | 1510 | } |
8a9562d2 | 1511 | |
3f891bbe DDP |
1512 | /* rte_vhost_driver_unregister() can call back destroy_device(), which will |
1513 | * try to acquire 'dpdk_mutex' and possibly 'dev->mutex'. To avoid a | |
1514 | * deadlock, none of the mutexes must be held while calling this function. */ | |
1515 | static int | |
c1ff66ac CL |
1516 | dpdk_vhost_driver_unregister(struct netdev_dpdk *dev OVS_UNUSED, |
1517 | char *vhost_id) | |
3f891bbe DDP |
1518 | OVS_EXCLUDED(dpdk_mutex) |
1519 | OVS_EXCLUDED(dev->mutex) | |
1520 | { | |
c1ff66ac | 1521 | return rte_vhost_driver_unregister(vhost_id); |
3f891bbe DDP |
1522 | } |
1523 | ||
58397e6c | 1524 | static void |
d46285a2 | 1525 | netdev_dpdk_vhost_destruct(struct netdev *netdev) |
58397e6c | 1526 | { |
d46285a2 | 1527 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
c1ff66ac | 1528 | char *vhost_id; |
58397e6c | 1529 | |
8d38823b | 1530 | ovs_mutex_lock(&dpdk_mutex); |
8d38823b | 1531 | |
c62da695 | 1532 | /* Guest becomes an orphan if still attached. */ |
c1ff66ac CL |
1533 | if (netdev_dpdk_get_vid(dev) >= 0 |
1534 | && !(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) { | |
c62da695 | 1535 | VLOG_ERR("Removing port '%s' while vhost device still attached.", |
d46285a2 | 1536 | netdev->name); |
58be5c0e MK |
1537 | VLOG_ERR("To restore connectivity after re-adding of port, VM on " |
1538 | "socket '%s' must be restarted.", dev->vhost_id); | |
58397e6c KT |
1539 | } |
1540 | ||
bb9d2623 IM |
1541 | vhost_id = dev->vhost_id; |
1542 | dev->vhost_id = NULL; | |
35c91567 | 1543 | rte_free(dev->vhost_rxq_enabled); |
c1ff66ac | 1544 | |
1ce30dfd DDP |
1545 | common_destruct(dev); |
1546 | ||
58397e6c | 1547 | ovs_mutex_unlock(&dpdk_mutex); |
3f891bbe | 1548 | |
bb9d2623 | 1549 | if (!vhost_id) { |
821b8664 IM |
1550 | goto out; |
1551 | } | |
1552 | ||
c1ff66ac | 1553 | if (dpdk_vhost_driver_unregister(dev, vhost_id)) { |
41964543 IM |
1554 | VLOG_ERR("%s: Unable to unregister vhost driver for socket '%s'.\n", |
1555 | netdev->name, vhost_id); | |
c1ff66ac CL |
1556 | } else if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) { |
1557 | /* OVS server mode - remove this socket from list for deletion */ | |
1558 | fatal_signal_remove_file_to_unlink(vhost_id); | |
3f891bbe | 1559 | } |
821b8664 | 1560 | out: |
c1ff66ac | 1561 | free(vhost_id); |
8a9562d2 PS |
1562 | } |
1563 | ||
1564 | static void | |
d46285a2 | 1565 | netdev_dpdk_dealloc(struct netdev *netdev) |
8a9562d2 | 1566 | { |
d46285a2 | 1567 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 | 1568 | |
d46285a2 | 1569 | rte_free(dev); |
8a9562d2 PS |
1570 | } |
1571 | ||
971f4b39 | 1572 | static void |
ac1a9bb9 | 1573 | netdev_dpdk_clear_xstats(struct netdev_dpdk *dev) |
971f4b39 MW |
1574 | { |
1575 | /* If statistics are already allocated, we have to | |
1576 | * reconfigure, as port_id could have been changed. */ | |
1577 | if (dev->rte_xstats_names) { | |
1578 | free(dev->rte_xstats_names); | |
1579 | dev->rte_xstats_names = NULL; | |
1580 | dev->rte_xstats_names_size = 0; | |
1581 | } | |
1582 | if (dev->rte_xstats_ids) { | |
1583 | free(dev->rte_xstats_ids); | |
1584 | dev->rte_xstats_ids = NULL; | |
1585 | dev->rte_xstats_ids_size = 0; | |
1586 | } | |
1587 | } | |
1588 | ||
1589 | static const char* | |
1590 | netdev_dpdk_get_xstat_name(struct netdev_dpdk *dev, uint64_t id) | |
1591 | { | |
1592 | if (id >= dev->rte_xstats_names_size) { | |
1593 | return "UNKNOWN"; | |
1594 | } | |
1595 | return dev->rte_xstats_names[id].name; | |
1596 | } | |
1597 | ||
1598 | static bool | |
1599 | netdev_dpdk_configure_xstats(struct netdev_dpdk *dev) | |
1600 | OVS_REQUIRES(dev->mutex) | |
1601 | { | |
1602 | int rte_xstats_len; | |
1603 | bool ret; | |
1604 | struct rte_eth_xstat *rte_xstats; | |
1605 | uint64_t id; | |
1606 | int xstats_no; | |
1607 | const char *name; | |
1608 | ||
1609 | /* Retrieving all XSTATS names. If something goes wrong, or the |
1610 | * number of counters equals 0, the rte_xstats_names |
1611 | * buffer will be set to NULL, and any further xstats |
1612 | * query won't be performed (e.g. during netdev_dpdk_get_stats |
1613 | * execution). */ |
1614 | ||
1615 | ret = false; | |
1616 | rte_xstats = NULL; | |
1617 | ||
1618 | if (dev->rte_xstats_names == NULL || dev->rte_xstats_ids == NULL) { | |
1619 | dev->rte_xstats_names_size = | |
1620 | rte_eth_xstats_get_names(dev->port_id, NULL, 0); | |
1621 | ||
1622 | if (dev->rte_xstats_names_size < 0) { | |
fa9f4eeb IM |
1623 | VLOG_WARN("Cannot get XSTATS for port: "DPDK_PORT_ID_FMT, |
1624 | dev->port_id); | |
971f4b39 MW |
1625 | dev->rte_xstats_names_size = 0; |
1626 | } else { | |
1627 | /* Reserve memory for xstats names and values */ | |
1628 | dev->rte_xstats_names = xcalloc(dev->rte_xstats_names_size, | |
1629 | sizeof *dev->rte_xstats_names); | |
1630 | ||
1631 | if (dev->rte_xstats_names) { | |
1632 | /* Retrieve xstats names */ |
1633 | rte_xstats_len = | |
1634 | rte_eth_xstats_get_names(dev->port_id, | |
1635 | dev->rte_xstats_names, | |
1636 | dev->rte_xstats_names_size); | |
1637 | ||
1638 | if (rte_xstats_len < 0) { | |
fa9f4eeb IM |
1639 | VLOG_WARN("Cannot get XSTATS names for port: " |
1640 | DPDK_PORT_ID_FMT, dev->port_id); | |
971f4b39 MW |
1641 | goto out; |
1642 | } else if (rte_xstats_len != dev->rte_xstats_names_size) { | |
fa9f4eeb IM |
1643 | VLOG_WARN("XSTATS size doesn't match for port: " |
1644 | DPDK_PORT_ID_FMT, dev->port_id); | |
971f4b39 MW |
1645 | goto out; |
1646 | } | |
1647 | ||
1648 | dev->rte_xstats_ids = xcalloc(dev->rte_xstats_names_size, | |
1649 | sizeof(uint64_t)); | |
1650 | ||
1651 | /* We have to calculate number of counters */ | |
1652 | rte_xstats = xmalloc(rte_xstats_len * sizeof *rte_xstats); | |
1653 | memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len); | |
1654 | ||
1655 | /* Retrieve xstats values */ |
1656 | if (rte_eth_xstats_get(dev->port_id, rte_xstats, | |
1657 | rte_xstats_len) > 0) { | |
1658 | dev->rte_xstats_ids_size = 0; | |
1659 | xstats_no = 0; | |
1660 | for (uint32_t i = 0; i < rte_xstats_len; i++) { | |
1661 | id = rte_xstats[i].id; | |
1662 | name = netdev_dpdk_get_xstat_name(dev, id); | |
1663 | /* We need to filter out everything except | |
1664 | * dropped, error and management counters */ | |
1665 | if (string_ends_with(name, "_errors") || | |
1666 | strstr(name, "_management_") || | |
1667 | string_ends_with(name, "_dropped")) { | |
1668 | ||
1669 | dev->rte_xstats_ids[xstats_no] = id; | |
1670 | xstats_no++; | |
1671 | } | |
1672 | } | |
1673 | dev->rte_xstats_ids_size = xstats_no; | |
1674 | ret = true; | |
1675 | } else { | |
fa9f4eeb IM |
1676 | VLOG_WARN("Can't get XSTATS IDs for port: " |
1677 | DPDK_PORT_ID_FMT, dev->port_id); | |
971f4b39 | 1678 | } |
34eb0863 IM |
1679 | |
1680 | free(rte_xstats); | |
971f4b39 MW |
1681 | } |
1682 | } | |
1683 | } else { | |
1684 | /* Already configured */ | |
1685 | ret = true; | |
1686 | } | |
1687 | ||
1688 | out: | |
1689 | if (!ret) { | |
1690 | netdev_dpdk_clear_xstats(dev); | |
1691 | } | |
1692 | return ret; | |
1693 | } | |
1694 | ||
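/* Report the requested and configured rx/tx queue counts, the MTU and, for
 * physical (ETH) ports, the descriptor counts, checksum/TSO offload state and
 * link-state interrupt mode. */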
8a9562d2 | 1695 | static int |
a14b8947 | 1696 | netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args) |
8a9562d2 | 1697 | { |
a14b8947 | 1698 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 PS |
1699 | |
1700 | ovs_mutex_lock(&dev->mutex); | |
1701 | ||
050c60bf | 1702 | smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq); |
a14b8947 | 1703 | smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq); |
81acebda IM |
1704 | smap_add_format(args, "requested_tx_queues", "%d", dev->requested_n_txq); |
1705 | smap_add_format(args, "configured_tx_queues", "%d", netdev->n_txq); | |
0072e931 | 1706 | smap_add_format(args, "mtu", "%d", dev->mtu); |
451f26fd IM |
1707 | |
1708 | if (dev->type == DPDK_DEV_ETH) { | |
1709 | smap_add_format(args, "requested_rxq_descriptors", "%d", | |
1710 | dev->requested_rxq_size); | |
1711 | smap_add_format(args, "configured_rxq_descriptors", "%d", | |
1712 | dev->rxq_size); | |
1713 | smap_add_format(args, "requested_txq_descriptors", "%d", | |
1714 | dev->requested_txq_size); | |
1715 | smap_add_format(args, "configured_txq_descriptors", "%d", | |
1716 | dev->txq_size); | |
1a2bb118 SC |
1717 | if (dev->hw_ol_features & NETDEV_RX_CHECKSUM_OFFLOAD) { |
1718 | smap_add(args, "rx_csum_offload", "true"); | |
8155ab7e KT |
1719 | } else { |
1720 | smap_add(args, "rx_csum_offload", "false"); | |
1a2bb118 | 1721 | } |
29cf9c1b FL |
1722 | if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) { |
1723 | smap_add(args, "tx_tso_offload", "true"); | |
1724 | } else { | |
1725 | smap_add(args, "tx_tso_offload", "false"); | |
1726 | } | |
f8b64a61 RM |
1727 | smap_add(args, "lsc_interrupt_mode", |
1728 | dev->lsc_interrupt_mode ? "true" : "false"); | |
451f26fd | 1729 | } |
8a9562d2 PS |
1730 | ovs_mutex_unlock(&dev->mutex); |
1731 | ||
1732 | return 0; | |
1733 | } | |
1734 | ||
55e075e6 | 1735 | static struct netdev_dpdk * |
bb37956a | 1736 | netdev_dpdk_lookup_by_port_id(dpdk_port_t port_id) |
55e075e6 CL |
1737 | OVS_REQUIRES(dpdk_mutex) |
1738 | { | |
1739 | struct netdev_dpdk *dev; | |
1740 | ||
1741 | LIST_FOR_EACH (dev, list_node, &dpdk_list) { | |
1742 | if (dev->port_id == port_id) { | |
1743 | return dev; | |
1744 | } | |
1745 | } | |
1746 | ||
1747 | return NULL; | |
1748 | } | |
1749 | ||
5e758818 YL |
1750 | static dpdk_port_t |
1751 | netdev_dpdk_get_port_by_mac(const char *mac_str) | |
1752 | { | |
1753 | dpdk_port_t port_id; | |
1754 | struct eth_addr mac, port_mac; | |
1755 | ||
1756 | if (!eth_addr_from_string(mac_str, &mac)) { | |
1757 | VLOG_ERR("invalid mac: %s", mac_str); | |
1758 | return DPDK_ETH_PORT_ID_INVALID; | |
1759 | } | |
1760 | ||
1761 | RTE_ETH_FOREACH_DEV (port_id) { | |
127b6a6e | 1762 | struct rte_ether_addr ea; |
5e758818 YL |
1763 | |
1764 | rte_eth_macaddr_get(port_id, &ea); | |
1765 | memcpy(port_mac.ea, ea.addr_bytes, ETH_ADDR_LEN); | |
1766 | if (eth_addr_equals(mac, port_mac)) { | |
1767 | return port_id; | |
1768 | } | |
1769 | } | |
1770 | ||
1771 | return DPDK_ETH_PORT_ID_INVALID; | |
1772 | } | |
1773 | ||
40e940e4 OM |
1774 | /* Return the first DPDK port id matching the devargs pattern. */ |
1775 | static dpdk_port_t netdev_dpdk_get_port_by_devargs(const char *devargs) | |
1776 | OVS_REQUIRES(dpdk_mutex) | |
1777 | { | |
1778 | dpdk_port_t port_id; | |
1779 | struct rte_dev_iterator iterator; | |
1780 | ||
1781 | RTE_ETH_FOREACH_MATCHING_DEV (port_id, devargs, &iterator) { | |
1782 | /* If a break is done - must call rte_eth_iterator_cleanup. */ | |
1783 | rte_eth_iterator_cleanup(&iterator); | |
1784 | break; | |
1785 | } | |
1786 | ||
1787 | return port_id; | |
1788 | } | |
1789 | ||
5e758818 | 1790 | /* |
40e940e4 OM |
1791 | * Normally, a PCI id (optionally followed by a representor number) |
1792 | * is enough for identifying a specific DPDK port. | |
5e758818 YL |
1793 | * However, some NICs have multiple ports sharing the same PCI |
1794 | * id, and for those the PCI id alone is not enough. |
1795 | * |
1796 | * To handle that, one more method is introduced: "class=eth,mac=$MAC". |
1797 | * |
1798 | * Note that compatibility is fully kept: users can still use the |
1799 | * PCI id for adding ports (when it is enough for them). |
1800 | */ | |
bb37956a | 1801 | static dpdk_port_t |
5dcde09c IM |
1802 | netdev_dpdk_process_devargs(struct netdev_dpdk *dev, |
1803 | const char *devargs, char **errp) | |
40e940e4 | 1804 | OVS_REQUIRES(dpdk_mutex) |
55e075e6 | 1805 | { |
40e940e4 | 1806 | dpdk_port_t new_port_id; |
55e075e6 | 1807 | |
5e758818 YL |
1808 | if (strncmp(devargs, "class=eth,mac=", 14) == 0) { |
1809 | new_port_id = netdev_dpdk_get_port_by_mac(&devargs[14]); | |
1810 | } else { | |
40e940e4 OM |
1811 | new_port_id = netdev_dpdk_get_port_by_devargs(devargs); |
1812 | if (!rte_eth_dev_is_valid_port(new_port_id)) { | |
5e758818 | 1813 | /* Device not found in DPDK, attempt to attach it */ |
40e940e4 | 1814 | if (rte_dev_probe(devargs)) { |
5e758818 | 1815 | new_port_id = DPDK_ETH_PORT_ID_INVALID; |
40e940e4 OM |
1816 | } else { |
1817 | new_port_id = netdev_dpdk_get_port_by_devargs(devargs); | |
1818 | if (rte_eth_dev_is_valid_port(new_port_id)) { | |
1819 | /* Attach successful */ | |
1820 | dev->attached = true; | |
1821 | VLOG_INFO("Device '%s' attached to DPDK", devargs); | |
1822 | } else { | |
1823 | /* Attach unsuccessful */ | |
1824 | new_port_id = DPDK_ETH_PORT_ID_INVALID; | |
1825 | } | |
5e758818 | 1826 | } |
55e075e6 | 1827 | } |
5e758818 YL |
1828 | } |
1829 | ||
1830 | if (new_port_id == DPDK_ETH_PORT_ID_INVALID) { | |
1831 | VLOG_WARN_BUF(errp, "Error attaching device '%s' to DPDK", devargs); | |
55e075e6 CL |
1832 | } |
1833 | ||
1834 | return new_port_id; | |
1835 | } | |
1836 | ||
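/* For a concrete sense of the two forms handled above, 'options:dpdk-devargs'
 * could carry either of the following (the addresses are hypothetical and
 * purely illustrative):
 *   options:dpdk-devargs=0000:08:00.1                      (PCI id)
 *   options:dpdk-devargs=class=eth,mac=00:11:22:33:44:55   (MAC based lookup)
 */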
988fd463 EC |
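/* DPDK ethdev event callback: on an RTE_ETH_EVENT_INTR_RESET interrupt the
 * matching netdev is flagged as needing a reset and a reconfiguration is
 * requested; all other event types are ignored. */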
1837 | static int |
1838 | dpdk_eth_event_callback(dpdk_port_t port_id, enum rte_eth_event_type type, | |
1839 | void *param OVS_UNUSED, void *ret_param OVS_UNUSED) | |
1840 | { | |
1841 | struct netdev_dpdk *dev; | |
1842 | ||
1843 | switch ((int) type) { | |
1844 | case RTE_ETH_EVENT_INTR_RESET: | |
1845 | ovs_mutex_lock(&dpdk_mutex); | |
1846 | dev = netdev_dpdk_lookup_by_port_id(port_id); | |
1847 | if (dev) { | |
1848 | ovs_mutex_lock(&dev->mutex); | |
1849 | dev->reset_needed = true; | |
1850 | netdev_request_reconfigure(&dev->up); | |
1851 | VLOG_DBG_RL(&rl, "%s: Device reset requested.", | |
1852 | netdev_get_name(&dev->up)); | |
1853 | ovs_mutex_unlock(&dev->mutex); | |
1854 | } | |
1855 | ovs_mutex_unlock(&dpdk_mutex); | |
1856 | break; | |
1857 | ||
1858 | default: | |
1859 | /* Ignore all other types. */ | |
1860 | break; | |
1861 | } | |
1862 | return 0; | |
1863 | } | |
1864 | ||
c3d062a7 CL |
1865 | static void |
1866 | dpdk_set_rxq_config(struct netdev_dpdk *dev, const struct smap *args) | |
b614c894 | 1867 | OVS_REQUIRES(dev->mutex) |
a14b8947 | 1868 | { |
050c60bf | 1869 | int new_n_rxq; |
a14b8947 | 1870 | |
2a21e757 | 1871 | new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1); |
050c60bf DDP |
1872 | if (new_n_rxq != dev->requested_n_rxq) { |
1873 | dev->requested_n_rxq = new_n_rxq; | |
c3d062a7 | 1874 | netdev_request_reconfigure(&dev->up); |
050c60bf | 1875 | } |
c3d062a7 CL |
1876 | } |
1877 | ||
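/* Validate a requested descriptor count ('n_rxq_desc' or 'n_txq_desc'):
 * values that are non-positive, above NIC_PORT_MAX_Q_SIZE or not a power of
 * two fall back to the default, and any resulting change triggers a
 * reconfiguration of the netdev. */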
b685696b CL |
1878 | static void |
1879 | dpdk_process_queue_size(struct netdev *netdev, const struct smap *args, | |
1880 | const char *flag, int default_size, int *new_size) | |
1881 | { | |
1882 | int queue_size = smap_get_int(args, flag, default_size); | |
1883 | ||
1884 | if (queue_size <= 0 || queue_size > NIC_PORT_MAX_Q_SIZE | |
1885 | || !is_pow2(queue_size)) { | |
1886 | queue_size = default_size; | |
1887 | } | |
1888 | ||
1889 | if (queue_size != *new_size) { | |
1890 | *new_size = queue_size; | |
1891 | netdev_request_reconfigure(netdev); | |
1892 | } | |
1893 | } | |
1894 | ||
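/* Apply the 'options' column settings of a physical DPDK port: queue and
 * descriptor counts, 'dpdk-devargs' (possibly attaching a new device),
 * link-state interrupt mode and flow control.  Returns a positive errno
 * value on failure. */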
c3d062a7 | 1895 | static int |
9fff138e DDP |
1896 | netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args, |
1897 | char **errp) | |
c3d062a7 CL |
1898 | { |
1899 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
f8b64a61 | 1900 | bool rx_fc_en, tx_fc_en, autoneg, lsc_interrupt_mode; |
c2c84474 | 1901 | bool flow_control_requested = true; |
b614c894 IM |
1902 | enum rte_eth_fc_mode fc_mode; |
1903 | static const enum rte_eth_fc_mode fc_mode_set[2][2] = { | |
1904 | {RTE_FC_NONE, RTE_FC_TX_PAUSE}, | |
1905 | {RTE_FC_RX_PAUSE, RTE_FC_FULL } | |
1906 | }; | |
55e075e6 CL |
1907 | const char *new_devargs; |
1908 | int err = 0; | |
c3d062a7 | 1909 | |
55e075e6 | 1910 | ovs_mutex_lock(&dpdk_mutex); |
c3d062a7 CL |
1911 | ovs_mutex_lock(&dev->mutex); |
1912 | ||
1913 | dpdk_set_rxq_config(dev, args); | |
1914 | ||
b685696b CL |
1915 | dpdk_process_queue_size(netdev, args, "n_rxq_desc", |
1916 | NIC_PORT_DEFAULT_RXQ_SIZE, | |
1917 | &dev->requested_rxq_size); | |
1918 | dpdk_process_queue_size(netdev, args, "n_txq_desc", | |
1919 | NIC_PORT_DEFAULT_TXQ_SIZE, | |
1920 | &dev->requested_txq_size); | |
1921 | ||
55e075e6 CL |
1922 | new_devargs = smap_get(args, "dpdk-devargs"); |
1923 | ||
cefdd80a | 1924 | if (dev->devargs && new_devargs && strcmp(new_devargs, dev->devargs)) { |
55e075e6 CL |
1925 | /* The user requested a new device. If we return error, the caller |
1926 | * will delete this netdev and try to recreate it. */ | |
1927 | err = EAGAIN; | |
1928 | goto out; | |
1929 | } | |
1930 | ||
1931 | /* dpdk-devargs is required for device configuration */ | |
1932 | if (new_devargs && new_devargs[0]) { | |
1933 | /* Don't process dpdk-devargs if value is unchanged and port id | |
1934 | * is valid */ | |
1935 | if (!(dev->devargs && !strcmp(dev->devargs, new_devargs) | |
1936 | && rte_eth_dev_is_valid_port(dev->port_id))) { | |
bb37956a IM |
1937 | dpdk_port_t new_port_id = netdev_dpdk_process_devargs(dev, |
1938 | new_devargs, | |
1939 | errp); | |
55e075e6 CL |
1940 | if (!rte_eth_dev_is_valid_port(new_port_id)) { |
1941 | err = EINVAL; | |
1942 | } else if (new_port_id == dev->port_id) { | |
1943 | /* Already configured, do not reconfigure again */ | |
1944 | err = 0; | |
1945 | } else { | |
1946 | struct netdev_dpdk *dup_dev; | |
bb37956a | 1947 | |
55e075e6 CL |
1948 | dup_dev = netdev_dpdk_lookup_by_port_id(new_port_id); |
1949 | if (dup_dev) { | |
9fff138e | 1950 | VLOG_WARN_BUF(errp, "'%s' is trying to use device '%s' " |
40e940e4 | 1951 | "which is already in use by '%s'", |
9fff138e DDP |
1952 | netdev_get_name(netdev), new_devargs, |
1953 | netdev_get_name(&dup_dev->up)); | |
55e075e6 CL |
1954 | err = EADDRINUSE; |
1955 | } else { | |
bd4e172b | 1956 | int sid = rte_eth_dev_socket_id(new_port_id); |
bb37956a | 1957 | |
bd4e172b | 1958 | dev->requested_socket_id = sid < 0 ? SOCKET0 : sid; |
55e075e6 CL |
1959 | dev->devargs = xstrdup(new_devargs); |
1960 | dev->port_id = new_port_id; | |
1961 | netdev_request_reconfigure(&dev->up); | |
971f4b39 | 1962 | netdev_dpdk_clear_xstats(dev); |
55e075e6 CL |
1963 | err = 0; |
1964 | } | |
1965 | } | |
1966 | } | |
1967 | } else { | |
9fff138e DDP |
1968 | VLOG_WARN_BUF(errp, "'%s' is missing 'options:dpdk-devargs'. " |
1969 | "The old 'dpdk<port_id>' names are not supported", | |
1970 | netdev_get_name(netdev)); | |
55e075e6 CL |
1971 | err = EINVAL; |
1972 | } | |
1973 | ||
1974 | if (err) { | |
1975 | goto out; | |
1976 | } | |
1977 | ||
f8b64a61 RM |
1978 | lsc_interrupt_mode = smap_get_bool(args, "dpdk-lsc-interrupt", false); |
1979 | if (dev->requested_lsc_interrupt_mode != lsc_interrupt_mode) { | |
1980 | dev->requested_lsc_interrupt_mode = lsc_interrupt_mode; | |
1981 | netdev_request_reconfigure(netdev); | |
1982 | } | |
1983 | ||
c3d062a7 CL |
1984 | rx_fc_en = smap_get_bool(args, "rx-flow-ctrl", false); |
1985 | tx_fc_en = smap_get_bool(args, "tx-flow-ctrl", false); | |
b614c894 | 1986 | autoneg = smap_get_bool(args, "flow-ctrl-autoneg", false); |
c3d062a7 | 1987 | |
b614c894 | 1988 | fc_mode = fc_mode_set[tx_fc_en][rx_fc_en]; |
c2c84474 TK |
1989 | |
1990 | if (!smap_get(args, "rx-flow-ctrl") && !smap_get(args, "tx-flow-ctrl") | |
1991 | && !smap_get(args, "flow-ctrl-autoneg")) { | |
1992 | /* FIXME: User didn't ask for flow control configuration. | |
1993 | * For now we'll not print a warning if flow control is not | |
1994 | * supported by the DPDK port. */ | |
1995 | flow_control_requested = false; | |
1996 | } | |
1997 | ||
1998 | /* Get the Flow control configuration. */ | |
1999 | err = -rte_eth_dev_flow_ctrl_get(dev->port_id, &dev->fc_conf); | |
2000 | if (err) { | |
2001 | if (err == ENOTSUP) { | |
2002 | if (flow_control_requested) { | |
2003 | VLOG_WARN("%s: Flow control is not supported.", | |
2004 | netdev_get_name(netdev)); | |
2005 | } | |
2006 | err = 0; /* Not fatal. */ | |
2007 | } else { | |
2008 | VLOG_WARN("%s: Cannot get flow control parameters: %s", | |
2009 | netdev_get_name(netdev), rte_strerror(err)); | |
2010 | } | |
2011 | goto out; | |
2012 | } | |
2013 | ||
b614c894 IM |
2014 | if (dev->fc_conf.mode != fc_mode || autoneg != dev->fc_conf.autoneg) { |
2015 | dev->fc_conf.mode = fc_mode; | |
2016 | dev->fc_conf.autoneg = autoneg; | |
2017 | dpdk_eth_flow_ctrl_setup(dev); | |
2018 | } | |
9fd39370 | 2019 | |
55e075e6 | 2020 | out: |
c3d062a7 | 2021 | ovs_mutex_unlock(&dev->mutex); |
55e075e6 | 2022 | ovs_mutex_unlock(&dpdk_mutex); |
c3d062a7 | 2023 | |
55e075e6 | 2024 | return err; |
c3d062a7 CL |
2025 | } |
2026 | ||
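/* Apply the 'options' column settings of a vhost-user client port: pick up a
 * changed 'vhost-server-path' (which triggers a reconfiguration) and update
 * 'tx-retries-max', falling back to the default when the requested value is
 * out of range. */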
c1ff66ac | 2027 | static int |
2d24d165 | 2028 | netdev_dpdk_vhost_client_set_config(struct netdev *netdev, |
9fff138e DDP |
2029 | const struct smap *args, |
2030 | char **errp OVS_UNUSED) | |
c1ff66ac CL |
2031 | { |
2032 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
2033 | const char *path; | |
080f080c | 2034 | int max_tx_retries, cur_max_tx_retries; |
c1ff66ac | 2035 | |
6881885a | 2036 | ovs_mutex_lock(&dev->mutex); |
c1ff66ac CL |
2037 | if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) { |
2038 | path = smap_get(args, "vhost-server-path"); | |
bb9d2623 IM |
2039 | if (!nullable_string_is_equal(path, dev->vhost_id)) { |
2040 | free(dev->vhost_id); | |
2041 | dev->vhost_id = nullable_xstrdup(path); | |
c1ff66ac CL |
2042 | netdev_request_reconfigure(netdev); |
2043 | } | |
2044 | } | |
080f080c KT |
2045 | |
2046 | max_tx_retries = smap_get_int(args, "tx-retries-max", | |
2047 | VHOST_ENQ_RETRY_DEF); | |
2048 | if (max_tx_retries < VHOST_ENQ_RETRY_MIN | |
2049 | || max_tx_retries > VHOST_ENQ_RETRY_MAX) { | |
2050 | max_tx_retries = VHOST_ENQ_RETRY_DEF; | |
2051 | } | |
2052 | atomic_read_relaxed(&dev->vhost_tx_retries_max, &cur_max_tx_retries); | |
2053 | if (max_tx_retries != cur_max_tx_retries) { | |
2054 | atomic_store_relaxed(&dev->vhost_tx_retries_max, max_tx_retries); | |
2055 | VLOG_INFO("Max Tx retries for vhost device '%s' set to %d", | |
2056 | netdev_get_name(netdev), max_tx_retries); | |
2057 | } | |
6881885a | 2058 | ovs_mutex_unlock(&dev->mutex); |
c1ff66ac CL |
2059 | |
2060 | return 0; | |
2061 | } | |
2062 | ||
7dec44fe | 2063 | static int |
d46285a2 | 2064 | netdev_dpdk_get_numa_id(const struct netdev *netdev) |
7dec44fe | 2065 | { |
d46285a2 | 2066 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
7dec44fe | 2067 | |
d46285a2 | 2068 | return dev->socket_id; |
7dec44fe AW |
2069 | } |
2070 | ||
050c60bf | 2071 | /* Sets the number of tx queues for the dpdk interface. */ |
5496878c | 2072 | static int |
050c60bf | 2073 | netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq) |
5496878c | 2074 | { |
d46285a2 | 2075 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
5496878c | 2076 | |
d46285a2 | 2077 | ovs_mutex_lock(&dev->mutex); |
91968eb0 | 2078 | |
050c60bf DDP |
2079 | if (dev->requested_n_txq == n_txq) { |
2080 | goto out; | |
4573fbd3 FL |
2081 | } |
2082 | ||
050c60bf DDP |
2083 | dev->requested_n_txq = n_txq; |
2084 | netdev_request_reconfigure(netdev); | |
58397e6c | 2085 | |
050c60bf | 2086 | out: |
d46285a2 | 2087 | ovs_mutex_unlock(&dev->mutex); |
050c60bf | 2088 | return 0; |
58397e6c KT |
2089 | } |
2090 | ||
8a9562d2 PS |
2091 | static struct netdev_rxq * |
2092 | netdev_dpdk_rxq_alloc(void) | |
2093 | { | |
2094 | struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx); | |
2095 | ||
eff23640 DDP |
2096 | if (rx) { |
2097 | return &rx->up; | |
2098 | } | |
2099 | ||
2100 | return NULL; | |
8a9562d2 PS |
2101 | } |
2102 | ||
2103 | static struct netdev_rxq_dpdk * | |
d46285a2 | 2104 | netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq) |
8a9562d2 | 2105 | { |
d46285a2 | 2106 | return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up); |
8a9562d2 PS |
2107 | } |
2108 | ||
2109 | static int | |
d46285a2 | 2110 | netdev_dpdk_rxq_construct(struct netdev_rxq *rxq) |
8a9562d2 | 2111 | { |
d46285a2 DDP |
2112 | struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq); |
2113 | struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev); | |
8a9562d2 | 2114 | |
d46285a2 DDP |
2115 | ovs_mutex_lock(&dev->mutex); |
2116 | rx->port_id = dev->port_id; | |
2117 | ovs_mutex_unlock(&dev->mutex); | |
8a9562d2 PS |
2118 | |
2119 | return 0; | |
2120 | } | |
2121 | ||
2122 | static void | |
d46285a2 | 2123 | netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED) |
8a9562d2 PS |
2124 | { |
2125 | } | |
2126 | ||
2127 | static void | |
d46285a2 | 2128 | netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq) |
8a9562d2 | 2129 | { |
d46285a2 | 2130 | struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq); |
8a9562d2 PS |
2131 | |
2132 | rte_free(rx); | |
2133 | } | |
2134 | ||
29cf9c1b FL |
2135 | /* Prepare the packet for HWOL. |
2136 | * Return True if the packet is OK to continue. */ | |
2137 | static bool | |
2138 | netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf) | |
2139 | { | |
2140 | struct dp_packet *pkt = CONTAINER_OF(mbuf, struct dp_packet, mbuf); | |
2141 | ||
2142 | if (mbuf->ol_flags & PKT_TX_L4_MASK) { | |
2143 | mbuf->l2_len = (char *)dp_packet_l3(pkt) - (char *)dp_packet_eth(pkt); | |
2144 | mbuf->l3_len = (char *)dp_packet_l4(pkt) - (char *)dp_packet_l3(pkt); | |
2145 | mbuf->outer_l2_len = 0; | |
2146 | mbuf->outer_l3_len = 0; | |
2147 | } | |
2148 | ||
2149 | if (mbuf->ol_flags & PKT_TX_TCP_SEG) { | |
2150 | struct tcp_header *th = dp_packet_l4(pkt); | |
2151 | ||
2152 | if (!th) { | |
2153 | VLOG_WARN_RL(&rl, "%s: TCP Segmentation without L4 header" | |
2154 | " pkt len: %"PRIu32"", dev->up.name, mbuf->pkt_len); | |
2155 | return false; | |
2156 | } | |
2157 | ||
2158 | mbuf->l4_len = TCP_OFFSET(th->tcp_ctl) * 4; | |
2159 | mbuf->ol_flags |= PKT_TX_TCP_CKSUM; | |
2160 | mbuf->tso_segsz = dev->mtu - mbuf->l3_len - mbuf->l4_len; | |
2161 | ||
2162 | if (mbuf->ol_flags & PKT_TX_IPV4) { | |
2163 | mbuf->ol_flags |= PKT_TX_IP_CKSUM; | |
2164 | } | |
2165 | } | |
2166 | return true; | |
2167 | } | |
2168 | ||
2169 | /* Prepare a batch for HWOL. | |
2170 | * Return the number of good packets in the batch. */ | |
2171 | static int | |
2172 | netdev_dpdk_prep_hwol_batch(struct netdev_dpdk *dev, struct rte_mbuf **pkts, | |
2173 | int pkt_cnt) | |
2174 | { | |
2175 | int i = 0; | |
2176 | int cnt = 0; | |
2177 | struct rte_mbuf *pkt; | |
2178 | ||
2179 | /* Prepare and filter bad HWOL packets. */ | |
2180 | for (i = 0; i < pkt_cnt; i++) { | |
2181 | pkt = pkts[i]; | |
2182 | if (!netdev_dpdk_prep_hwol_packet(dev, pkt)) { | |
2183 | rte_pktmbuf_free(pkt); | |
2184 | continue; | |
2185 | } | |
2186 | ||
2187 | if (OVS_UNLIKELY(i != cnt)) { | |
2188 | pkts[cnt] = pkt; | |
2189 | } | |
2190 | cnt++; | |
2191 | } | |
2192 | ||
2193 | return cnt; | |
2194 | } | |
2195 | ||
819f13bd DDP |
2196 | /* Tries to transmit 'pkts' to txq 'qid' of device 'dev'. Takes ownership of |
2197 | * 'pkts', even in case of failure. | |
2198 | * | |
2199 | * Returns the number of packets that weren't transmitted. */ | |
2200 | static inline int | |
b59cc14e | 2201 | netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid, |
819f13bd | 2202 | struct rte_mbuf **pkts, int cnt) |
8a9562d2 | 2203 | { |
1304f1f8 | 2204 | uint32_t nb_tx = 0; |
29cf9c1b FL |
2205 | uint16_t nb_tx_prep = cnt; |
2206 | ||
2207 | if (userspace_tso_enabled()) { | |
2208 | nb_tx_prep = rte_eth_tx_prepare(dev->port_id, qid, pkts, cnt); | |
2209 | if (nb_tx_prep != cnt) { | |
2210 | VLOG_WARN_RL(&rl, "%s: Output batch contains invalid packets. " | |
2211 | "Only %u/%u are valid: %s", dev->up.name, nb_tx_prep, | |
2212 | cnt, rte_strerror(rte_errno)); | |
2213 | } | |
2214 | } | |
1304f1f8 | 2215 | |
29cf9c1b | 2216 | while (nb_tx != nb_tx_prep) { |
1304f1f8 DDP |
2217 | uint32_t ret; |
2218 | ||
29cf9c1b FL |
2219 | ret = rte_eth_tx_burst(dev->port_id, qid, pkts + nb_tx, |
2220 | nb_tx_prep - nb_tx); | |
1304f1f8 DDP |
2221 | if (!ret) { |
2222 | break; | |
2223 | } | |
2224 | ||
2225 | nb_tx += ret; | |
2226 | } | |
8a9562d2 | 2227 | |
b59cc14e | 2228 | if (OVS_UNLIKELY(nb_tx != cnt)) { |
819f13bd | 2229 | /* Free buffers, which we couldn't transmit, one at a time (each |
db73f716 DDP |
2230 | * packet could come from a different mempool) */ |
2231 | int i; | |
2232 | ||
b59cc14e IM |
2233 | for (i = nb_tx; i < cnt; i++) { |
2234 | rte_pktmbuf_free(pkts[i]); | |
db73f716 | 2235 | } |
8a9562d2 | 2236 | } |
819f13bd DDP |
2237 | |
2238 | return cnt - nb_tx; | |
8a9562d2 PS |
2239 | } |
2240 | ||
f3926f29 | 2241 | static inline bool |
e61bdffc EC |
2242 | netdev_dpdk_srtcm_policer_pkt_handle(struct rte_meter_srtcm *meter, |
2243 | struct rte_meter_srtcm_profile *profile, | |
2244 | struct rte_mbuf *pkt, uint64_t time) | |
f3926f29 | 2245 | { |
127b6a6e | 2246 | uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct rte_ether_hdr); |
f3926f29 | 2247 | |
03f3f9c0 | 2248 | return rte_meter_srtcm_color_blind_check(meter, profile, time, pkt_len) == |
127b6a6e | 2249 | RTE_COLOR_GREEN; |
f3926f29 IS |
2250 | } |
2251 | ||
2252 | static int | |
e61bdffc EC |
2253 | srtcm_policer_run_single_packet(struct rte_meter_srtcm *meter, |
2254 | struct rte_meter_srtcm_profile *profile, | |
2255 | struct rte_mbuf **pkts, int pkt_cnt, | |
2256 | bool should_steal) | |
f3926f29 IS |
2257 | { |
2258 | int i = 0; | |
2259 | int cnt = 0; | |
2260 | struct rte_mbuf *pkt = NULL; | |
2261 | uint64_t current_time = rte_rdtsc(); | |
2262 | ||
2263 | for (i = 0; i < pkt_cnt; i++) { | |
2264 | pkt = pkts[i]; | |
2265 | /* Handle current packet */ | |
e61bdffc EC |
2266 | if (netdev_dpdk_srtcm_policer_pkt_handle(meter, profile, |
2267 | pkt, current_time)) { | |
f3926f29 IS |
2268 | if (cnt != i) { |
2269 | pkts[cnt] = pkt; | |
2270 | } | |
2271 | cnt++; | |
2272 | } else { | |
7d7ded7a | 2273 | if (should_steal) { |
3e90f7d7 GZ |
2274 | rte_pktmbuf_free(pkt); |
2275 | } | |
f3926f29 IS |
2276 | } |
2277 | } | |
2278 | ||
2279 | return cnt; | |
2280 | } | |
2281 | ||
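/* Run the single rate three color marker (srTCM) ingress policer over a burst
 * of packets while holding the policer lock and return how many packets
 * passed; non-conforming packets are freed when 'should_steal' is true. */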
9509913a IS |
2282 | static int |
2283 | ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts, | |
7d7ded7a | 2284 | int pkt_cnt, bool should_steal) |
9509913a IS |
2285 | { |
2286 | int cnt = 0; | |
2287 | ||
2288 | rte_spinlock_lock(&policer->policer_lock); | |
e61bdffc EC |
2289 | cnt = srtcm_policer_run_single_packet(&policer->in_policer, |
2290 | &policer->in_prof, | |
2291 | pkts, pkt_cnt, should_steal); | |
9509913a IS |
2292 | rte_spinlock_unlock(&policer->policer_lock); |
2293 | ||
2294 | return cnt; | |
2295 | } | |
2296 | ||
58397e6c | 2297 | static bool |
0a0f39df | 2298 | is_vhost_running(struct netdev_dpdk *dev) |
58397e6c | 2299 | { |
0a0f39df | 2300 | return (netdev_dpdk_get_vid(dev) >= 0 && dev->vhost_reconfigured); |
58397e6c KT |
2301 | } |
2302 | ||
d6e3feb5 | 2303 | static inline void |
2304 | netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats, | |
2305 | unsigned int packet_size) | |
2306 | { | |
2307 | /* Hard-coded search for the size bucket. */ | |
2308 | if (packet_size < 256) { | |
2309 | if (packet_size >= 128) { | |
2310 | stats->rx_128_to_255_packets++; | |
2311 | } else if (packet_size <= 64) { | |
2312 | stats->rx_1_to_64_packets++; | |
2313 | } else { | |
2314 | stats->rx_65_to_127_packets++; | |
2315 | } | |
2316 | } else { | |
2317 | if (packet_size >= 1523) { | |
2318 | stats->rx_1523_to_max_packets++; | |
2319 | } else if (packet_size >= 1024) { | |
2320 | stats->rx_1024_to_1522_packets++; | |
2321 | } else if (packet_size < 512) { | |
2322 | stats->rx_256_to_511_packets++; | |
2323 | } else { | |
2324 | stats->rx_512_to_1023_packets++; | |
2325 | } | |
2326 | } | |
2327 | } | |
2328 | ||
9e3ddd45 | 2329 | static inline void |
2f862c71 | 2330 | netdev_dpdk_vhost_update_rx_counters(struct netdev_dpdk *dev, |
9509913a | 2331 | struct dp_packet **packets, int count, |
2f862c71 | 2332 | int qos_drops) |
9e3ddd45 | 2333 | { |
2f862c71 | 2334 | struct netdev_stats *stats = &dev->stats; |
9e3ddd45 | 2335 | struct dp_packet *packet; |
2f862c71 SV |
2336 | unsigned int packet_size; |
2337 | int i; | |
9e3ddd45 TP |
2338 | |
2339 | stats->rx_packets += count; | |
2f862c71 | 2340 | stats->rx_dropped += qos_drops; |
9e3ddd45 TP |
2341 | for (i = 0; i < count; i++) { |
2342 | packet = packets[i]; | |
d6e3feb5 | 2343 | packet_size = dp_packet_size(packet); |
9e3ddd45 | 2344 | |
d6e3feb5 | 2345 | if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) { |
9e3ddd45 TP |
2346 | /* This only protects the following multicast counting from |
2347 | * too short packets, but it does not stop the packet from | |
2348 | * further processing. */ | |
2349 | stats->rx_errors++; | |
2350 | stats->rx_length_errors++; | |
2351 | continue; | |
2352 | } | |
2353 | ||
d6e3feb5 | 2354 | netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size); |
2355 | ||
9e3ddd45 TP |
2356 | struct eth_header *eh = (struct eth_header *) dp_packet_data(packet); |
2357 | if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) { | |
2358 | stats->multicast++; | |
2359 | } | |
2360 | ||
d6e3feb5 | 2361 | stats->rx_bytes += packet_size; |
9e3ddd45 | 2362 | } |
2f862c71 | 2363 | |
6d77abf4 KT |
2364 | if (OVS_UNLIKELY(qos_drops)) { |
2365 | dev->sw_stats->rx_qos_drops += qos_drops; | |
2366 | } | |
9e3ddd45 TP |
2367 | } |
2368 | ||
58397e6c KT |
2369 | /* |
2370 | * The receive path for the vhost port is the TX path out from guest. | |
2371 | */ | |
2372 | static int | |
d46285a2 | 2373 | netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq, |
8492adc2 | 2374 | struct dp_packet_batch *batch, int *qfill) |
58397e6c | 2375 | { |
d46285a2 | 2376 | struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev); |
9509913a | 2377 | struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev); |
58397e6c | 2378 | uint16_t nb_rx = 0; |
2f862c71 | 2379 | uint16_t qos_drops = 0; |
8492adc2 | 2380 | int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ; |
daf22bf7 | 2381 | int vid = netdev_dpdk_get_vid(dev); |
58397e6c | 2382 | |
daf22bf7 | 2383 | if (OVS_UNLIKELY(vid < 0 || !dev->vhost_reconfigured |
e543851d | 2384 | || !(dev->flags & NETDEV_UP))) { |
58397e6c KT |
2385 | return EAGAIN; |
2386 | } | |
2387 | ||
43307ad0 | 2388 | nb_rx = rte_vhost_dequeue_burst(vid, qid, dev->dpdk_mp->mp, |
64839cf4 | 2389 | (struct rte_mbuf **) batch->packets, |
cd159f1a | 2390 | NETDEV_MAX_BURST); |
58397e6c KT |
2391 | if (!nb_rx) { |
2392 | return EAGAIN; | |
2393 | } | |
2394 | ||
8492adc2 JS |
2395 | if (qfill) { |
2396 | if (nb_rx == NETDEV_MAX_BURST) { | |
2397 | /* The DPDK API returns a uint32_t which often has invalid bits in | |
2398 | * the upper 16-bits. Need to restrict the value to uint16_t. */ | |
2399 | *qfill = rte_vhost_rx_queue_count(vid, qid) & UINT16_MAX; | |
2400 | } else { | |
2401 | *qfill = 0; | |
2402 | } | |
2403 | } | |
2404 | ||
9509913a | 2405 | if (policer) { |
2f862c71 | 2406 | qos_drops = nb_rx; |
64839cf4 WT |
2407 | nb_rx = ingress_policer_run(policer, |
2408 | (struct rte_mbuf **) batch->packets, | |
3e90f7d7 | 2409 | nb_rx, true); |
2f862c71 | 2410 | qos_drops -= nb_rx; |
9509913a IS |
2411 | } |
2412 | ||
d46285a2 | 2413 | rte_spinlock_lock(&dev->stats_lock); |
2f862c71 SV |
2414 | netdev_dpdk_vhost_update_rx_counters(dev, batch->packets, |
2415 | nb_rx, qos_drops); | |
d46285a2 | 2416 | rte_spinlock_unlock(&dev->stats_lock); |
45d947c4 | 2417 | |
75fb9148 ZB |
2418 | batch->count = nb_rx; |
2419 | dp_packet_batch_init_packet_fields(batch); | |
2420 | ||
58397e6c KT |
2421 | return 0; |
2422 | } | |
2423 | ||
35c91567 DM |
2424 | static bool |
2425 | netdev_dpdk_vhost_rxq_enabled(struct netdev_rxq *rxq) | |
2426 | { | |
2427 | struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev); | |
2428 | ||
2429 | return dev->vhost_rxq_enabled[rxq->queue_id]; | |
2430 | } | |
2431 | ||
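/* Receive path for physical (ETH) ports: read up to NETDEV_MAX_BURST packets
 * from the NIC queue, apply the ingress policer if one is configured and,
 * when requested, report the remaining queue fill level through 'qfill'. */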
8a9562d2 | 2432 | static int |
8492adc2 JS |
2433 | netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch, |
2434 | int *qfill) | |
8a9562d2 | 2435 | { |
d46285a2 DDP |
2436 | struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq); |
2437 | struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev); | |
9509913a | 2438 | struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev); |
8a9562d2 | 2439 | int nb_rx; |
9509913a | 2440 | int dropped = 0; |
8a9562d2 | 2441 | |
3b1fb077 DDP |
2442 | if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) { |
2443 | return EAGAIN; | |
2444 | } | |
2445 | ||
d46285a2 | 2446 | nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id, |
64839cf4 | 2447 | (struct rte_mbuf **) batch->packets, |
cd159f1a | 2448 | NETDEV_MAX_BURST); |
8a9562d2 PS |
2449 | if (!nb_rx) { |
2450 | return EAGAIN; | |
2451 | } | |
2452 | ||
9509913a IS |
2453 | if (policer) { |
2454 | dropped = nb_rx; | |
64839cf4 | 2455 | nb_rx = ingress_policer_run(policer, |
58be5c0e | 2456 | (struct rte_mbuf **) batch->packets, |
3e90f7d7 | 2457 | nb_rx, true); |
9509913a IS |
2458 | dropped -= nb_rx; |
2459 | } | |
2460 | ||
2461 | /* Update stats to reflect dropped packets */ | |
2462 | if (OVS_UNLIKELY(dropped)) { | |
2463 | rte_spinlock_lock(&dev->stats_lock); | |
2464 | dev->stats.rx_dropped += dropped; | |
2f862c71 | 2465 | dev->sw_stats->rx_qos_drops += dropped; |
9509913a IS |
2466 | rte_spinlock_unlock(&dev->stats_lock); |
2467 | } | |
2468 | ||
64839cf4 | 2469 | batch->count = nb_rx; |
75fb9148 | 2470 | dp_packet_batch_init_packet_fields(batch); |
8a9562d2 | 2471 | |
8492adc2 JS |
2472 | if (qfill) { |
2473 | if (nb_rx == NETDEV_MAX_BURST) { | |
2474 | *qfill = rte_eth_rx_queue_count(rx->port_id, rxq->queue_id); | |
2475 | } else { | |
2476 | *qfill = 0; | |
2477 | } | |
2478 | } | |
2479 | ||
8a9562d2 PS |
2480 | return 0; |
2481 | } | |
2482 | ||
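/* Apply the egress QoS policy configured on the netdev, if any, to a burst of
 * packets and return how many packets remain.  The QoS implementation runs
 * under its own spinlock; 'should_steal' tells it whether it may free the
 * packets it drops. */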
0bf765f7 | 2483 | static inline int |
78bd47cf | 2484 | netdev_dpdk_qos_run(struct netdev_dpdk *dev, struct rte_mbuf **pkts, |
7d7ded7a | 2485 | int cnt, bool should_steal) |
0bf765f7 | 2486 | { |
78bd47cf | 2487 | struct qos_conf *qos_conf = ovsrcu_get(struct qos_conf *, &dev->qos_conf); |
0bf765f7 | 2488 | |
78bd47cf DDP |
2489 | if (qos_conf) { |
2490 | rte_spinlock_lock(&qos_conf->lock); | |
7d7ded7a | 2491 | cnt = qos_conf->ops->qos_run(qos_conf, pkts, cnt, should_steal); |
78bd47cf | 2492 | rte_spinlock_unlock(&qos_conf->lock); |
0bf765f7 IS |
2493 | } |
2494 | ||
2495 | return cnt; | |
2496 | } | |
2497 | ||
c6ec9d17 IM |
2498 | static int |
2499 | netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts, | |
2500 | int pkt_cnt) | |
2501 | { | |
2502 | int i = 0; | |
2503 | int cnt = 0; | |
2504 | struct rte_mbuf *pkt; | |
2505 | ||
29cf9c1b | 2506 | /* Filter oversized packets, unless they are marked for TSO. */ |
c6ec9d17 IM |
2507 | for (i = 0; i < pkt_cnt; i++) { |
2508 | pkt = pkts[i]; | |
29cf9c1b FL |
2509 | if (OVS_UNLIKELY((pkt->pkt_len > dev->max_packet_len) |
2510 | && !(pkt->ol_flags & PKT_TX_TCP_SEG))) { | |
2511 | VLOG_WARN_RL(&rl, "%s: Too big size %" PRIu32 " " | |
2512 | "max_packet_len %d", dev->up.name, pkt->pkt_len, | |
2513 | dev->max_packet_len); | |
c6ec9d17 IM |
2514 | rte_pktmbuf_free(pkt); |
2515 | continue; | |
2516 | } | |
2517 | ||
2518 | if (OVS_UNLIKELY(i != cnt)) { | |
2519 | pkts[cnt] = pkt; | |
2520 | } | |
2521 | cnt++; | |
2522 | } | |
2523 | ||
2524 | return cnt; | |
2525 | } | |
2526 | ||
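/* Fold the drop counters (MTU, QoS, transmit failure and invalid HWOL) and
 * the retry count accumulated during one vhost send operation into the
 * device-wide statistics; called with 'stats_lock' held. */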
9e3ddd45 | 2527 | static inline void |
2f862c71 | 2528 | netdev_dpdk_vhost_update_tx_counters(struct netdev_dpdk *dev, |
9e3ddd45 TP |
2529 | struct dp_packet **packets, |
2530 | int attempted, | |
2f862c71 | 2531 | struct netdev_dpdk_sw_stats *sw_stats_add) |
9e3ddd45 | 2532 | { |
2f862c71 SV |
2533 | int dropped = sw_stats_add->tx_mtu_exceeded_drops + |
2534 | sw_stats_add->tx_qos_drops + | |
29cf9c1b FL |
2535 | sw_stats_add->tx_failure_drops + |
2536 | sw_stats_add->tx_invalid_hwol_drops; | |
2f862c71 | 2537 | struct netdev_stats *stats = &dev->stats; |
9e3ddd45 | 2538 | int sent = attempted - dropped; |
2f862c71 | 2539 | int i; |
9e3ddd45 TP |
2540 | |
2541 | stats->tx_packets += sent; | |
2542 | stats->tx_dropped += dropped; | |
2543 | ||
2544 | for (i = 0; i < sent; i++) { | |
2545 | stats->tx_bytes += dp_packet_size(packets[i]); | |
2546 | } | |
2f862c71 | 2547 | |
6d77abf4 KT |
2548 | if (OVS_UNLIKELY(dropped || sw_stats_add->tx_retries)) { |
2549 | struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats; | |
2550 | ||
2551 | sw_stats->tx_retries += sw_stats_add->tx_retries; | |
2552 | sw_stats->tx_failure_drops += sw_stats_add->tx_failure_drops; | |
2553 | sw_stats->tx_mtu_exceeded_drops += sw_stats_add->tx_mtu_exceeded_drops; | |
2554 | sw_stats->tx_qos_drops += sw_stats_add->tx_qos_drops; | |
29cf9c1b | 2555 | sw_stats->tx_invalid_hwol_drops += sw_stats_add->tx_invalid_hwol_drops; |
6d77abf4 | 2556 | } |
9e3ddd45 TP |
2557 | } |
2558 | ||
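/* Common vhost transmit path: map the OVS tx queue to the guest virtqueue,
 * drop packets that fail HWOL preparation, MTU or QoS checks, then enqueue
 * the rest with rte_vhost_enqueue_burst(), retrying a bounded number of
 * times when the guest ring is full. */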
58397e6c | 2559 | static void |
4573fbd3 | 2560 | __netdev_dpdk_vhost_send(struct netdev *netdev, int qid, |
dd52de45 | 2561 | struct dp_packet **pkts, int cnt) |
58397e6c | 2562 | { |
d46285a2 | 2563 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
95e9881f | 2564 | struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts; |
2f862c71 SV |
2565 | struct netdev_dpdk_sw_stats sw_stats_add; |
2566 | unsigned int n_packets_to_free = cnt; | |
2567 | unsigned int total_packets = cnt; | |
dd52de45 | 2568 | int i, retries = 0; |
080f080c | 2569 | int max_retries = VHOST_ENQ_RETRY_MIN; |
daf22bf7 | 2570 | int vid = netdev_dpdk_get_vid(dev); |
58397e6c | 2571 | |
81acebda | 2572 | qid = dev->tx_q[qid % netdev->n_txq].map; |
585a5bea | 2573 | |
daf22bf7 | 2574 | if (OVS_UNLIKELY(vid < 0 || !dev->vhost_reconfigured || qid < 0 |
e543851d | 2575 | || !(dev->flags & NETDEV_UP))) { |
d46285a2 DDP |
2576 | rte_spinlock_lock(&dev->stats_lock); |
2577 | dev->stats.tx_dropped+= cnt; | |
2578 | rte_spinlock_unlock(&dev->stats_lock); | |
1b99bb05 | 2579 | goto out; |
58397e6c KT |
2580 | } |
2581 | ||
9ff24b9c DM |
2582 | if (OVS_UNLIKELY(!rte_spinlock_trylock(&dev->tx_q[qid].tx_lock))) { |
2583 | COVERAGE_INC(vhost_tx_contention); | |
2584 | rte_spinlock_lock(&dev->tx_q[qid].tx_lock); | |
2585 | } | |
58397e6c | 2586 | |
29cf9c1b FL |
2587 | sw_stats_add.tx_invalid_hwol_drops = cnt; |
2588 | if (userspace_tso_enabled()) { | |
2589 | cnt = netdev_dpdk_prep_hwol_batch(dev, cur_pkts, cnt); | |
2590 | } | |
2591 | ||
2592 | sw_stats_add.tx_invalid_hwol_drops -= cnt; | |
2593 | sw_stats_add.tx_mtu_exceeded_drops = cnt; | |
c6ec9d17 | 2594 | cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt); |
29cf9c1b | 2595 | sw_stats_add.tx_mtu_exceeded_drops -= cnt; |
2f862c71 | 2596 | |
0bf765f7 | 2597 | /* Check if QoS has been configured for the netdev */ |
2f862c71 | 2598 | sw_stats_add.tx_qos_drops = cnt; |
3e90f7d7 | 2599 | cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true); |
2f862c71 SV |
2600 | sw_stats_add.tx_qos_drops -= cnt; |
2601 | ||
2602 | n_packets_to_free = cnt; | |
0bf765f7 | 2603 | |
95e9881f | 2604 | do { |
4573fbd3 | 2605 | int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ; |
95e9881f KT |
2606 | unsigned int tx_pkts; |
2607 | ||
daf22bf7 | 2608 | tx_pkts = rte_vhost_enqueue_burst(vid, vhost_qid, cur_pkts, cnt); |
95e9881f KT |
2609 | if (OVS_LIKELY(tx_pkts)) { |
2610 | /* Packets have been sent.*/ | |
2611 | cnt -= tx_pkts; | |
31871ee3 | 2612 | /* Prepare for possible retry.*/ |
95e9881f | 2613 | cur_pkts = &cur_pkts[tx_pkts]; |
080f080c KT |
2614 | if (OVS_UNLIKELY(cnt && !retries)) { |
2615 | /* | |
2616 | * Read max retries as there are packets not sent | |
2617 | * and no retries have already occurred. | |
2618 | */ | |
2619 | atomic_read_relaxed(&dev->vhost_tx_retries_max, &max_retries); | |
2620 | } | |
95e9881f | 2621 | } else { |
31871ee3 KT |
2622 | /* No packets sent - do not retry.*/ |
2623 | break; | |
95e9881f | 2624 | } |
080f080c | 2625 | } while (cnt && (retries++ < max_retries)); |
4573fbd3 | 2626 | |
d46285a2 | 2627 | rte_spinlock_unlock(&dev->tx_q[qid].tx_lock); |
95e9881f | 2628 | |
2f862c71 SV |
2629 | sw_stats_add.tx_failure_drops = cnt; |
2630 | sw_stats_add.tx_retries = MIN(retries, max_retries); | |
2631 | ||
d46285a2 | 2632 | rte_spinlock_lock(&dev->stats_lock); |
2f862c71 SV |
2633 | netdev_dpdk_vhost_update_tx_counters(dev, pkts, total_packets, |
2634 | &sw_stats_add); | |
d46285a2 | 2635 | rte_spinlock_unlock(&dev->stats_lock); |
58397e6c KT |
2636 | |
2637 | out: | |
2f862c71 | 2638 | for (i = 0; i < n_packets_to_free; i++) { |
dd52de45 | 2639 | dp_packet_delete(pkts[i]); |
58397e6c KT |
2640 | } |
2641 | } | |
2642 | ||
29cf9c1b FL |
2643 | static void |
2644 | netdev_dpdk_extbuf_free(void *addr OVS_UNUSED, void *opaque) | |
2645 | { | |
2646 | rte_free(opaque); | |
2647 | } | |
2648 | ||
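/* Allocate an external buffer large enough for 'data_len' bytes plus headroom
 * and attach it to 'pkt', keeping the shared info either in the mbuf tailroom
 * or at the end of the new buffer.  Used for packets that do not fit into a
 * single mempool mbuf. */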
2649 | static struct rte_mbuf * | |
2650 | dpdk_pktmbuf_attach_extbuf(struct rte_mbuf *pkt, uint32_t data_len) | |
2651 | { | |
2652 | uint32_t total_len = RTE_PKTMBUF_HEADROOM + data_len; | |
2653 | struct rte_mbuf_ext_shared_info *shinfo = NULL; | |
2654 | uint16_t buf_len; | |
2655 | void *buf; | |
2656 | ||
2657 | if (rte_pktmbuf_tailroom(pkt) >= sizeof *shinfo) { | |
2658 | shinfo = rte_pktmbuf_mtod(pkt, struct rte_mbuf_ext_shared_info *); | |
2659 | } else { | |
2660 | total_len += sizeof *shinfo + sizeof(uintptr_t); | |
2661 | total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t)); | |
2662 | } | |
2663 | ||
2664 | if (OVS_UNLIKELY(total_len > UINT16_MAX)) { | |
2665 | VLOG_ERR("Can't copy packet: too big %u", total_len); | |
2666 | return NULL; | |
2667 | } | |
2668 | ||
2669 | buf_len = total_len; | |
2670 | buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE); | |
2671 | if (OVS_UNLIKELY(buf == NULL)) { | |
2672 | VLOG_ERR("Failed to allocate memory using rte_malloc: %u", buf_len); | |
2673 | return NULL; | |
2674 | } | |
2675 | ||
2676 | /* Initialize shinfo. */ | |
2677 | if (shinfo) { | |
2678 | shinfo->free_cb = netdev_dpdk_extbuf_free; | |
2679 | shinfo->fcb_opaque = buf; | |
2680 | rte_mbuf_ext_refcnt_set(shinfo, 1); | |
2681 | } else { | |
2682 | shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len, | |
2683 | netdev_dpdk_extbuf_free, | |
2684 | buf); | |
2685 | if (OVS_UNLIKELY(shinfo == NULL)) { | |
2686 | rte_free(buf); | |
2687 | VLOG_ERR("Failed to initialize shared info for mbuf while " | |
2688 | "attempting to attach an external buffer."); | |
2689 | return NULL; | |
2690 | } | |
2691 | } | |
2692 | ||
2693 | rte_pktmbuf_attach_extbuf(pkt, buf, rte_malloc_virt2iova(buf), buf_len, | |
2694 | shinfo); | |
2695 | rte_pktmbuf_reset_headroom(pkt); | |
2696 | ||
2697 | return pkt; | |
2698 | } | |
2699 | ||
2700 | static struct rte_mbuf * | |
2701 | dpdk_pktmbuf_alloc(struct rte_mempool *mp, uint32_t data_len) | |
2702 | { | |
2703 | struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp); | |
2704 | ||
2705 | if (OVS_UNLIKELY(!pkt)) { | |
2706 | return NULL; | |
2707 | } | |
2708 | ||
2709 | if (rte_pktmbuf_tailroom(pkt) >= data_len) { | |
2710 | return pkt; | |
2711 | } | |
2712 | ||
2713 | if (dpdk_pktmbuf_attach_extbuf(pkt, data_len)) { | |
2714 | return pkt; | |
2715 | } | |
2716 | ||
2717 | rte_pktmbuf_free(pkt); | |
2718 | ||
2719 | return NULL; | |
2720 | } | |
2721 | ||
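/* Deep-copy a dp_packet (which may not be DPDK-allocated) into a new mbuf
 * taken from 'mp', preserving its size, offload flags, packet type and
 * dp_packet metadata so that it can be handed to the DPDK tx path. */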
2722 | static struct dp_packet * | |
2723 | dpdk_copy_dp_packet_to_mbuf(struct rte_mempool *mp, struct dp_packet *pkt_orig) | |
2724 | { | |
2725 | struct rte_mbuf *mbuf_dest; | |
2726 | struct dp_packet *pkt_dest; | |
2727 | uint32_t pkt_len; | |
2728 | ||
2729 | pkt_len = dp_packet_size(pkt_orig); | |
2730 | mbuf_dest = dpdk_pktmbuf_alloc(mp, pkt_len); | |
2731 | if (OVS_UNLIKELY(mbuf_dest == NULL)) { | |
2732 | return NULL; | |
2733 | } | |
2734 | ||
2735 | pkt_dest = CONTAINER_OF(mbuf_dest, struct dp_packet, mbuf); | |
2736 | memcpy(dp_packet_data(pkt_dest), dp_packet_data(pkt_orig), pkt_len); | |
2737 | dp_packet_set_size(pkt_dest, pkt_len); | |
2738 | ||
2739 | mbuf_dest->tx_offload = pkt_orig->mbuf.tx_offload; | |
2740 | mbuf_dest->packet_type = pkt_orig->mbuf.packet_type; | |
2741 | mbuf_dest->ol_flags |= (pkt_orig->mbuf.ol_flags & | |
2742 | ~(EXT_ATTACHED_MBUF | IND_ATTACHED_MBUF)); | |
2743 | ||
2744 | memcpy(&pkt_dest->l2_pad_size, &pkt_orig->l2_pad_size, | |
2745 | sizeof(struct dp_packet) - offsetof(struct dp_packet, l2_pad_size)); | |
2746 | ||
2747 | if (mbuf_dest->ol_flags & PKT_TX_L4_MASK) { | |
2748 | mbuf_dest->l2_len = (char *)dp_packet_l3(pkt_dest) | |
2749 | - (char *)dp_packet_eth(pkt_dest); | |
2750 | mbuf_dest->l3_len = (char *)dp_packet_l4(pkt_dest) | |
2751 | - (char *) dp_packet_l3(pkt_dest); | |
2752 | } | |
2753 | ||
2754 | return pkt_dest; | |
2755 | } | |
2756 | ||
8a9562d2 PS |
2757 | /* Tx function: copy the batch into DPDK mbufs and transmit the copies. */ |
2758 | static void | |
64839cf4 | 2759 | dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch) |
db73f716 | 2760 | OVS_NO_THREAD_SAFETY_ANALYSIS |
8a9562d2 | 2761 | { |
8a14bd7b | 2762 | const size_t batch_cnt = dp_packet_batch_size(batch); |
bce01e3a | 2763 | #if !defined(__CHECKER__) && !defined(_WIN32) |
8a14bd7b | 2764 | const size_t PKT_ARRAY_SIZE = batch_cnt; |
bce01e3a EJ |
2765 | #else |
2766 | /* Sparse or MSVC doesn't like variable length array. */ | |
cd159f1a | 2767 | enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST }; |
bce01e3a | 2768 | #endif |
8a9562d2 | 2769 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
29cf9c1b | 2770 | struct dp_packet *pkts[PKT_ARRAY_SIZE]; |
2f862c71 | 2771 | struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats; |
8a14bd7b | 2772 | uint32_t cnt = batch_cnt; |
3e90f7d7 | 2773 | uint32_t dropped = 0; |
2f862c71 SV |
2774 | uint32_t tx_failure = 0; |
2775 | uint32_t mtu_drops = 0; | |
2776 | uint32_t qos_drops = 0; | |
3e90f7d7 GZ |
2777 | |
2778 | if (dev->type != DPDK_DEV_VHOST) { | |
2779 | /* Check if QoS has been configured for this netdev. */ | |
2780 | cnt = netdev_dpdk_qos_run(dev, (struct rte_mbuf **) batch->packets, | |
8a14bd7b | 2781 | batch_cnt, false); |
2f862c71 | 2782 | qos_drops = batch_cnt - cnt; |
3e90f7d7 | 2783 | } |
8a9562d2 | 2784 | |
3e90f7d7 GZ |
2785 | uint32_t txcnt = 0; |
2786 | ||
2787 | for (uint32_t i = 0; i < cnt; i++) { | |
8a14bd7b BB |
2788 | struct dp_packet *packet = batch->packets[i]; |
2789 | uint32_t size = dp_packet_size(packet); | |
95fb793a | 2790 | |
29cf9c1b FL |
2791 | if (size > dev->max_packet_len |
2792 | && !(packet->mbuf.ol_flags & PKT_TX_TCP_SEG)) { | |
2793 | VLOG_WARN_RL(&rl, "Too big size %u max_packet_len %d", size, | |
2794 | dev->max_packet_len); | |
2f862c71 | 2795 | mtu_drops++; |
f4fd623c DDP |
2796 | continue; |
2797 | } | |
8a9562d2 | 2798 | |
29cf9c1b | 2799 | pkts[txcnt] = dpdk_copy_dp_packet_to_mbuf(dev->dpdk_mp->mp, packet); |
8a14bd7b | 2800 | if (OVS_UNLIKELY(!pkts[txcnt])) { |
2f862c71 | 2801 | dropped = cnt - i; |
175cf4de | 2802 | break; |
f4fd623c DDP |
2803 | } |
2804 | ||
3e90f7d7 | 2805 | txcnt++; |
f4fd623c | 2806 | } |
8a9562d2 | 2807 | |
3e90f7d7 GZ |
2808 | if (OVS_LIKELY(txcnt)) { |
2809 | if (dev->type == DPDK_DEV_VHOST) { | |
29cf9c1b | 2810 | __netdev_dpdk_vhost_send(netdev, qid, pkts, txcnt); |
3e90f7d7 | 2811 | } else { |
29cf9c1b FL |
2812 | tx_failure += netdev_dpdk_eth_tx_burst(dev, qid, |
2813 | (struct rte_mbuf **)pkts, | |
2814 | txcnt); | |
3e90f7d7 | 2815 | } |
58397e6c | 2816 | } |
db73f716 | 2817 | |
2f862c71 | 2818 | dropped += qos_drops + mtu_drops + tx_failure; |
0bf765f7 IS |
2819 | if (OVS_UNLIKELY(dropped)) { |
2820 | rte_spinlock_lock(&dev->stats_lock); | |
2821 | dev->stats.tx_dropped += dropped; | |
2f862c71 SV |
2822 | sw_stats->tx_failure_drops += tx_failure; |
2823 | sw_stats->tx_mtu_exceeded_drops += mtu_drops; | |
2824 | sw_stats->tx_qos_drops += qos_drops; | |
0bf765f7 IS |
2825 | rte_spinlock_unlock(&dev->stats_lock); |
2826 | } | |
8a9562d2 PS |
2827 | } |
2828 | ||
58397e6c | 2829 | static int |
64839cf4 WT |
2830 | netdev_dpdk_vhost_send(struct netdev *netdev, int qid, |
2831 | struct dp_packet_batch *batch, | |
b30896c9 | 2832 | bool concurrent_txq OVS_UNUSED) |
58397e6c | 2833 | { |
58397e6c | 2834 | |
b30896c9 | 2835 | if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) { |
64839cf4 | 2836 | dpdk_do_tx_copy(netdev, qid, batch); |
b30896c9 | 2837 | dp_packet_delete_batch(batch, true); |
58397e6c | 2838 | } else { |
940ac2ce PC |
2839 | __netdev_dpdk_vhost_send(netdev, qid, batch->packets, |
2840 | dp_packet_batch_size(batch)); | |
58397e6c KT |
2841 | } |
2842 | return 0; | |
2843 | } | |
2844 | ||
7251515e DV |
2845 | static inline void |
2846 | netdev_dpdk_send__(struct netdev_dpdk *dev, int qid, | |
b30896c9 | 2847 | struct dp_packet_batch *batch, |
324c8374 | 2848 | bool concurrent_txq) |
8a9562d2 | 2849 | { |
3b1fb077 | 2850 | if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) { |
b30896c9 | 2851 | dp_packet_delete_batch(batch, true); |
3b1fb077 DDP |
2852 | return; |
2853 | } | |
2854 | ||
324c8374 | 2855 | if (OVS_UNLIKELY(concurrent_txq)) { |
81acebda | 2856 | qid = qid % dev->up.n_txq; |
a0cb2d66 DDP |
2857 | rte_spinlock_lock(&dev->tx_q[qid].tx_lock); |
2858 | } | |
2859 | ||
b30896c9 | 2860 | if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) { |
7251515e DV |
2861 | struct netdev *netdev = &dev->up; |
2862 | ||
64839cf4 | 2863 | dpdk_do_tx_copy(netdev, qid, batch); |
b30896c9 | 2864 | dp_packet_delete_batch(batch, true); |
8a9562d2 | 2865 | } else { |
2f862c71 | 2866 | struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats; |
29cf9c1b FL |
2867 | int dropped; |
2868 | int tx_failure, mtu_drops, qos_drops, hwol_drops; | |
fd57eeba | 2869 | int batch_cnt = dp_packet_batch_size(batch); |
2391135c | 2870 | struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets; |
8a9562d2 | 2871 | |
29cf9c1b FL |
2872 | hwol_drops = batch_cnt; |
2873 | if (userspace_tso_enabled()) { | |
2874 | batch_cnt = netdev_dpdk_prep_hwol_batch(dev, pkts, batch_cnt); | |
2875 | } | |
2876 | hwol_drops -= batch_cnt; | |
2877 | mtu_drops = batch_cnt; | |
2878 | batch_cnt = netdev_dpdk_filter_packet_len(dev, pkts, batch_cnt); | |
2879 | mtu_drops -= batch_cnt; | |
2880 | qos_drops = batch_cnt; | |
2881 | batch_cnt = netdev_dpdk_qos_run(dev, pkts, batch_cnt, true); | |
2882 | qos_drops -= batch_cnt; | |
1b99bb05 | 2883 | |
29cf9c1b | 2884 | tx_failure = netdev_dpdk_eth_tx_burst(dev, qid, pkts, batch_cnt); |
8a9562d2 | 2885 | |
29cf9c1b | 2886 | dropped = tx_failure + mtu_drops + qos_drops + hwol_drops; |
f4fd623c | 2887 | if (OVS_UNLIKELY(dropped)) { |
45d947c4 | 2888 | rte_spinlock_lock(&dev->stats_lock); |
f4fd623c | 2889 | dev->stats.tx_dropped += dropped; |
2f862c71 SV |
2890 | sw_stats->tx_failure_drops += tx_failure; |
2891 | sw_stats->tx_mtu_exceeded_drops += mtu_drops; | |
2892 | sw_stats->tx_qos_drops += qos_drops; | |
29cf9c1b | 2893 | sw_stats->tx_invalid_hwol_drops += hwol_drops; |
45d947c4 | 2894 | rte_spinlock_unlock(&dev->stats_lock); |
f4fd623c | 2895 | } |
8a9562d2 | 2896 | } |
a0cb2d66 | 2897 | |
324c8374 | 2898 | if (OVS_UNLIKELY(concurrent_txq)) { |
a0cb2d66 DDP |
2899 | rte_spinlock_unlock(&dev->tx_q[qid].tx_lock); |
2900 | } | |
7251515e DV |
2901 | } |
2902 | ||
2903 | static int | |
2904 | netdev_dpdk_eth_send(struct netdev *netdev, int qid, | |
b30896c9 | 2905 | struct dp_packet_batch *batch, bool concurrent_txq) |
7251515e DV |
2906 | { |
2907 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
8a9562d2 | 2908 | |
b30896c9 | 2909 | netdev_dpdk_send__(dev, qid, batch, concurrent_txq); |
7251515e | 2910 | return 0; |
8a9562d2 PS |
2911 | } |
2912 | ||
f9b0107d IM |
2913 | static int |
2914 | netdev_dpdk_set_etheraddr__(struct netdev_dpdk *dev, const struct eth_addr mac) | |
2915 | OVS_REQUIRES(dev->mutex) | |
2916 | { | |
2917 | int err = 0; | |
2918 | ||
2919 | if (dev->type == DPDK_DEV_ETH) { | |
2920 | struct rte_ether_addr ea; | |
2921 | ||
2922 | memcpy(ea.addr_bytes, mac.ea, ETH_ADDR_LEN); | |
2923 | err = -rte_eth_dev_default_mac_addr_set(dev->port_id, &ea); | |
2924 | } | |
2925 | if (!err) { | |
2926 | dev->hwaddr = mac; | |
2927 | } else { | |
2928 | VLOG_WARN("%s: Failed to set requested mac("ETH_ADDR_FMT"): %s", | |
2929 | netdev_get_name(&dev->up), ETH_ADDR_ARGS(mac), | |
2930 | rte_strerror(err)); | |
2931 | } | |
2932 | ||
2933 | return err; | |
2934 | } | |
2935 | ||
8a9562d2 | 2936 | static int |
74ff3298 | 2937 | netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac) |
8a9562d2 PS |
2938 | { |
2939 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
f9b0107d | 2940 | int err = 0; |
8a9562d2 PS |
2941 | |
2942 | ovs_mutex_lock(&dev->mutex); | |
2943 | if (!eth_addr_equals(dev->hwaddr, mac)) { | |
f9b0107d IM |
2944 | err = netdev_dpdk_set_etheraddr__(dev, mac); |
2945 | if (!err) { | |
2946 | netdev_change_seq_changed(netdev); | |
2947 | } | |
8a9562d2 PS |
2948 | } |
2949 | ovs_mutex_unlock(&dev->mutex); | |
2950 | ||
f9b0107d | 2951 | return err; |
8a9562d2 PS |
2952 | } |
2953 | ||
2954 | static int | |
74ff3298 | 2955 | netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac) |
8a9562d2 PS |
2956 | { |
2957 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
2958 | ||
2959 | ovs_mutex_lock(&dev->mutex); | |
74ff3298 | 2960 | *mac = dev->hwaddr; |
8a9562d2 PS |
2961 | ovs_mutex_unlock(&dev->mutex); |
2962 | ||
2963 | return 0; | |
2964 | } | |
2965 | ||
2966 | static int | |
2967 | netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup) | |
2968 | { | |
2969 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
2970 | ||
2971 | ovs_mutex_lock(&dev->mutex); | |
2972 | *mtup = dev->mtu; | |
2973 | ovs_mutex_unlock(&dev->mutex); | |
2974 | ||
2975 | return 0; | |
2976 | } | |
2977 | ||
0072e931 MK |
2978 | static int |
2979 | netdev_dpdk_set_mtu(struct netdev *netdev, int mtu) | |
2980 | { | |
2981 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
2982 | ||
f6f50552 IS |
2983 | /* XXX: Ensure that the overall frame length of the requested MTU does not |
2984 | * surpass the NETDEV_DPDK_MAX_PKT_LEN. DPDK device drivers differ in how | |
2985 | * the L2 frame length is calculated for a given MTU when | |
2986 | * rte_eth_dev_set_mtu(mtu) is called e.g. i40e driver includes 2 x vlan | |
2987 | * headers, the em driver includes 1 x vlan header, the ixgbe driver does | |
2988 | * not include vlan headers. As such we should use | |
2989 | * MTU_TO_MAX_FRAME_LEN(mtu) which includes an additional 2 x vlan headers | |
2990 | * (8 bytes) for comparison. This avoids a failure later with | |
2991 | * rte_eth_dev_set_mtu(). This approach should be used until DPDK provides | |
2992 | * a method to retrieve the upper bound MTU for a given device. | |
2993 | */ | |
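/* Worked example (assuming MTU_TO_MAX_FRAME_LEN() adds the Ethernet header
 * (14 bytes), CRC (4 bytes) and the 2 x vlan headers (8 bytes) mentioned
 * above): a requested mtu of 9000 gives a maximum frame length of
 * 9000 + 14 + 4 + 8 = 9026 bytes, which must not exceed
 * NETDEV_DPDK_MAX_PKT_LEN for the request to be accepted below. */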
2994 | if (MTU_TO_MAX_FRAME_LEN(mtu) > NETDEV_DPDK_MAX_PKT_LEN | |
127b6a6e | 2995 | || mtu < RTE_ETHER_MIN_MTU) { |
0072e931 MK |
2996 | VLOG_WARN("%s: unsupported MTU %d\n", dev->up.name, mtu); |
2997 | return EINVAL; | |
2998 | } | |
2999 | ||
3000 | ovs_mutex_lock(&dev->mutex); | |
3001 | if (dev->requested_mtu != mtu) { | |
3002 | dev->requested_mtu = mtu; | |
3003 | netdev_request_reconfigure(netdev); | |
3004 | } | |
3005 | ovs_mutex_unlock(&dev->mutex); | |
3006 | ||
3007 | return 0; | |
3008 | } | |
3009 | ||
8a9562d2 | 3010 | static int |
d46285a2 | 3011 | netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier); |
8a9562d2 | 3012 | |
58397e6c KT |
3013 | static int |
3014 | netdev_dpdk_vhost_get_stats(const struct netdev *netdev, | |
3015 | struct netdev_stats *stats) | |
3016 | { | |
3017 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
3018 | ||
3019 | ovs_mutex_lock(&dev->mutex); | |
58397e6c | 3020 | |
45d947c4 | 3021 | rte_spinlock_lock(&dev->stats_lock); |
58397e6c | 3022 | /* Supported Stats */ |
50986e78 | 3023 | stats->rx_packets = dev->stats.rx_packets; |
3024 | stats->tx_packets = dev->stats.tx_packets; | |
9509913a | 3025 | stats->rx_dropped = dev->stats.rx_dropped; |
50986e78 | 3026 | stats->tx_dropped = dev->stats.tx_dropped; |
9e3ddd45 TP |
3027 | stats->multicast = dev->stats.multicast; |
3028 | stats->rx_bytes = dev->stats.rx_bytes; | |
3029 | stats->tx_bytes = dev->stats.tx_bytes; | |
3030 | stats->rx_errors = dev->stats.rx_errors; | |
3031 | stats->rx_length_errors = dev->stats.rx_length_errors; | |
d6e3feb5 | 3032 | |
3033 | stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets; | |
3034 | stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets; | |
3035 | stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets; | |
3036 | stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets; | |
3037 | stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets; | |
3038 | stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets; | |
3039 | stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets; | |
3040 | ||
45d947c4 | 3041 | rte_spinlock_unlock(&dev->stats_lock); |
9e3ddd45 | 3042 | |
58397e6c KT |
3043 | ovs_mutex_unlock(&dev->mutex); |
3044 | ||
3045 | return 0; | |
3046 | } | |
3047 | ||
d6e3feb5 | 3048 | static void |
3049 | netdev_dpdk_convert_xstats(struct netdev_stats *stats, | |
0a0f39df CL |
3050 | const struct rte_eth_xstat *xstats, |
3051 | const struct rte_eth_xstat_name *names, | |
d6e3feb5 | 3052 | const unsigned int size) |
3053 | { | |
18366d16 IM |
3054 | /* DPDK XSTATS Counter names definition. */ |
3055 | #define DPDK_XSTATS \ | |
3056 | DPDK_XSTAT(multicast, "rx_multicast_packets" ) \ | |
3057 | DPDK_XSTAT(tx_multicast_packets, "tx_multicast_packets" ) \ | |
3058 | DPDK_XSTAT(rx_broadcast_packets, "rx_broadcast_packets" ) \ | |
3059 | DPDK_XSTAT(tx_broadcast_packets, "tx_broadcast_packets" ) \ | |
3060 | DPDK_XSTAT(rx_undersized_errors, "rx_undersized_errors" ) \ | |
3061 | DPDK_XSTAT(rx_oversize_errors, "rx_oversize_errors" ) \ | |
3062 | DPDK_XSTAT(rx_fragmented_errors, "rx_fragmented_errors" ) \ | |
3063 | DPDK_XSTAT(rx_jabber_errors, "rx_jabber_errors" ) \ | |
3064 | DPDK_XSTAT(rx_1_to_64_packets, "rx_size_64_packets" ) \ | |
3065 | DPDK_XSTAT(rx_65_to_127_packets, "rx_size_65_to_127_packets" ) \ | |
3066 | DPDK_XSTAT(rx_128_to_255_packets, "rx_size_128_to_255_packets" ) \ | |
3067 | DPDK_XSTAT(rx_256_to_511_packets, "rx_size_256_to_511_packets" ) \ | |
3068 | DPDK_XSTAT(rx_512_to_1023_packets, "rx_size_512_to_1023_packets" ) \ | |
3069 | DPDK_XSTAT(rx_1024_to_1522_packets, "rx_size_1024_to_1522_packets" ) \ | |
3070 | DPDK_XSTAT(rx_1523_to_max_packets, "rx_size_1523_to_max_packets" ) \ | |
3071 | DPDK_XSTAT(tx_1_to_64_packets, "tx_size_64_packets" ) \ | |
3072 | DPDK_XSTAT(tx_65_to_127_packets, "tx_size_65_to_127_packets" ) \ | |
3073 | DPDK_XSTAT(tx_128_to_255_packets, "tx_size_128_to_255_packets" ) \ | |
3074 | DPDK_XSTAT(tx_256_to_511_packets, "tx_size_256_to_511_packets" ) \ | |
3075 | DPDK_XSTAT(tx_512_to_1023_packets, "tx_size_512_to_1023_packets" ) \ | |
3076 | DPDK_XSTAT(tx_1024_to_1522_packets, "tx_size_1024_to_1522_packets" ) \ | |
3077 | DPDK_XSTAT(tx_1523_to_max_packets, "tx_size_1523_to_max_packets" ) | |
3078 | ||
d6e3feb5 | 3079 | for (unsigned int i = 0; i < size; i++) { |
18366d16 IM |
3080 | #define DPDK_XSTAT(MEMBER, NAME) \ |
3081 | if (strcmp(NAME, names[i].name) == 0) { \ | |
3082 | stats->MEMBER = xstats[i].value; \ | |
3083 | continue; \ | |
d6e3feb5 | 3084 | } |
18366d16 IM |
3085 | DPDK_XSTATS; |
3086 | #undef DPDK_XSTAT | |
d6e3feb5 | 3087 | } |
18366d16 | 3088 | #undef DPDK_XSTATS |
d6e3feb5 | 3089 | } |
3090 | ||
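/* For reference, each DPDK_XSTAT(MEMBER, NAME) entry above expands, inside
 * the loop, to a name match of the form:
 *
 *     if (strcmp("rx_multicast_packets", names[i].name) == 0) {
 *         stats->multicast = xstats[i].value;
 *         continue;
 *     }
 *
 * so only the driver xstats listed in DPDK_XSTATS are copied into the
 * generic netdev_stats counters. */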
8a9562d2 PS |
3091 | static int |
3092 | netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats) | |
3093 | { | |
3094 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
3095 | struct rte_eth_stats rte_stats; | |
3096 | bool gg; | |
3097 | ||
3098 | netdev_dpdk_get_carrier(netdev, &gg); | |
3099 | ovs_mutex_lock(&dev->mutex); | |
8a9562d2 | 3100 | |
0a0f39df CL |
3101 | struct rte_eth_xstat *rte_xstats = NULL; |
3102 | struct rte_eth_xstat_name *rte_xstats_names = NULL; | |
3103 | int rte_xstats_len, rte_xstats_new_len, rte_xstats_ret; | |
d6e3feb5 | 3104 | |
3105 | if (rte_eth_stats_get(dev->port_id, &rte_stats)) { | |
fa9f4eeb IM |
3106 | VLOG_ERR("Can't get ETH statistics for port: "DPDK_PORT_ID_FMT, |
3107 | dev->port_id); | |
f9256822 | 3108 | ovs_mutex_unlock(&dev->mutex); |
d6e3feb5 | 3109 | return EPROTO; |
3110 | } | |
3111 | ||
0a0f39df CL |
3112 | /* Get length of statistics */ |
3113 | rte_xstats_len = rte_eth_xstats_get_names(dev->port_id, NULL, 0); | |
3114 | if (rte_xstats_len < 0) { | |
fa9f4eeb IM |
3115 | VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT, |
3116 | dev->port_id); | |
0a0f39df CL |
3117 | goto out; |
3118 | } | |
3119 | /* Reserve memory for xstats names and values */ | |
3120 | rte_xstats_names = xcalloc(rte_xstats_len, sizeof *rte_xstats_names); | |
3121 | rte_xstats = xcalloc(rte_xstats_len, sizeof *rte_xstats); | |
3122 | ||
3123 | /* Retrieve xstats names */ | |
3124 | rte_xstats_new_len = rte_eth_xstats_get_names(dev->port_id, | |
3125 | rte_xstats_names, | |
3126 | rte_xstats_len); | |
3127 | if (rte_xstats_new_len != rte_xstats_len) { | |
fa9f4eeb IM |
3128 | VLOG_WARN("Cannot get XSTATS names for port: "DPDK_PORT_ID_FMT, |
3129 | dev->port_id); | |
0a0f39df CL |
3130 | goto out; |
3131 | } | |
3132 | /* Retrieve xstats values */ | |
3133 | memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len); | |
3134 | rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats, | |
3135 | rte_xstats_len); | |
3136 | if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) { | |
3137 | netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_names, | |
3138 | rte_xstats_len); | |
d6e3feb5 | 3139 | } else { |
fa9f4eeb IM |
3140 | VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT, |
3141 | dev->port_id); | |
d6e3feb5 | 3142 | } |
8a9562d2 | 3143 | |
0a0f39df CL |
3144 | out: |
3145 | free(rte_xstats); | |
3146 | free(rte_xstats_names); | |
3147 | ||
2f9dd77f PS |
3148 | stats->rx_packets = rte_stats.ipackets; |
3149 | stats->tx_packets = rte_stats.opackets; | |
3150 | stats->rx_bytes = rte_stats.ibytes; | |
3151 | stats->tx_bytes = rte_stats.obytes; | |
21e9844c | 3152 | stats->rx_errors = rte_stats.ierrors; |
2f9dd77f | 3153 | stats->tx_errors = rte_stats.oerrors; |
8a9562d2 | 3154 | |
45d947c4 | 3155 | rte_spinlock_lock(&dev->stats_lock); |
2f9dd77f | 3156 | stats->tx_dropped = dev->stats.tx_dropped; |
9509913a | 3157 | stats->rx_dropped = dev->stats.rx_dropped; |
45d947c4 | 3158 | rte_spinlock_unlock(&dev->stats_lock); |
9e3ddd45 TP |
3159 | |
3160 | /* These are the available DPDK counters for packets not received due to | |
3161 | * local resource constraints in DPDK and NIC respectively. */ | |
9509913a | 3162 | stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed; |
9e3ddd45 TP |
3163 | stats->rx_missed_errors = rte_stats.imissed; |
3164 | ||
8a9562d2 PS |
3165 | ovs_mutex_unlock(&dev->mutex); |
3166 | ||
3167 | return 0; | |
3168 | } | |
3169 | ||
971f4b39 MW |
3170 | static int |
3171 | netdev_dpdk_get_custom_stats(const struct netdev *netdev, | |
3172 | struct netdev_custom_stats *custom_stats) | |
3173 | { | |
3174 | ||
3175 | uint32_t i; | |
3176 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
b99ab8aa IM |
3177 | int rte_xstats_ret, sw_stats_size; |
3178 | ||
3179 | netdev_dpdk_get_sw_custom_stats(netdev, custom_stats); | |
971f4b39 MW |
3180 | |
3181 | ovs_mutex_lock(&dev->mutex); | |
3182 | ||
3183 | if (netdev_dpdk_configure_xstats(dev)) { | |
3184 | uint64_t *values = xcalloc(dev->rte_xstats_ids_size, | |
3185 | sizeof(uint64_t)); | |
3186 | ||
3187 | rte_xstats_ret = | |
3188 | rte_eth_xstats_get_by_id(dev->port_id, dev->rte_xstats_ids, | |
3189 | values, dev->rte_xstats_ids_size); | |
3190 | ||
3191 | if (rte_xstats_ret > 0 && | |
3192 | rte_xstats_ret <= dev->rte_xstats_ids_size) { | |
3193 | ||
b99ab8aa IM |
3194 | sw_stats_size = custom_stats->size; |
3195 | custom_stats->size += rte_xstats_ret; | |
3196 | custom_stats->counters = xrealloc(custom_stats->counters, | |
3197 | custom_stats->size * | |
3198 | sizeof *custom_stats->counters); | |
971f4b39 MW |
3199 | |
3200 | for (i = 0; i < rte_xstats_ret; i++) { | |
b99ab8aa | 3201 | ovs_strlcpy(custom_stats->counters[sw_stats_size + i].name, |
971f4b39 MW |
3202 | netdev_dpdk_get_xstat_name(dev, |
3203 | dev->rte_xstats_ids[i]), | |
3204 | NETDEV_CUSTOM_STATS_NAME_SIZE); | |
b99ab8aa | 3205 | custom_stats->counters[sw_stats_size + i].value = values[i]; |
971f4b39 MW |
3206 | } |
3207 | } else { | |
fa9f4eeb | 3208 | VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT, |
971f4b39 | 3209 | dev->port_id); |
971f4b39 MW |
3210 | /* Let's clear statistics cache, so it will be |
3211 | * reconfigured */ | |
3212 | netdev_dpdk_clear_xstats(dev); | |
3213 | } | |
526259f2 IM |
3214 | |
3215 | free(values); | |
971f4b39 MW |
3216 | } |
3217 | ||
3218 | ovs_mutex_unlock(&dev->mutex); | |
3219 | ||
3220 | return 0; | |
3221 | } | |
3222 | ||
c161357d | 3223 | static int |
b99ab8aa IM |
3224 | netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev, |
3225 | struct netdev_custom_stats *custom_stats) | |
c161357d KT |
3226 | { |
3227 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
b99ab8aa | 3228 | int i, n; |
c161357d | 3229 | |
2f862c71 SV |
3230 | #define SW_CSTATS \ |
3231 | SW_CSTAT(tx_retries) \ | |
3232 | SW_CSTAT(tx_failure_drops) \ | |
3233 | SW_CSTAT(tx_mtu_exceeded_drops) \ | |
3234 | SW_CSTAT(tx_qos_drops) \ | |
29cf9c1b FL |
3235 | SW_CSTAT(rx_qos_drops) \ |
3236 | SW_CSTAT(tx_invalid_hwol_drops) | |
c161357d | 3237 | |
b99ab8aa IM |
3238 | #define SW_CSTAT(NAME) + 1 |
3239 | custom_stats->size = SW_CSTATS; | |
3240 | #undef SW_CSTAT | |
c161357d KT |
3241 | custom_stats->counters = xcalloc(custom_stats->size, |
3242 | sizeof *custom_stats->counters); | |
5c7ba90d IM |
3243 | |
3244 | ovs_mutex_lock(&dev->mutex); | |
c161357d KT |
3245 | |
3246 | rte_spinlock_lock(&dev->stats_lock); | |
5c7ba90d | 3247 | i = 0; |
b99ab8aa | 3248 | #define SW_CSTAT(NAME) \ |
2f862c71 | 3249 | custom_stats->counters[i++].value = dev->sw_stats->NAME; |
b99ab8aa IM |
3250 | SW_CSTATS; |
3251 | #undef SW_CSTAT | |
c161357d KT |
3252 | rte_spinlock_unlock(&dev->stats_lock); |
3253 | ||
3254 | ovs_mutex_unlock(&dev->mutex); | |
3255 | ||
b99ab8aa IM |
3256 | i = 0; |
3257 | n = 0; | |
3258 | #define SW_CSTAT(NAME) \ | |
3259 | if (custom_stats->counters[i].value != UINT64_MAX) { \ | |
2f862c71 SV |
3260 | ovs_strlcpy(custom_stats->counters[n].name, \ |
3261 | "ovs_"#NAME, NETDEV_CUSTOM_STATS_NAME_SIZE); \ | |
b99ab8aa IM |
3262 | custom_stats->counters[n].value = custom_stats->counters[i].value; \ |
3263 | n++; \ | |
3264 | } \ | |
3265 | i++; | |
3266 | SW_CSTATS; | |
3267 | #undef SW_CSTAT | |
3268 | ||
3269 | custom_stats->size = n; | |
c161357d KT |
3270 | return 0; |
3271 | } | |
3272 | ||
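/* Note on the SW_CSTAT X-macro above: its first expansion defines every
 * entry as "+ 1", so "custom_stats->size = SW_CSTATS;" evaluates to
 * + 1 + 1 + 1 + 1 + 1 + 1, i.e. six counters. The second expansion copies
 * the values under 'stats_lock', and the last one fills in the "ovs_"
 * prefixed names while skipping any counter left at UINT64_MAX. */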
8a9562d2 | 3273 | static int |
d46285a2 | 3274 | netdev_dpdk_get_features(const struct netdev *netdev, |
8a9562d2 | 3275 | enum netdev_features *current, |
ca3d4f55 BX |
3276 | enum netdev_features *advertised, |
3277 | enum netdev_features *supported, | |
3278 | enum netdev_features *peer) | |
8a9562d2 | 3279 | { |
d46285a2 | 3280 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 | 3281 | struct rte_eth_link link; |
dfcb5b8a | 3282 | uint32_t feature = 0; |
8a9562d2 PS |
3283 | |
3284 | ovs_mutex_lock(&dev->mutex); | |
3285 | link = dev->link; | |
3286 | ovs_mutex_unlock(&dev->mutex); | |
3287 | ||
dfcb5b8a IS |
3288 | /* Match against OpenFlow defined link speed values. */ |
3289 | if (link.link_duplex == ETH_LINK_FULL_DUPLEX) { | |
3290 | switch (link.link_speed) { | |
3291 | case ETH_SPEED_NUM_10M: | |
3292 | feature |= NETDEV_F_10MB_FD; | |
3293 | break; | |
3294 | case ETH_SPEED_NUM_100M: | |
3295 | feature |= NETDEV_F_100MB_FD; | |
3296 | break; | |
3297 | case ETH_SPEED_NUM_1G: | |
3298 | feature |= NETDEV_F_1GB_FD; | |
3299 | break; | |
3300 | case ETH_SPEED_NUM_10G: | |
3301 | feature |= NETDEV_F_10GB_FD; | |
3302 | break; | |
3303 | case ETH_SPEED_NUM_40G: | |
3304 | feature |= NETDEV_F_40GB_FD; | |
3305 | break; | |
3306 | case ETH_SPEED_NUM_100G: | |
3307 | feature |= NETDEV_F_100GB_FD; | |
3308 | break; | |
3309 | default: | |
3310 | feature |= NETDEV_F_OTHER; | |
8a9562d2 | 3311 | } |
dfcb5b8a IS |
3312 | } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) { |
3313 | switch (link.link_speed) { | |
3314 | case ETH_SPEED_NUM_10M: | |
3315 | feature |= NETDEV_F_10MB_HD; | |
3316 | break; | |
3317 | case ETH_SPEED_NUM_100M: | |
3318 | feature |= NETDEV_F_100MB_HD; | |
3319 | break; | |
3320 | case ETH_SPEED_NUM_1G: | |
3321 | feature |= NETDEV_F_1GB_HD; | |
3322 | break; | |
3323 | default: | |
3324 | feature |= NETDEV_F_OTHER; | |
74cd69a4 | 3325 | } |
8a9562d2 PS |
3326 | } |
3327 | ||
362ca396 | 3328 | if (link.link_autoneg) { |
dfcb5b8a | 3329 | feature |= NETDEV_F_AUTONEG; |
362ca396 | 3330 | } |
3331 | ||
dfcb5b8a | 3332 | *current = feature; |
ca3d4f55 BX |
3333 | *advertised = *supported = *peer = 0; |
3334 | ||
8a9562d2 PS |
3335 | return 0; |
3336 | } | |
3337 | ||
9509913a IS |
3338 | static struct ingress_policer * |
3339 | netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst) | |
3340 | { | |
3341 | struct ingress_policer *policer = NULL; | |
3342 | uint64_t rate_bytes; | |
3343 | uint64_t burst_bytes; | |
3344 | int err = 0; | |
3345 | ||
3346 | policer = xmalloc(sizeof *policer); | |
3347 | rte_spinlock_init(&policer->policer_lock); | |
3348 | ||
3349 | /* rte_meter requires bytes so convert kbits rate and burst to bytes. */ | |
602c8668 LR |
3350 | rate_bytes = rate * 1000ULL / 8; |
3351 | burst_bytes = burst * 1000ULL / 8; | |
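/* For example, rate = 10000 (kbit/s) becomes
 * rate_bytes = 10000 * 1000 / 8 = 1,250,000 bytes per second. */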
9509913a IS |
3352 | |
3353 | policer->app_srtcm_params.cir = rate_bytes; | |
3354 | policer->app_srtcm_params.cbs = burst_bytes; | |
3355 | policer->app_srtcm_params.ebs = 0; | |
03f3f9c0 OM |
3356 | err = rte_meter_srtcm_profile_config(&policer->in_prof, |
3357 | &policer->app_srtcm_params); | |
3358 | if (!err) { | |
3359 | err = rte_meter_srtcm_config(&policer->in_policer, | |
3360 | &policer->in_prof); | |
3361 | } | |
58be5c0e | 3362 | if (err) { |
9509913a | 3363 | VLOG_ERR("Could not create rte meter for ingress policer"); |
4c47ddde | 3364 | free(policer); |
9509913a IS |
3365 | return NULL; |
3366 | } | |
3367 | ||
3368 | return policer; | |
3369 | } | |
3370 | ||
3371 | static int | |
3372 | netdev_dpdk_set_policing(struct netdev* netdev, uint32_t policer_rate, | |
3373 | uint32_t policer_burst) | |
3374 | { | |
3375 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
3376 | struct ingress_policer *policer; | |
3377 | ||
3378 | /* Force to 0 if no rate specified, | |
3379 | * default to 8000 kbits if burst is 0, | |
3380 | * else stick with user-specified value. | |
3381 | */ | |
3382 | policer_burst = (!policer_rate ? 0 | |
3383 | : !policer_burst ? 8000 | |
3384 | : policer_burst); | |
3385 | ||
3386 | ovs_mutex_lock(&dev->mutex); | |
3387 | ||
3388 | policer = ovsrcu_get_protected(struct ingress_policer *, | |
3389 | &dev->ingress_policer); | |
3390 | ||
3391 | if (dev->policer_rate == policer_rate && | |
3392 | dev->policer_burst == policer_burst) { | |
3393 | /* Assume that settings haven't changed since we last set them. */ | |
3394 | ovs_mutex_unlock(&dev->mutex); | |
3395 | return 0; | |
3396 | } | |
3397 | ||
3398 | /* Destroy any existing ingress policer for the device if one exists */ | |
3399 | if (policer) { | |
3400 | ovsrcu_postpone(free, policer); | |
3401 | } | |
3402 | ||
3403 | if (policer_rate != 0) { | |
3404 | policer = netdev_dpdk_policer_construct(policer_rate, policer_burst); | |
3405 | } else { | |
3406 | policer = NULL; | |
3407 | } | |
3408 | ovsrcu_set(&dev->ingress_policer, policer); | |
3409 | dev->policer_rate = policer_rate; | |
3410 | dev->policer_burst = policer_burst; | |
3411 | ovs_mutex_unlock(&dev->mutex); | |
3412 | ||
3413 | return 0; | |
3414 | } | |
3415 | ||
8a9562d2 PS |
3416 | static int |
3417 | netdev_dpdk_get_ifindex(const struct netdev *netdev) | |
3418 | { | |
3419 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
8a9562d2 PS |
3420 | |
3421 | ovs_mutex_lock(&dev->mutex); | |
12d0d124 PL |
3422 | /* Calculate hash from the netdev name. Ensure that ifindex is a 24-bit |
3423 | * positive integer to meet RFC 2863 recommendations. | |
3424 | */ | |
3425 | int ifindex = hash_string(netdev->name, 0) % 0xfffffe + 1; | |
8a9562d2 PS |
3426 | ovs_mutex_unlock(&dev->mutex); |
3427 | ||
3428 | return ifindex; | |
3429 | } | |
3430 | ||
3431 | static int | |
d46285a2 | 3432 | netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier) |
8a9562d2 | 3433 | { |
d46285a2 | 3434 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 PS |
3435 | |
3436 | ovs_mutex_lock(&dev->mutex); | |
3437 | check_link_status(dev); | |
3438 | *carrier = dev->link.link_status; | |
58397e6c KT |
3439 | |
3440 | ovs_mutex_unlock(&dev->mutex); | |
3441 | ||
3442 | return 0; | |
3443 | } | |
3444 | ||
3445 | static int | |
d46285a2 | 3446 | netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier) |
58397e6c | 3447 | { |
d46285a2 | 3448 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
58397e6c KT |
3449 | |
3450 | ovs_mutex_lock(&dev->mutex); | |
3451 | ||
0a0f39df | 3452 | if (is_vhost_running(dev)) { |
58397e6c KT |
3453 | *carrier = 1; |
3454 | } else { | |
3455 | *carrier = 0; | |
3456 | } | |
3457 | ||
8a9562d2 PS |
3458 | ovs_mutex_unlock(&dev->mutex); |
3459 | ||
3460 | return 0; | |
3461 | } | |
3462 | ||
3463 | static long long int | |
d46285a2 | 3464 | netdev_dpdk_get_carrier_resets(const struct netdev *netdev) |
8a9562d2 | 3465 | { |
d46285a2 | 3466 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 PS |
3467 | long long int carrier_resets; |
3468 | ||
3469 | ovs_mutex_lock(&dev->mutex); | |
3470 | carrier_resets = dev->link_reset_cnt; | |
3471 | ovs_mutex_unlock(&dev->mutex); | |
3472 | ||
3473 | return carrier_resets; | |
3474 | } | |
3475 | ||
3476 | static int | |
d46285a2 | 3477 | netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED, |
8a9562d2 PS |
3478 | long long int interval OVS_UNUSED) |
3479 | { | |
ee32150e | 3480 | return EOPNOTSUPP; |
8a9562d2 PS |
3481 | } |
3482 | ||
3483 | static int | |
3484 | netdev_dpdk_update_flags__(struct netdev_dpdk *dev, | |
3485 | enum netdev_flags off, enum netdev_flags on, | |
64839cf4 WT |
3486 | enum netdev_flags *old_flagsp) |
3487 | OVS_REQUIRES(dev->mutex) | |
8a9562d2 | 3488 | { |
8a9562d2 PS |
3489 | if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) { |
3490 | return EINVAL; | |
3491 | } | |
3492 | ||
3493 | *old_flagsp = dev->flags; | |
3494 | dev->flags |= on; | |
3495 | dev->flags &= ~off; | |
3496 | ||
3497 | if (dev->flags == *old_flagsp) { | |
3498 | return 0; | |
3499 | } | |
3500 | ||
58397e6c | 3501 | if (dev->type == DPDK_DEV_ETH) { |
2d37de73 EC |
3502 | |
3503 | if ((dev->flags ^ *old_flagsp) & NETDEV_UP) { | |
3504 | int err; | |
3505 | ||
3506 | if (dev->flags & NETDEV_UP) { | |
3507 | err = rte_eth_dev_set_link_up(dev->port_id); | |
3508 | } else { | |
3509 | err = rte_eth_dev_set_link_down(dev->port_id); | |
3510 | } | |
3511 | if (err == -ENOTSUP) { | |
3512 | VLOG_INFO("Interface %s does not support link state " | |
3513 | "configuration", netdev_get_name(&dev->up)); | |
3514 | } else if (err < 0) { | |
3515 | VLOG_ERR("Interface %s link change error: %s", | |
3516 | netdev_get_name(&dev->up), rte_strerror(-err)); | |
3517 | dev->flags = *old_flagsp; | |
3518 | return -err; | |
3519 | } | |
3520 | } | |
3521 | ||
58397e6c KT |
3522 | if (dev->flags & NETDEV_PROMISC) { |
3523 | rte_eth_promiscuous_enable(dev->port_id); | |
3524 | } | |
8a9562d2 | 3525 | |
314fb5ad | 3526 | netdev_change_seq_changed(&dev->up); |
e543851d ZB |
3527 | } else { |
3528 | /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is | |
3529 | * running then change netdev's change_seq to trigger link state | |
3530 | * update. */ | |
e543851d ZB |
3531 | |
3532 | if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off))) | |
0a0f39df | 3533 | && is_vhost_running(dev)) { |
e543851d ZB |
3534 | netdev_change_seq_changed(&dev->up); |
3535 | ||
3536 | /* Clear statistics if device is getting up. */ | |
3537 | if (NETDEV_UP & on) { | |
3538 | rte_spinlock_lock(&dev->stats_lock); | |
58be5c0e | 3539 | memset(&dev->stats, 0, sizeof dev->stats); |
e543851d ZB |
3540 | rte_spinlock_unlock(&dev->stats_lock); |
3541 | } | |
3542 | } | |
8a9562d2 PS |
3543 | } |
3544 | ||
3545 | return 0; | |
3546 | } | |
3547 | ||
3548 | static int | |
d46285a2 | 3549 | netdev_dpdk_update_flags(struct netdev *netdev, |
8a9562d2 PS |
3550 | enum netdev_flags off, enum netdev_flags on, |
3551 | enum netdev_flags *old_flagsp) | |
3552 | { | |
d46285a2 | 3553 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 PS |
3554 | int error; |
3555 | ||
d46285a2 DDP |
3556 | ovs_mutex_lock(&dev->mutex); |
3557 | error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp); | |
3558 | ovs_mutex_unlock(&dev->mutex); | |
8a9562d2 PS |
3559 | |
3560 | return error; | |
3561 | } | |
3562 | ||
b2e8b12f FL |
3563 | static int |
3564 | netdev_dpdk_vhost_user_get_status(const struct netdev *netdev, | |
3565 | struct smap *args) | |
3566 | { | |
3567 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
3568 | ||
3569 | ovs_mutex_lock(&dev->mutex); | |
3570 | ||
3571 | bool client_mode = dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT; | |
3572 | smap_add_format(args, "mode", "%s", client_mode ? "client" : "server"); | |
3573 | ||
3574 | int vid = netdev_dpdk_get_vid(dev); | |
3575 | if (vid < 0) { | |
3576 | smap_add_format(args, "status", "disconnected"); | |
3577 | ovs_mutex_unlock(&dev->mutex); | |
3578 | return 0; | |
3579 | } else { | |
3580 | smap_add_format(args, "status", "connected"); | |
3581 | } | |
3582 | ||
3583 | char socket_name[PATH_MAX]; | |
3584 | if (!rte_vhost_get_ifname(vid, socket_name, PATH_MAX)) { | |
3585 | smap_add_format(args, "socket", "%s", socket_name); | |
3586 | } | |
3587 | ||
3588 | uint64_t features; | |
3589 | if (!rte_vhost_get_negotiated_features(vid, &features)) { | |
3590 | smap_add_format(args, "features", "0x%016"PRIx64, features); | |
3591 | } | |
3592 | ||
3593 | uint16_t mtu; | |
3594 | if (!rte_vhost_get_mtu(vid, &mtu)) { | |
3595 | smap_add_format(args, "mtu", "%d", mtu); | |
3596 | } | |
3597 | ||
3598 | int numa = rte_vhost_get_numa_node(vid); | |
3599 | if (numa >= 0) { | |
3600 | smap_add_format(args, "numa", "%d", numa); | |
3601 | } | |
3602 | ||
3603 | uint16_t vring_num = rte_vhost_get_vring_num(vid); | |
3604 | if (vring_num) { | |
3605 | smap_add_format(args, "num_of_vrings", "%d", vring_num); | |
3606 | } | |
3607 | ||
3608 | for (int i = 0; i < vring_num; i++) { | |
3609 | struct rte_vhost_vring vring; | |
b2e8b12f FL |
3610 | |
3611 | rte_vhost_get_vhost_vring(vid, i, &vring); | |
b9a3183d AC |
3612 | smap_add_nocopy(args, xasprintf("vring_%d_size", i), |
3613 | xasprintf("%d", vring.size)); | |
b2e8b12f FL |
3614 | } |
3615 | ||
3616 | ovs_mutex_unlock(&dev->mutex); | |
3617 | return 0; | |
3618 | } | |
3619 | ||
31154f95 IS |
3620 | /* |
3621 | * Convert a given uint32_t link speed defined in DPDK to a string | |
3622 | * equivalent. | |
3623 | */ | |
3624 | static const char * | |
3625 | netdev_dpdk_link_speed_to_str__(uint32_t link_speed) | |
3626 | { | |
3627 | switch (link_speed) { | |
3628 | case ETH_SPEED_NUM_10M: return "10Mbps"; | |
3629 | case ETH_SPEED_NUM_100M: return "100Mbps"; | |
3630 | case ETH_SPEED_NUM_1G: return "1Gbps"; | |
3631 | case ETH_SPEED_NUM_2_5G: return "2.5Gbps"; | |
3632 | case ETH_SPEED_NUM_5G: return "5Gbps"; | |
3633 | case ETH_SPEED_NUM_10G: return "10Gbps"; | |
3634 | case ETH_SPEED_NUM_20G: return "20Gbps"; | |
3635 | case ETH_SPEED_NUM_25G: return "25Gbps"; | |
3636 | case ETH_SPEED_NUM_40G: return "40Gbps"; | |
3637 | case ETH_SPEED_NUM_50G: return "50Gbps"; | |
3638 | case ETH_SPEED_NUM_56G: return "56Gbps"; | |
3639 | case ETH_SPEED_NUM_100G: return "100Gbps"; | |
3640 | default: return "Not Defined"; | |
3641 | } | |
3642 | } | |
3643 | ||
8a9562d2 | 3644 | static int |
d46285a2 | 3645 | netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args) |
8a9562d2 | 3646 | { |
d46285a2 | 3647 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 | 3648 | struct rte_eth_dev_info dev_info; |
31154f95 | 3649 | uint32_t link_speed; |
8a9562d2 | 3650 | |
7cd1261d | 3651 | if (!rte_eth_dev_is_valid_port(dev->port_id)) { |
8a9562d2 | 3652 | return ENODEV; |
7cd1261d | 3653 | } |
8a9562d2 | 3654 | |
03f3f9c0 | 3655 | ovs_mutex_lock(&dpdk_mutex); |
8a9562d2 PS |
3656 | ovs_mutex_lock(&dev->mutex); |
3657 | rte_eth_dev_info_get(dev->port_id, &dev_info); | |
31154f95 | 3658 | link_speed = dev->link.link_speed; |
8a9562d2 | 3659 | ovs_mutex_unlock(&dev->mutex); |
03f3f9c0 OM |
3660 | const struct rte_bus *bus; |
3661 | const struct rte_pci_device *pci_dev; | |
3662 | uint16_t vendor_id = PCI_ANY_ID; | |
3663 | uint16_t device_id = PCI_ANY_ID; | |
3664 | bus = rte_bus_find_by_device(dev_info.device); | |
3665 | if (bus && !strcmp(bus->name, "pci")) { | |
3666 | pci_dev = RTE_DEV_TO_PCI(dev_info.device); | |
3667 | if (pci_dev) { | |
3668 | vendor_id = pci_dev->id.vendor_id; | |
3669 | device_id = pci_dev->id.device_id; | |
3670 | } | |
3671 | } | |
3672 | ovs_mutex_unlock(&dpdk_mutex); | |
8a9562d2 | 3673 | |
fa9f4eeb | 3674 | smap_add_format(args, "port_no", DPDK_PORT_ID_FMT, dev->port_id); |
58be5c0e MK |
3675 | smap_add_format(args, "numa_id", "%d", |
3676 | rte_eth_dev_socket_id(dev->port_id)); | |
8a9562d2 PS |
3677 | smap_add_format(args, "driver_name", "%s", dev_info.driver_name); |
3678 | smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize); | |
4be4d22c | 3679 | smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len); |
8a9562d2 PS |
3680 | smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues); |
3681 | smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues); | |
3682 | smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs); | |
58be5c0e MK |
3683 | smap_add_format(args, "max_hash_mac_addrs", "%u", |
3684 | dev_info.max_hash_mac_addrs); | |
8a9562d2 PS |
3685 | smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs); |
3686 | smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools); | |
3687 | ||
3eb8d4fa MW |
3688 | /* Querying the DPDK library for iftype may be done in future, pending |
3689 | * support; cf. RFC 3635 Section 3.2.4. */ | |
3690 | enum { IF_TYPE_ETHERNETCSMACD = 6 }; | |
3691 | ||
3692 | smap_add_format(args, "if_type", "%"PRIu32, IF_TYPE_ETHERNETCSMACD); | |
3693 | smap_add_format(args, "if_descr", "%s %s", rte_version(), | |
3694 | dev_info.driver_name); | |
03f3f9c0 OM |
3695 | smap_add_format(args, "pci-vendor_id", "0x%x", vendor_id); |
3696 | smap_add_format(args, "pci-device_id", "0x%x", device_id); | |
8a9562d2 | 3697 | |
31154f95 IS |
3698 | /* Not all link speeds are defined in the OpenFlow specs e.g. 25 Gbps. |
3699 | * In that case the speed will not be reported as part of the usual | |
3700 | * call to get_features(). Get the link speed of the device and add it | |
3701 | * to the device status in an easy to read string format. | |
3702 | */ | |
3703 | smap_add(args, "link_speed", | |
3704 | netdev_dpdk_link_speed_to_str__(link_speed)); | |
3705 | ||
8a9562d2 PS |
3706 | return 0; |
3707 | } | |
3708 | ||
3709 | static void | |
3710 | netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state) | |
3711 | OVS_REQUIRES(dev->mutex) | |
3712 | { | |
3713 | enum netdev_flags old_flags; | |
3714 | ||
3715 | if (admin_state) { | |
3716 | netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags); | |
3717 | } else { | |
3718 | netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags); | |
3719 | } | |
3720 | } | |
3721 | ||
3722 | static void | |
3723 | netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc, | |
3724 | const char *argv[], void *aux OVS_UNUSED) | |
3725 | { | |
3726 | bool up; | |
3727 | ||
3728 | if (!strcasecmp(argv[argc - 1], "up")) { | |
3729 | up = true; | |
3730 | } else if ( !strcasecmp(argv[argc - 1], "down")) { | |
3731 | up = false; | |
3732 | } else { | |
3733 | unixctl_command_reply_error(conn, "Invalid Admin State"); | |
3734 | return; | |
3735 | } | |
3736 | ||
3737 | if (argc > 2) { | |
3738 | struct netdev *netdev = netdev_from_name(argv[1]); | |
3d0d5ab1 | 3739 | |
8a9562d2 | 3740 | if (netdev && is_dpdk_class(netdev->netdev_class)) { |
3d0d5ab1 | 3741 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
8a9562d2 | 3742 | |
3d0d5ab1 IM |
3743 | ovs_mutex_lock(&dev->mutex); |
3744 | netdev_dpdk_set_admin_state__(dev, up); | |
3745 | ovs_mutex_unlock(&dev->mutex); | |
8a9562d2 PS |
3746 | |
3747 | netdev_close(netdev); | |
3748 | } else { | |
3749 | unixctl_command_reply_error(conn, "Not a DPDK Interface"); | |
3750 | netdev_close(netdev); | |
3751 | return; | |
3752 | } | |
3753 | } else { | |
3d0d5ab1 | 3754 | struct netdev_dpdk *dev; |
8a9562d2 PS |
3755 | |
3756 | ovs_mutex_lock(&dpdk_mutex); | |
3d0d5ab1 IM |
3757 | LIST_FOR_EACH (dev, list_node, &dpdk_list) { |
3758 | ovs_mutex_lock(&dev->mutex); | |
3759 | netdev_dpdk_set_admin_state__(dev, up); | |
3760 | ovs_mutex_unlock(&dev->mutex); | |
8a9562d2 PS |
3761 | } |
3762 | ovs_mutex_unlock(&dpdk_mutex); | |
3763 | } | |
3764 | unixctl_command_reply(conn, "OK"); | |
3765 | } | |
3766 | ||
0ee821c2 DB |
3767 | static void |
3768 | netdev_dpdk_detach(struct unixctl_conn *conn, int argc OVS_UNUSED, | |
3769 | const char *argv[], void *aux OVS_UNUSED) | |
3770 | { | |
0ee821c2 | 3771 | char *response; |
7ee94cba | 3772 | dpdk_port_t port_id; |
0ee821c2 | 3773 | struct netdev_dpdk *dev; |
40e940e4 OM |
3774 | struct rte_device *rte_dev; |
3775 | struct ds used_interfaces = DS_EMPTY_INITIALIZER; | |
3776 | bool used = false; | |
0ee821c2 DB |
3777 | |
3778 | ovs_mutex_lock(&dpdk_mutex); | |
3779 | ||
40e940e4 OM |
3780 | port_id = netdev_dpdk_get_port_by_devargs(argv[1]); |
3781 | if (!rte_eth_dev_is_valid_port(port_id)) { | |
0ee821c2 DB |
3782 | response = xasprintf("Device '%s' not found in DPDK", argv[1]); |
3783 | goto error; | |
3784 | } | |
3785 | ||
40e940e4 OM |
3786 | rte_dev = rte_eth_devices[port_id].device; |
3787 | ds_put_format(&used_interfaces, | |
3788 | "Device '%s' is being used by the following interfaces:", | |
3789 | argv[1]); | |
3790 | ||
3791 | LIST_FOR_EACH (dev, list_node, &dpdk_list) { | |
3792 | /* FIXME: avoid direct access to DPDK array rte_eth_devices. */ | |
3793 | if (rte_eth_devices[dev->port_id].device == rte_dev | |
3794 | && rte_eth_devices[dev->port_id].state != RTE_ETH_DEV_UNUSED) { | |
3795 | used = true; | |
3796 | ds_put_format(&used_interfaces, " %s", | |
3797 | netdev_get_name(&dev->up)); | |
3798 | } | |
3799 | } | |
3800 | ||
3801 | if (used) { | |
3802 | ds_put_cstr(&used_interfaces, ". Remove them before detaching."); | |
3803 | response = ds_steal_cstr(&used_interfaces); | |
3804 | ds_destroy(&used_interfaces); | |
0ee821c2 DB |
3805 | goto error; |
3806 | } | |
40e940e4 | 3807 | ds_destroy(&used_interfaces); |
0ee821c2 DB |
3808 | |
3809 | rte_eth_dev_close(port_id); | |
40e940e4 | 3810 | if (rte_dev_remove(rte_dev) < 0) { |
0ee821c2 DB |
3811 | response = xasprintf("Device '%s' can not be detached", argv[1]); |
3812 | goto error; | |
3813 | } | |
3814 | ||
40e940e4 OM |
3815 | response = xasprintf("All devices shared with device '%s' " |
3816 | "have been detached", argv[1]); | |
0ee821c2 DB |
3817 | |
3818 | ovs_mutex_unlock(&dpdk_mutex); | |
3819 | unixctl_command_reply(conn, response); | |
3820 | free(response); | |
3821 | return; | |
3822 | ||
3823 | error: | |
3824 | ovs_mutex_unlock(&dpdk_mutex); | |
3825 | unixctl_command_reply_error(conn, response); | |
3826 | free(response); | |
3827 | } | |
3828 | ||
be481733 IM |
3829 | static void |
3830 | netdev_dpdk_get_mempool_info(struct unixctl_conn *conn, | |
3831 | int argc, const char *argv[], | |
3832 | void *aux OVS_UNUSED) | |
3833 | { | |
3834 | size_t size; | |
3835 | FILE *stream; | |
3836 | char *response = NULL; | |
3837 | struct netdev *netdev = NULL; | |
3838 | ||
3839 | if (argc == 2) { | |
3840 | netdev = netdev_from_name(argv[1]); | |
3841 | if (!netdev || !is_dpdk_class(netdev->netdev_class)) { | |
3842 | unixctl_command_reply_error(conn, "Not a DPDK Interface"); | |
3843 | goto out; | |
3844 | } | |
3845 | } | |
3846 | ||
3847 | stream = open_memstream(&response, &size); | |
3848 | if (!stream) { | |
3849 | response = xasprintf("Unable to open memstream: %s.", | |
3850 | ovs_strerror(errno)); | |
3851 | unixctl_command_reply_error(conn, response); | |
3852 | goto out; | |
3853 | } | |
3854 | ||
3855 | if (netdev) { | |
3856 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
3857 | ||
3858 | ovs_mutex_lock(&dev->mutex); | |
3859 | ovs_mutex_lock(&dpdk_mp_mutex); | |
3860 | ||
43307ad0 | 3861 | rte_mempool_dump(stream, dev->dpdk_mp->mp); |
be481733 IM |
3862 | |
3863 | ovs_mutex_unlock(&dpdk_mp_mutex); | |
3864 | ovs_mutex_unlock(&dev->mutex); | |
3865 | } else { | |
3866 | ovs_mutex_lock(&dpdk_mp_mutex); | |
3867 | rte_mempool_list_dump(stream); | |
3868 | ovs_mutex_unlock(&dpdk_mp_mutex); | |
3869 | } | |
3870 | ||
3871 | fclose(stream); | |
3872 | ||
3873 | unixctl_command_reply(conn, response); | |
3874 | out: | |
3875 | free(response); | |
3876 | netdev_close(netdev); | |
3877 | } | |
3878 | ||
58397e6c KT |
3879 | /* |
3880 | * Set virtqueue flags so that we do not receive interrupts. | |
3881 | */ | |
3882 | static void | |
0a0f39df | 3883 | set_irq_status(int vid) |
58397e6c | 3884 | { |
4573fbd3 | 3885 | uint32_t i; |
4573fbd3 | 3886 | |
f3e7ec25 MW |
3887 | for (i = 0; i < rte_vhost_get_vring_num(vid); i++) { |
3888 | rte_vhost_enable_guest_notification(vid, i, 0); | |
4573fbd3 FL |
3889 | } |
3890 | } | |
3891 | ||
585a5bea IM |
3892 | /* |
3893 | * Fixes mapping for vhost-user tx queues. Must be called after each | |
81acebda | 3894 | * enabling/disabling of queues and n_txq modifications. |
585a5bea IM |
3895 | */ |
3896 | static void | |
d46285a2 DDP |
3897 | netdev_dpdk_remap_txqs(struct netdev_dpdk *dev) |
3898 | OVS_REQUIRES(dev->mutex) | |
585a5bea IM |
3899 | { |
3900 | int *enabled_queues, n_enabled = 0; | |
81acebda | 3901 | int i, k, total_txqs = dev->up.n_txq; |
585a5bea | 3902 | |
eff23640 | 3903 | enabled_queues = xcalloc(total_txqs, sizeof *enabled_queues); |
585a5bea IM |
3904 | |
3905 | for (i = 0; i < total_txqs; i++) { | |
3906 | /* Enabled queues always mapped to themselves. */ | |
d46285a2 | 3907 | if (dev->tx_q[i].map == i) { |
585a5bea IM |
3908 | enabled_queues[n_enabled++] = i; |
3909 | } | |
3910 | } | |
3911 | ||
3912 | if (n_enabled == 0 && total_txqs != 0) { | |
f3ea2ad2 | 3913 | enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED; |
585a5bea IM |
3914 | n_enabled = 1; |
3915 | } | |
3916 | ||
3917 | k = 0; | |
3918 | for (i = 0; i < total_txqs; i++) { | |
d46285a2 DDP |
3919 | if (dev->tx_q[i].map != i) { |
3920 | dev->tx_q[i].map = enabled_queues[k]; | |
585a5bea IM |
3921 | k = (k + 1) % n_enabled; |
3922 | } | |
3923 | } | |
3924 | ||
170ef726 IM |
3925 | if (VLOG_IS_DBG_ENABLED()) { |
3926 | struct ds mapping = DS_EMPTY_INITIALIZER; | |
3927 | ||
3928 | ds_put_format(&mapping, "TX queue mapping for port '%s':\n", | |
3929 | netdev_get_name(&dev->up)); | |
3930 | for (i = 0; i < total_txqs; i++) { | |
3931 | ds_put_format(&mapping, "%2d --> %2d\n", i, dev->tx_q[i].map); | |
3932 | } | |
3933 | ||
3934 | VLOG_DBG("%s", ds_cstr(&mapping)); | |
3935 | ds_destroy(&mapping); | |
585a5bea IM |
3936 | } |
3937 | ||
eff23640 | 3938 | free(enabled_queues); |
585a5bea | 3939 | } |
4573fbd3 | 3940 | |
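/* Example of the remapping above: with four tx queues of which only 0 and 2
 * are enabled (tx_q[0].map == 0, tx_q[2].map == 2), the disabled queues are
 * pointed at the enabled ones round-robin, giving maps { 0, 0, 2, 2 }, so a
 * send on any qid still reaches an enabled virtqueue. */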
58397e6c KT |
3941 | /* |
3942 | * A new virtio-net device is added to a vhost port. | |
3943 | */ | |
3944 | static int | |
0a0f39df | 3945 | new_device(int vid) |
58397e6c | 3946 | { |
d46285a2 | 3947 | struct netdev_dpdk *dev; |
58397e6c | 3948 | bool exists = false; |
db8f13b0 | 3949 | int newnode = 0; |
0a0f39df CL |
3950 | char ifname[IF_NAME_SZ]; |
3951 | ||
58be5c0e | 3952 | rte_vhost_get_ifname(vid, ifname, sizeof ifname); |
58397e6c KT |
3953 | |
3954 | ovs_mutex_lock(&dpdk_mutex); | |
3955 | /* Add device to the vhost port with the same name as that passed down. */ | |
d46285a2 | 3956 | LIST_FOR_EACH(dev, list_node, &dpdk_list) { |
c1ff66ac | 3957 | ovs_mutex_lock(&dev->mutex); |
bb9d2623 IM |
3958 | if (nullable_string_is_equal(ifname, dev->vhost_id)) { |
3959 | uint32_t qp_num = rte_vhost_get_vring_num(vid) / VIRTIO_QNUM; | |
db8f13b0 CL |
3960 | |
3961 | /* Get NUMA information */ | |
0a0f39df CL |
3962 | newnode = rte_vhost_get_numa_node(vid); |
3963 | if (newnode == -1) { | |
5b9bf9e0 | 3964 | #ifdef VHOST_NUMA |
db8f13b0 | 3965 | VLOG_INFO("Error getting NUMA info for vHost Device '%s'", |
0a0f39df | 3966 | ifname); |
5b9bf9e0 | 3967 | #endif |
db8f13b0 | 3968 | newnode = dev->socket_id; |
db8f13b0 CL |
3969 | } |
3970 | ||
7235cd20 DM |
3971 | if (dev->requested_n_txq < qp_num |
3972 | || dev->requested_n_rxq < qp_num | |
7f5f2bd0 IM |
3973 | || dev->requested_socket_id != newnode) { |
3974 | dev->requested_socket_id = newnode; | |
3975 | dev->requested_n_rxq = qp_num; | |
3976 | dev->requested_n_txq = qp_num; | |
3977 | netdev_request_reconfigure(&dev->up); | |
3978 | } else { | |
3979 | /* Reconfiguration not required. */ | |
3980 | dev->vhost_reconfigured = true; | |
3981 | } | |
81acebda | 3982 | |
0a0f39df | 3983 | ovsrcu_index_set(&dev->vid, vid); |
81acebda IM |
3984 | exists = true; |
3985 | ||
58397e6c | 3986 | /* Disable notifications. */ |
0a0f39df | 3987 | set_irq_status(vid); |
e543851d | 3988 | netdev_change_seq_changed(&dev->up); |
d46285a2 | 3989 | ovs_mutex_unlock(&dev->mutex); |
58397e6c KT |
3990 | break; |
3991 | } | |
c1ff66ac | 3992 | ovs_mutex_unlock(&dev->mutex); |
58397e6c KT |
3993 | } |
3994 | ovs_mutex_unlock(&dpdk_mutex); | |
3995 | ||
3996 | if (!exists) { | |
0a0f39df | 3997 | VLOG_INFO("vHost Device '%s' can't be added - name not found", ifname); |
58397e6c KT |
3998 | |
3999 | return -1; | |
4000 | } | |
4001 | ||
0a0f39df CL |
4002 | VLOG_INFO("vHost Device '%s' has been added on numa node %i", |
4003 | ifname, newnode); | |
4004 | ||
58397e6c KT |
4005 | return 0; |
4006 | } | |
4007 | ||
f3ea2ad2 IM |
4008 | /* Clears mapping for all available queues of vhost interface. */ |
4009 | static void | |
4010 | netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev) | |
4011 | OVS_REQUIRES(dev->mutex) | |
4012 | { | |
4013 | int i; | |
4014 | ||
81acebda | 4015 | for (i = 0; i < dev->up.n_txq; i++) { |
f3ea2ad2 IM |
4016 | dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN; |
4017 | } | |
4018 | } | |
4019 | ||
58397e6c KT |
4020 | /* |
4021 | * Remove a virtio-net device from the specific vhost port. Use dev->remove | |
4022 | * flag to stop any more packets from being sent or received to/from a VM and | |
4023 | * ensure all currently queued packets have been sent/received before removing | |
4024 | * the device. | |
4025 | */ | |
4026 | static void | |
0a0f39df | 4027 | destroy_device(int vid) |
58397e6c | 4028 | { |
d46285a2 | 4029 | struct netdev_dpdk *dev; |
afee281f | 4030 | bool exists = false; |
0a0f39df CL |
4031 | char ifname[IF_NAME_SZ]; |
4032 | ||
58be5c0e | 4033 | rte_vhost_get_ifname(vid, ifname, sizeof ifname); |
58397e6c KT |
4034 | |
4035 | ovs_mutex_lock(&dpdk_mutex); | |
d46285a2 | 4036 | LIST_FOR_EACH (dev, list_node, &dpdk_list) { |
0a0f39df | 4037 | if (netdev_dpdk_get_vid(dev) == vid) { |
58397e6c | 4038 | |
d46285a2 | 4039 | ovs_mutex_lock(&dev->mutex); |
0a0f39df CL |
4040 | dev->vhost_reconfigured = false; |
4041 | ovsrcu_index_set(&dev->vid, -1); | |
35c91567 DM |
4042 | memset(dev->vhost_rxq_enabled, 0, |
4043 | dev->up.n_rxq * sizeof *dev->vhost_rxq_enabled); | |
d46285a2 | 4044 | netdev_dpdk_txq_map_clear(dev); |
81acebda | 4045 | |
e543851d | 4046 | netdev_change_seq_changed(&dev->up); |
d46285a2 | 4047 | ovs_mutex_unlock(&dev->mutex); |
81acebda | 4048 | exists = true; |
afee281f | 4049 | break; |
58397e6c KT |
4050 | } |
4051 | } | |
afee281f | 4052 | |
58397e6c KT |
4053 | ovs_mutex_unlock(&dpdk_mutex); |
4054 | ||
0a0f39df | 4055 | if (exists) { |
afee281f KT |
4056 | /* |
4057 | * Wait for other threads to quiesce after invalidating the device's | |
4058 | * vid, before returning. | |
4059 | */ | |
4060 | ovsrcu_synchronize(); | |
4061 | /* | |
4062 | * As call to ovsrcu_synchronize() will end the quiescent state, | |
4063 | * put thread back into quiescent state before returning. | |
4064 | */ | |
4065 | ovsrcu_quiesce_start(); | |
0a0f39df | 4066 | VLOG_INFO("vHost Device '%s' has been removed", ifname); |
afee281f | 4067 | } else { |
0a0f39df | 4068 | VLOG_INFO("vHost Device '%s' not found", ifname); |
afee281f | 4069 | } |
58397e6c KT |
4070 | } |
4071 | ||
585a5bea | 4072 | static int |
0a0f39df | 4073 | vring_state_changed(int vid, uint16_t queue_id, int enable) |
585a5bea | 4074 | { |
d46285a2 | 4075 | struct netdev_dpdk *dev; |
585a5bea IM |
4076 | bool exists = false; |
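/* Virtqueues come in guest RX/TX pairs, so vring 'queue_id' maps to OVS
 * queue 'queue_id / VIRTIO_QNUM'; the VIRTIO_TXQ half of each pair is the
 * guest transmit ring, which is the receive direction on the OVS side. */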
4077 | int qid = queue_id / VIRTIO_QNUM; | |
35c91567 | 4078 | bool is_rx = (queue_id % VIRTIO_QNUM) == VIRTIO_TXQ; |
0a0f39df CL |
4079 | char ifname[IF_NAME_SZ]; |
4080 | ||
58be5c0e | 4081 | rte_vhost_get_ifname(vid, ifname, sizeof ifname); |
585a5bea | 4082 | |
585a5bea | 4083 | ovs_mutex_lock(&dpdk_mutex); |
d46285a2 | 4084 | LIST_FOR_EACH (dev, list_node, &dpdk_list) { |
c1ff66ac | 4085 | ovs_mutex_lock(&dev->mutex); |
bb9d2623 | 4086 | if (nullable_string_is_equal(ifname, dev->vhost_id)) { |
35c91567 DM |
4087 | if (is_rx) { |
4088 | bool old_state = dev->vhost_rxq_enabled[qid]; | |
4089 | ||
4090 | dev->vhost_rxq_enabled[qid] = enable != 0; | |
4091 | if (old_state != dev->vhost_rxq_enabled[qid]) { | |
4092 | netdev_change_seq_changed(&dev->up); | |
4093 | } | |
585a5bea | 4094 | } else { |
35c91567 DM |
4095 | if (enable) { |
4096 | dev->tx_q[qid].map = qid; | |
4097 | } else { | |
4098 | dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED; | |
4099 | } | |
4100 | netdev_dpdk_remap_txqs(dev); | |
585a5bea | 4101 | } |
585a5bea | 4102 | exists = true; |
d46285a2 | 4103 | ovs_mutex_unlock(&dev->mutex); |
585a5bea IM |
4104 | break; |
4105 | } | |
c1ff66ac | 4106 | ovs_mutex_unlock(&dev->mutex); |
585a5bea IM |
4107 | } |
4108 | ovs_mutex_unlock(&dpdk_mutex); | |
4109 | ||
4110 | if (exists) { | |
35c91567 DM |
4111 | VLOG_INFO("State of queue %d ( %s_qid %d ) of vhost device '%s' " |
4112 | "changed to \'%s\'", queue_id, is_rx == true ? "rx" : "tx", | |
4113 | qid, ifname, (enable == 1) ? "enabled" : "disabled"); | |
585a5bea | 4114 | } else { |
0a0f39df | 4115 | VLOG_INFO("vHost Device '%s' not found", ifname); |
585a5bea IM |
4116 | return -1; |
4117 | } | |
4118 | ||
4119 | return 0; | |
4120 | } | |
4121 | ||
61473a0e DM |
4122 | static void |
4123 | destroy_connection(int vid) | |
4124 | { | |
4125 | struct netdev_dpdk *dev; | |
4126 | char ifname[IF_NAME_SZ]; | |
4127 | bool exists = false; | |
4128 | ||
4129 | rte_vhost_get_ifname(vid, ifname, sizeof ifname); | |
4130 | ||
4131 | ovs_mutex_lock(&dpdk_mutex); | |
4132 | LIST_FOR_EACH (dev, list_node, &dpdk_list) { | |
4133 | ovs_mutex_lock(&dev->mutex); | |
4134 | if (nullable_string_is_equal(ifname, dev->vhost_id)) { | |
4135 | uint32_t qp_num = NR_QUEUE; | |
4136 | ||
4137 | if (netdev_dpdk_get_vid(dev) >= 0) { | |
4138 | VLOG_ERR("Connection on socket '%s' destroyed while vhost " | |
4139 | "device still attached.", dev->vhost_id); | |
4140 | } | |
4141 | ||
4142 | /* Restore the number of queue pairs to default. */ | |
4143 | if (dev->requested_n_txq != qp_num | |
4144 | || dev->requested_n_rxq != qp_num) { | |
4145 | dev->requested_n_rxq = qp_num; | |
4146 | dev->requested_n_txq = qp_num; | |
4147 | netdev_request_reconfigure(&dev->up); | |
4148 | } | |
4149 | ovs_mutex_unlock(&dev->mutex); | |
4150 | exists = true; | |
4151 | break; | |
4152 | } | |
4153 | ovs_mutex_unlock(&dev->mutex); | |
4154 | } | |
4155 | ovs_mutex_unlock(&dpdk_mutex); | |
4156 | ||
4157 | if (exists) { | |
4158 | VLOG_INFO("vHost Device '%s' connection has been destroyed", ifname); | |
4159 | } else { | |
4160 | VLOG_INFO("vHost Device '%s' not found", ifname); | |
4161 | } | |
4162 | } | |
4163 | ||
3d56e4ac EC |
4164 | static |
4165 | void vhost_guest_notified(int vid OVS_UNUSED) | |
4166 | { | |
4167 | COVERAGE_INC(vhost_notification); | |
4168 | } | |
4169 | ||
8492adc2 JS |
4170 | /* |
4171 | * Retrieve the DPDK virtio device ID (vid) associated with a vhostuser | |
4172 | * or vhostuserclient netdev. | |
4173 | * | |
4174 | * Returns a value greater or equal to zero for a valid vid or '-1' if | |
4175 | * there is no valid vid associated. A vid of '-1' must not be used in | |
4177 | * rte_vhost_ API calls. | |
4177 | * | |
4178 | * Once obtained and validated, a vid can be used by a PMD for multiple | |
4179 | * subsequent rte_vhost API calls until the PMD quiesces. A PMD should | |
4180 | * not fetch the vid again for each of a series of API calls. | |
4181 | */ | |
4182 | ||
0a0f39df CL |
4183 | int |
4184 | netdev_dpdk_get_vid(const struct netdev_dpdk *dev) | |
58397e6c | 4185 | { |
0a0f39df | 4186 | return ovsrcu_index_get(&dev->vid); |
58397e6c KT |
4187 | } |
4188 | ||
9509913a IS |
4189 | struct ingress_policer * |
4190 | netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev) | |
4191 | { | |
4192 | return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer); | |
4193 | } | |
4194 | ||
58397e6c | 4195 | static int |
ecc1a34e | 4196 | netdev_dpdk_class_init(void) |
7d1ced01 | 4197 | { |
ecc1a34e DDP |
4198 | static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER; |
4199 | ||
4200 | /* This function can be called for different classes. The initialization | |
4201 | * needs to be done only once */ | |
4202 | if (ovsthread_once_start(&once)) { | |
988fd463 EC |
4203 | int ret; |
4204 | ||
ecc1a34e DDP |
4205 | ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL); |
4206 | unixctl_command_register("netdev-dpdk/set-admin-state", | |
4207 | "[netdev] up|down", 1, 2, | |
4208 | netdev_dpdk_set_admin_state, NULL); | |
4209 | ||
0ee821c2 DB |
4210 | unixctl_command_register("netdev-dpdk/detach", |
4211 | "pci address of device", 1, 1, | |
4212 | netdev_dpdk_detach, NULL); | |
4213 | ||
be481733 IM |
4214 | unixctl_command_register("netdev-dpdk/get-mempool-info", |
4215 | "[netdev]", 0, 1, | |
4216 | netdev_dpdk_get_mempool_info, NULL); | |
4217 | ||
988fd463 EC |
4218 | ret = rte_eth_dev_callback_register(RTE_ETH_ALL, |
4219 | RTE_ETH_EVENT_INTR_RESET, | |
4220 | dpdk_eth_event_callback, NULL); | |
4221 | if (ret != 0) { | |
4222 | VLOG_ERR("Ethernet device callback register error: %s", | |
4223 | rte_strerror(-ret)); | |
4224 | } | |
4225 | ||
ecc1a34e DDP |
4226 | ovsthread_once_done(&once); |
4227 | } | |
362ca396 | 4228 | |
7d1ced01 CL |
4229 | return 0; |
4230 | } | |
4231 | ||
0bf765f7 IS |
4232 | /* QoS Functions */ |
4233 | ||
4234 | /* | |
4235 | * Initialize QoS configuration operations. | |
4236 | */ | |
4237 | static void | |
4238 | qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops) | |
4239 | { | |
4240 | conf->ops = ops; | |
78bd47cf | 4241 | rte_spinlock_init(&conf->lock); |
0bf765f7 IS |
4242 | } |
4243 | ||
4244 | /* | |
4245 | * Search the existing QoS operations in qos_ops, comparing each entry's | |
4246 | * qos_name to 'name'. Returns a pointer to the matching dpdk_qos_ops, | |
4247 | * or NULL if there is no match. | |
4248 | */ | |
4249 | static const struct dpdk_qos_ops * | |
4250 | qos_lookup_name(const char *name) | |
4251 | { | |
4252 | const struct dpdk_qos_ops *const *opsp; | |
4253 | ||
4254 | for (opsp = qos_confs; *opsp != NULL; opsp++) { | |
4255 | const struct dpdk_qos_ops *ops = *opsp; | |
4256 | if (!strcmp(name, ops->qos_name)) { | |
4257 | return ops; | |
4258 | } | |
4259 | } | |
4260 | return NULL; | |
4261 | } | |
4262 | ||
0bf765f7 IS |
4263 | static int |
4264 | netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED, | |
4265 | struct sset *types) | |
4266 | { | |
4267 | const struct dpdk_qos_ops *const *opsp; | |
4268 | ||
4269 | for (opsp = qos_confs; *opsp != NULL; opsp++) { | |
4270 | const struct dpdk_qos_ops *ops = *opsp; | |
4271 | if (ops->qos_construct && ops->qos_name[0] != '\0') { | |
4272 | sset_add(types, ops->qos_name); | |
4273 | } | |
4274 | } | |
4275 | return 0; | |
4276 | } | |
4277 | ||
4278 | static int | |
d46285a2 | 4279 | netdev_dpdk_get_qos(const struct netdev *netdev, |
0bf765f7 IS |
4280 | const char **typep, struct smap *details) |
4281 | { | |
d46285a2 | 4282 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
78bd47cf | 4283 | struct qos_conf *qos_conf; |
0bf765f7 IS |
4284 | int error = 0; |
4285 | ||
d46285a2 | 4286 | ovs_mutex_lock(&dev->mutex); |
78bd47cf DDP |
4287 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); |
4288 | if (qos_conf) { | |
4289 | *typep = qos_conf->ops->qos_name; | |
4290 | error = (qos_conf->ops->qos_get | |
4291 | ? qos_conf->ops->qos_get(qos_conf, details): 0); | |
d03603c4 MC |
4292 | } else { |
4293 | /* No QoS configuration set; return an empty string. */ | |
4294 | *typep = ""; | |
0bf765f7 | 4295 | } |
d46285a2 | 4296 | ovs_mutex_unlock(&dev->mutex); |
0bf765f7 IS |
4297 | |
4298 | return error; | |
4299 | } | |
4300 | ||
4301 | static int | |
78bd47cf DDP |
4302 | netdev_dpdk_set_qos(struct netdev *netdev, const char *type, |
4303 | const struct smap *details) | |
0bf765f7 | 4304 | { |
d46285a2 | 4305 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); |
0bf765f7 | 4306 | const struct dpdk_qos_ops *new_ops = NULL; |
78bd47cf | 4307 | struct qos_conf *qos_conf, *new_qos_conf = NULL; |
0bf765f7 IS |
4308 | int error = 0; |
4309 | ||
d46285a2 | 4310 | ovs_mutex_lock(&dev->mutex); |
0bf765f7 | 4311 | |
78bd47cf | 4312 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); |
0bf765f7 | 4313 | |
78bd47cf DDP |
4314 | new_ops = qos_lookup_name(type); |
4315 | ||
4316 | if (!new_ops || !new_ops->qos_construct) { | |
4317 | new_qos_conf = NULL; | |
4318 | if (type && type[0]) { | |
4319 | error = EOPNOTSUPP; | |
0bf765f7 | 4320 | } |
44975bb0 | 4321 | } else if (qos_conf && qos_conf->ops == new_ops |
78bd47cf DDP |
4322 | && qos_conf->ops->qos_is_equal(qos_conf, details)) { |
4323 | new_qos_conf = qos_conf; | |
0bf765f7 | 4324 | } else { |
78bd47cf | 4325 | error = new_ops->qos_construct(details, &new_qos_conf); |
7ea266e9 IS |
4326 | } |
4327 | ||
7ea266e9 | 4328 | if (error) { |
78bd47cf DDP |
4329 | VLOG_ERR("Failed to set QoS type %s on port %s: %s", |
4330 | type, netdev->name, rte_strerror(error)); | |
4331 | } | |
4332 | ||
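/* Publish the new configuration under RCU.  Datapath threads may still hold
 * a pointer to the old qos_conf, so its destruction is postponed until all
 * current RCU readers have quiesced. */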
4333 | if (new_qos_conf != qos_conf) { | |
4334 | ovsrcu_set(&dev->qos_conf, new_qos_conf); | |
4335 | if (qos_conf) { | |
4336 | ovsrcu_postpone(qos_conf->ops->qos_destruct, qos_conf); | |
4337 | } | |
0bf765f7 IS |
4338 | } |
4339 | ||
d46285a2 | 4340 | ovs_mutex_unlock(&dev->mutex); |
78bd47cf | 4341 | |
0bf765f7 IS |
4342 | return error; |
4343 | } | |
4344 | ||
23c01b19 EC |
4345 | static int |
4346 | netdev_dpdk_get_queue(const struct netdev *netdev, uint32_t queue_id, | |
4347 | struct smap *details) | |
4348 | { | |
4349 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
4350 | struct qos_conf *qos_conf; | |
4351 | int error = 0; | |
4352 | ||
4353 | ovs_mutex_lock(&dev->mutex); | |
4354 | ||
4355 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); | |
4356 | if (!qos_conf || !qos_conf->ops || !qos_conf->ops->qos_queue_get) { | |
4357 | error = EOPNOTSUPP; | |
4358 | } else { | |
4359 | error = qos_conf->ops->qos_queue_get(details, queue_id, qos_conf); | |
4360 | } | |
4361 | ||
4362 | ovs_mutex_unlock(&dev->mutex); | |
4363 | ||
4364 | return error; | |
4365 | } | |
4366 | ||
4367 | static int | |
4368 | netdev_dpdk_set_queue(struct netdev *netdev, uint32_t queue_id, | |
4369 | const struct smap *details) | |
4370 | { | |
4371 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
4372 | struct qos_conf *qos_conf; | |
4373 | int error = 0; | |
4374 | ||
4375 | ovs_mutex_lock(&dev->mutex); | |
4376 | ||
4377 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); | |
4378 | if (!qos_conf || !qos_conf->ops || !qos_conf->ops->qos_queue_construct) { | |
4379 | error = EOPNOTSUPP; | |
4380 | } else { | |
4381 | error = qos_conf->ops->qos_queue_construct(details, queue_id, | |
4382 | qos_conf); | |
4383 | } | |
4384 | ||
4385 | if (error && error != EOPNOTSUPP) { | |
4386 | VLOG_ERR("Failed to set QoS queue %d on port %s: %s", | |
4387 | queue_id, netdev_get_name(netdev), rte_strerror(error)); | |
4388 | } | |
4389 | ||
4390 | ovs_mutex_unlock(&dev->mutex); | |
4391 | ||
4392 | return error; | |
4393 | } | |
4394 | ||
4395 | static int | |
4396 | netdev_dpdk_delete_queue(struct netdev *netdev, uint32_t queue_id) | |
4397 | { | |
4398 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
4399 | struct qos_conf *qos_conf; | |
4400 | int error = 0; | |
4401 | ||
4402 | ovs_mutex_lock(&dev->mutex); | |
4403 | ||
4404 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); | |
4405 | if (qos_conf && qos_conf->ops && qos_conf->ops->qos_queue_destruct) { | |
4406 | qos_conf->ops->qos_queue_destruct(qos_conf, queue_id); | |
4407 | } else { | |
4408 | error = EOPNOTSUPP; | |
4409 | } | |
4410 | ||
4411 | ovs_mutex_unlock(&dev->mutex); | |
4412 | ||
4413 | return error; | |
4414 | } | |
4415 | ||
4416 | static int | |
4417 | netdev_dpdk_get_queue_stats(const struct netdev *netdev, uint32_t queue_id, | |
4418 | struct netdev_queue_stats *stats) | |
4419 | { | |
4420 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
4421 | struct qos_conf *qos_conf; | |
4422 | int error = 0; | |
4423 | ||
4424 | ovs_mutex_lock(&dev->mutex); | |
4425 | ||
4426 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); | |
4427 | if (qos_conf && qos_conf->ops && qos_conf->ops->qos_queue_get_stats) { | |
4428 | qos_conf->ops->qos_queue_get_stats(qos_conf, queue_id, stats); | |
4429 | } else { | |
4430 | error = EOPNOTSUPP; | |
4431 | } | |
4432 | ||
4433 | ovs_mutex_unlock(&dev->mutex); | |
4434 | ||
4435 | return error; | |
4436 | } | |
4437 | ||
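/* Queue dumping follows the generic netdev provider protocol: dump_start()
 * snapshots the configured queue ids into a netdev_dpdk_queue_state,
 * dump_next() returns one queue's details per call until it signals EOF,
 * and dump_done() frees the snapshot.  The qos_conf is re-checked under the
 * device mutex on every step, so a QoS configuration removed mid-dump simply
 * ends the dump early. */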
4438 | static int | |
4439 | netdev_dpdk_queue_dump_start(const struct netdev *netdev, void **statep) | |
4440 | { | |
4441 | int error = 0; | |
4442 | struct qos_conf *qos_conf; | |
4443 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
4444 | ||
4445 | ovs_mutex_lock(&dev->mutex); | |
4446 | ||
4447 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); | |
4448 | if (qos_conf && qos_conf->ops | |
4449 | && qos_conf->ops->qos_queue_dump_state_init) { | |
4450 | struct netdev_dpdk_queue_state *state; | |
4451 | ||
4452 | *statep = state = xmalloc(sizeof *state); | |
4453 | error = qos_conf->ops->qos_queue_dump_state_init(qos_conf, state); | |
4454 | } else { | |
4455 | error = EOPNOTSUPP; | |
4456 | } | |
4457 | ||
4458 | ovs_mutex_unlock(&dev->mutex); | |
4459 | ||
4460 | return error; | |
4461 | } | |
4462 | ||
4463 | static int | |
4464 | netdev_dpdk_queue_dump_next(const struct netdev *netdev, void *state_, | |
4465 | uint32_t *queue_idp, struct smap *details) | |
4466 | { | |
4467 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
4468 | struct netdev_dpdk_queue_state *state = state_; | |
4469 | struct qos_conf *qos_conf; | |
4470 | int error = EOF; | |
4471 | ||
4472 | ovs_mutex_lock(&dev->mutex); | |
4473 | ||
4474 | while (state->cur_queue < state->n_queues) { | |
4475 | uint32_t queue_id = state->queues[state->cur_queue++]; | |
4476 | ||
4477 | qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf); | |
4478 | if (qos_conf && qos_conf->ops && qos_conf->ops->qos_queue_get) { | |
4479 | *queue_idp = queue_id; | |
4480 | error = qos_conf->ops->qos_queue_get(details, queue_id, qos_conf); | |
4481 | break; | |
4482 | } | |
4483 | } | |
4484 | ||
4485 | ovs_mutex_unlock(&dev->mutex); | |
4486 | ||
4487 | return error; | |
4488 | } | |
4489 | ||
4490 | static int | |
4491 | netdev_dpdk_queue_dump_done(const struct netdev *netdev OVS_UNUSED, | |
4492 | void *state_) | |
4493 | { | |
4494 | struct netdev_dpdk_queue_state *state = state_; | |
4495 | ||
4496 | free(state->queues); | |
4497 | free(state); | |
4498 | return 0; | |
4499 | } | |
4500 | ||
4501 | ||
4502 | ||
0bf765f7 IS |
4503 | /* egress-policer details */ |
4504 | ||
4505 | struct egress_policer { | |
4506 | struct qos_conf qos_conf; | |
4507 | struct rte_meter_srtcm_params app_srtcm_params; | |
4508 | struct rte_meter_srtcm egress_meter; | |
03f3f9c0 | 4509 | struct rte_meter_srtcm_profile egress_prof; |
0bf765f7 IS |
4510 | }; |
4511 | ||
78bd47cf DDP |
4512 | static void |
4513 | egress_policer_details_to_param(const struct smap *details, | |
4514 | struct rte_meter_srtcm_params *params) | |
0bf765f7 | 4515 | { |
78bd47cf DDP |
4516 | memset(params, 0, sizeof *params); |
4517 | params->cir = smap_get_ullong(details, "cir", 0); | |
4518 | params->cbs = smap_get_ullong(details, "cbs", 0); | |
4519 | params->ebs = 0; | |
0bf765f7 IS |
4520 | } |
4521 | ||
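/* Example (values are illustrative): a QoS record of type "egress-policer"
 * configured with other_config:cir=46000000 and other_config:cbs=2048
 * arrives here as the 'details' smap {"cir": "46000000", "cbs": "2048"},
 * i.e. a committed rate of 46,000,000 bytes/s with a 2048-byte committed
 * burst; "ebs" is always forced to zero for this policer. */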
4522 | static int | |
78bd47cf DDP |
4523 | egress_policer_qos_construct(const struct smap *details, |
4524 | struct qos_conf **conf) | |
0bf765f7 | 4525 | { |
0bf765f7 | 4526 | struct egress_policer *policer; |
0bf765f7 IS |
4527 | int err = 0; |
4528 | ||
0bf765f7 IS |
4529 | policer = xmalloc(sizeof *policer); |
4530 | qos_conf_init(&policer->qos_conf, &egress_policer_ops); | |
78bd47cf | 4531 | egress_policer_details_to_param(details, &policer->app_srtcm_params); |
03f3f9c0 OM |
4532 | err = rte_meter_srtcm_profile_config(&policer->egress_prof, |
4533 | &policer->app_srtcm_params); | |
4534 | if (!err) { | |
4535 | err = rte_meter_srtcm_config(&policer->egress_meter, | |
4536 | &policer->egress_prof); | |
4537 | } | |
4538 | ||
78bd47cf DDP |
4539 | if (!err) { |
4540 | *conf = &policer->qos_conf; | |
4541 | } else { | |
03f3f9c0 | 4542 | VLOG_ERR("Could not create rte meter for egress policer"); |
7ea266e9 | 4543 | free(policer); |
78bd47cf | 4544 | *conf = NULL; |
7ea266e9 IS |
4545 | err = -err; |
4546 | } | |
0bf765f7 IS |
4547 | |
4548 | return err; | |
4549 | } | |
4550 | ||
4551 | static void | |
78bd47cf | 4552 | egress_policer_qos_destruct(struct qos_conf *conf) |
0bf765f7 IS |
4553 | { |
4554 | struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer, | |
78bd47cf | 4555 | qos_conf); |
0bf765f7 IS |
4556 | free(policer); |
4557 | } | |
4558 | ||
4559 | static int | |
78bd47cf | 4560 | egress_policer_qos_get(const struct qos_conf *conf, struct smap *details) |
0bf765f7 | 4561 | { |
78bd47cf DDP |
4562 | struct egress_policer *policer = |
4563 | CONTAINER_OF(conf, struct egress_policer, qos_conf); | |
4564 | ||
4565 | smap_add_format(details, "cir", "%"PRIu64, policer->app_srtcm_params.cir); | |
4566 | smap_add_format(details, "cbs", "%"PRIu64, policer->app_srtcm_params.cbs); | |
050c60bf | 4567 | |
0bf765f7 IS |
4568 | return 0; |
4569 | } | |
4570 | ||
78bd47cf | 4571 | static bool |
47a45d86 KT |
4572 | egress_policer_qos_is_equal(const struct qos_conf *conf, |
4573 | const struct smap *details) | |
0bf765f7 | 4574 | { |
78bd47cf DDP |
4575 | struct egress_policer *policer = |
4576 | CONTAINER_OF(conf, struct egress_policer, qos_conf); | |
4577 | struct rte_meter_srtcm_params params; | |
0bf765f7 | 4578 | |
78bd47cf | 4579 | egress_policer_details_to_param(details, ¶ms); |
7ea266e9 | 4580 | |
78bd47cf | 4581 | return !memcmp(¶ms, &policer->app_srtcm_params, sizeof params); |
0bf765f7 IS |
4582 | } |
4583 | ||
0bf765f7 | 4584 | static int |
3e90f7d7 | 4585 | egress_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts, int pkt_cnt, |
7d7ded7a | 4586 | bool should_steal) |
0bf765f7 | 4587 | { |
0bf765f7 | 4588 | int cnt = 0; |
78bd47cf DDP |
4589 | struct egress_policer *policer = |
4590 | CONTAINER_OF(conf, struct egress_policer, qos_conf); | |
0bf765f7 | 4591 | |
e61bdffc EC |
4592 | cnt = srtcm_policer_run_single_packet(&policer->egress_meter, |
4593 | &policer->egress_prof, pkts, | |
4594 | pkt_cnt, should_steal); | |
0bf765f7 IS |
4595 | |
4596 | return cnt; | |
4597 | } | |
4598 | ||
4599 | static const struct dpdk_qos_ops egress_policer_ops = { | |
23c01b19 EC |
4600 | .qos_name = "egress-policer", /* qos_name */ |
4601 | .qos_construct = egress_policer_qos_construct, | |
4602 | .qos_destruct = egress_policer_qos_destruct, | |
4603 | .qos_get = egress_policer_qos_get, | |
4604 | .qos_is_equal = egress_policer_qos_is_equal, | |
4605 | .qos_run = egress_policer_run | |
0bf765f7 IS |
4606 | }; |
4607 | ||
e61bdffc EC |
4608 | /* trtcm-policer details */ |
4609 | ||
4610 | struct trtcm_policer { | |
4611 | struct qos_conf qos_conf; | |
4612 | struct rte_meter_trtcm_rfc4115_params meter_params; | |
4613 | struct rte_meter_trtcm_rfc4115_profile meter_profile; | |
4614 | struct rte_meter_trtcm_rfc4115 meter; | |
4615 | struct netdev_queue_stats stats; | |
4616 | struct hmap queues; | |
4617 | }; | |
4618 | ||
4619 | struct trtcm_policer_queue { | |
4620 | struct hmap_node hmap_node; | |
4621 | uint32_t queue_id; | |
4622 | struct rte_meter_trtcm_rfc4115_params meter_params; | |
4623 | struct rte_meter_trtcm_rfc4115_profile meter_profile; | |
4624 | struct rte_meter_trtcm_rfc4115 meter; | |
4625 | struct netdev_queue_stats stats; | |
4626 | }; | |
4627 | ||
4628 | static void | |
4629 | trtcm_policer_details_to_param(const struct smap *details, | |
4630 | struct rte_meter_trtcm_rfc4115_params *params) | |
4631 | { | |
4632 | memset(params, 0, sizeof *params); | |
4633 | params->cir = smap_get_ullong(details, "cir", 0); | |
4634 | params->eir = smap_get_ullong(details, "eir", 0); | |
4635 | params->cbs = smap_get_ullong(details, "cbs", 0); | |
4636 | params->ebs = smap_get_ullong(details, "ebs", 0); | |
4637 | } | |
4638 | ||
4639 | static void | |
4640 | trtcm_policer_param_to_detail( | |
4641 | const struct rte_meter_trtcm_rfc4115_params *params, | |
4642 | struct smap *details) | |
4643 | { | |
4644 | smap_add_format(details, "cir", "%"PRIu64, params->cir); | |
4645 | smap_add_format(details, "eir", "%"PRIu64, params->eir); | |
4646 | smap_add_format(details, "cbs", "%"PRIu64, params->cbs); | |
4647 | smap_add_format(details, "ebs", "%"PRIu64, params->ebs); | |
4648 | } | |
4649 | ||
4650 | ||
4651 | static int | |
4652 | trtcm_policer_qos_construct(const struct smap *details, | |
4653 | struct qos_conf **conf) | |
4654 | { | |
4655 | struct trtcm_policer *policer; | |
4656 | int err = 0; | |
4657 | ||
4658 | policer = xmalloc(sizeof *policer); | |
4659 | qos_conf_init(&policer->qos_conf, &trtcm_policer_ops); | |
4660 | trtcm_policer_details_to_param(details, &policer->meter_params); | |
4661 | err = rte_meter_trtcm_rfc4115_profile_config(&policer->meter_profile, | |
4662 | &policer->meter_params); | |
4663 | if (!err) { | |
4664 | err = rte_meter_trtcm_rfc4115_config(&policer->meter, | |
4665 | &policer->meter_profile); | |
4666 | } | |
4667 | ||
4668 | if (!err) { | |
4669 | *conf = &policer->qos_conf; | |
4670 | memset(&policer->stats, 0, sizeof policer->stats); | |
4671 | hmap_init(&policer->queues); | |
4672 | } else { | |
4673 | free(policer); | |
4674 | *conf = NULL; | |
4675 | err = -err; | |
4676 | } | |
4677 | ||
4678 | return err; | |
4679 | } | |
4680 | ||
4681 | static void | |
4682 | trtcm_policer_qos_destruct(struct qos_conf *conf) | |
4683 | { | |
4684 | struct trtcm_policer_queue *queue, *next_queue; | |
4685 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4686 | qos_conf); | |
4687 | ||
4688 | HMAP_FOR_EACH_SAFE (queue, next_queue, hmap_node, &policer->queues) { | |
4689 | hmap_remove(&policer->queues, &queue->hmap_node); | |
4690 | free(queue); | |
4691 | } | |
4692 | hmap_destroy(&policer->queues); | |
4693 | free(policer); | |
4694 | } | |
4695 | ||
4696 | static int | |
4697 | trtcm_policer_qos_get(const struct qos_conf *conf, struct smap *details) | |
4698 | { | |
4699 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4700 | qos_conf); | |
4701 | ||
4702 | trtcm_policer_param_to_detail(&policer->meter_params, details); | |
4703 | return 0; | |
4704 | } | |
4705 | ||
4706 | static bool | |
4707 | trtcm_policer_qos_is_equal(const struct qos_conf *conf, | |
4708 | const struct smap *details) | |
4709 | { | |
4710 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4711 | qos_conf); | |
4712 | struct rte_meter_trtcm_rfc4115_params params; | |
4713 | ||
4714 | trtcm_policer_details_to_param(details, ¶ms); | |
4715 | ||
4716 | return !memcmp(¶ms, &policer->meter_params, sizeof params); | |
4717 | } | |
4718 | ||
4719 | static struct trtcm_policer_queue * | |
4720 | trtcm_policer_qos_find_queue(struct trtcm_policer *policer, uint32_t queue_id) | |
4721 | { | |
4722 | struct trtcm_policer_queue *queue; | |
4723 | HMAP_FOR_EACH_WITH_HASH (queue, hmap_node, hash_2words(queue_id, 0), | |
4724 | &policer->queues) { | |
4725 | if (queue->queue_id == queue_id) { | |
4726 | return queue; | |
4727 | } | |
4728 | } | |
4729 | return NULL; | |
4730 | } | |
4731 | ||
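/* Each packet is metered twice: first against its queue's RFC 4115 two-rate
 * three-color meter in color-blind mode, then against the port-level meter
 * in color-aware mode with the queue verdict as the input color.  Only a
 * RED result from the port-level meter drops the packet; the queue-level
 * result feeds the per-queue statistics.  The Ethernet header is excluded
 * from the metered length. */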
4732 | static inline bool | |
4733 | trtcm_policer_run_single_packet(struct trtcm_policer *policer, | |
4734 | struct rte_mbuf *pkt, uint64_t time) | |
4735 | { | |
4736 | enum rte_color pkt_color; | |
4737 | struct trtcm_policer_queue *queue; | |
4738 | uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct rte_ether_hdr); | |
4739 | struct dp_packet *dpkt = CONTAINER_OF(pkt, struct dp_packet, mbuf); | |
4740 | ||
4741 | queue = trtcm_policer_qos_find_queue(policer, dpkt->md.skb_priority); | |
4742 | if (!queue) { | |
4743 | /* If no queue is found, use the default queue, which MUST exist. */ | |
4744 | queue = trtcm_policer_qos_find_queue(policer, 0); | |
4745 | if (!queue) { | |
4746 | return false; | |
4747 | } | |
4748 | } | |
4749 | ||
4750 | pkt_color = rte_meter_trtcm_rfc4115_color_blind_check(&queue->meter, | |
4751 | &queue->meter_profile, | |
4752 | time, | |
4753 | pkt_len); | |
4754 | ||
4755 | if (pkt_color == RTE_COLOR_RED) { | |
4756 | queue->stats.tx_errors++; | |
4757 | } else { | |
4758 | queue->stats.tx_bytes += pkt_len; | |
4759 | queue->stats.tx_packets++; | |
4760 | } | |
4761 | ||
4762 | pkt_color = rte_meter_trtcm_rfc4115_color_aware_check(&policer->meter, | |
4763 | &policer->meter_profile, | |
4764 | time, pkt_len, | |
4765 | pkt_color); | |
4766 | ||
4767 | if (pkt_color == RTE_COLOR_RED) { | |
4768 | policer->stats.tx_errors++; | |
4769 | return false; | |
4770 | } | |
4771 | ||
4772 | policer->stats.tx_bytes += pkt_len; | |
4773 | policer->stats.tx_packets++; | |
4774 | return true; | |
4775 | } | |
4776 | ||
4777 | static int | |
4778 | trtcm_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts, int pkt_cnt, | |
4779 | bool should_steal) | |
4780 | { | |
4781 | int i = 0; | |
4782 | int cnt = 0; | |
4783 | struct rte_mbuf *pkt = NULL; | |
4784 | uint64_t current_time = rte_rdtsc(); | |
4785 | ||
4786 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4787 | qos_conf); | |
4788 | ||
4789 | for (i = 0; i < pkt_cnt; i++) { | |
4790 | pkt = pkts[i]; | |
4791 | ||
4792 | if (trtcm_policer_run_single_packet(policer, pkt, current_time)) { | |
4793 | if (cnt != i) { | |
4794 | pkts[cnt] = pkt; | |
4795 | } | |
4796 | cnt++; | |
4797 | } else { | |
4798 | if (should_steal) { | |
4799 | rte_pktmbuf_free(pkt); | |
4800 | } | |
4801 | } | |
4802 | } | |
4803 | return cnt; | |
4804 | } | |
4805 | ||
4806 | static int | |
4807 | trtcm_policer_qos_queue_construct(const struct smap *details, | |
4808 | uint32_t queue_id, struct qos_conf *conf) | |
4809 | { | |
4810 | int err = 0; | |
4811 | struct trtcm_policer_queue *queue; | |
4812 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4813 | qos_conf); | |
4814 | ||
4815 | queue = trtcm_policer_qos_find_queue(policer, queue_id); | |
4816 | if (!queue) { | |
4817 | queue = xmalloc(sizeof *queue); | |
4818 | queue->queue_id = queue_id; | |
4819 | memset(&queue->stats, 0, sizeof queue->stats); | |
4820 | queue->stats.created = time_msec(); | |
4821 | hmap_insert(&policer->queues, &queue->hmap_node, | |
4822 | hash_2words(queue_id, 0)); | |
4823 | } | |
4824 | if (queue_id == 0 && smap_is_empty(details)) { | |
4825 | /* No default queue configured; use the port-level values. */ | |
4826 | memcpy(&queue->meter_params, &policer->meter_params, | |
4827 | sizeof queue->meter_params); | |
4828 | } else { | |
4829 | trtcm_policer_details_to_param(details, &queue->meter_params); | |
4830 | } | |
4831 | ||
4832 | err = rte_meter_trtcm_rfc4115_profile_config(&queue->meter_profile, | |
4833 | &queue->meter_params); | |
4834 | ||
4835 | if (!err) { | |
4836 | err = rte_meter_trtcm_rfc4115_config(&queue->meter, | |
4837 | &queue->meter_profile); | |
4838 | } | |
4839 | if (err) { | |
4840 | hmap_remove(&policer->queues, &queue->hmap_node); | |
4841 | free(queue); | |
4842 | err = -err; | |
4843 | } | |
4844 | return err; | |
4845 | } | |
4846 | ||
4847 | static void | |
4848 | trtcm_policer_qos_queue_destruct(struct qos_conf *conf, uint32_t queue_id) | |
4849 | { | |
4850 | struct trtcm_policer_queue *queue; | |
4851 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4852 | qos_conf); | |
4853 | ||
4854 | queue = trtcm_policer_qos_find_queue(policer, queue_id); | |
4855 | if (queue) { | |
4856 | hmap_remove(&policer->queues, &queue->hmap_node); | |
4857 | free(queue); | |
4858 | } | |
4859 | } | |
4860 | ||
4861 | static int | |
4862 | trtcm_policer_qos_queue_get(struct smap *details, uint32_t queue_id, | |
4863 | const struct qos_conf *conf) | |
4864 | { | |
4865 | struct trtcm_policer_queue *queue; | |
4866 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4867 | qos_conf); | |
4868 | ||
4869 | queue = trtcm_policer_qos_find_queue(policer, queue_id); | |
4870 | if (!queue) { | |
4871 | return EINVAL; | |
4872 | } | |
4873 | ||
4874 | trtcm_policer_param_to_detail(&queue->meter_params, details); | |
4875 | return 0; | |
4876 | } | |
4877 | ||
4878 | static int | |
4879 | trtcm_policer_qos_queue_get_stats(const struct qos_conf *conf, | |
4880 | uint32_t queue_id, | |
4881 | struct netdev_queue_stats *stats) | |
4882 | { | |
4883 | struct trtcm_policer_queue *queue; | |
4884 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4885 | qos_conf); | |
4886 | ||
4887 | queue = trtcm_policer_qos_find_queue(policer, queue_id); | |
4888 | if (!queue) { | |
4889 | return EINVAL; | |
4890 | } | |
4891 | memcpy(stats, &queue->stats, sizeof *stats); | |
4892 | return 0; | |
4893 | } | |
4894 | ||
4895 | static int | |
4896 | trtcm_policer_qos_queue_dump_state_init(const struct qos_conf *conf, | |
4897 | struct netdev_dpdk_queue_state *state) | |
4898 | { | |
4899 | uint32_t i = 0; | |
4900 | struct trtcm_policer_queue *queue; | |
4901 | struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer, | |
4902 | qos_conf); | |
4903 | ||
4904 | state->n_queues = hmap_count(&policer->queues); | |
4905 | state->cur_queue = 0; | |
4906 | state->queues = xmalloc(state->n_queues * sizeof *state->queues); | |
4907 | ||
4908 | HMAP_FOR_EACH (queue, hmap_node, &policer->queues) { | |
4909 | state->queues[i++] = queue->queue_id; | |
4910 | } | |
4911 | return 0; | |
4912 | } | |
4913 | ||
4914 | static const struct dpdk_qos_ops trtcm_policer_ops = { | |
4915 | .qos_name = "trtcm-policer", | |
4916 | .qos_construct = trtcm_policer_qos_construct, | |
4917 | .qos_destruct = trtcm_policer_qos_destruct, | |
4918 | .qos_get = trtcm_policer_qos_get, | |
4919 | .qos_is_equal = trtcm_policer_qos_is_equal, | |
4920 | .qos_run = trtcm_policer_run, | |
4921 | .qos_queue_construct = trtcm_policer_qos_queue_construct, | |
4922 | .qos_queue_destruct = trtcm_policer_qos_queue_destruct, | |
4923 | .qos_queue_get = trtcm_policer_qos_queue_get, | |
4924 | .qos_queue_get_stats = trtcm_policer_qos_queue_get_stats, | |
4925 | .qos_queue_dump_state_init = trtcm_policer_qos_queue_dump_state_init | |
4926 | }; | |
4927 | ||
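/* A new QoS implementation plugs in by providing a dpdk_qos_ops structure
 * like the two above and (presumably, as elsewhere in this file) adding it
 * to the qos_confs array that qos_lookup_name() walks.  The queue-related
 * hooks are optional: when one is missing, the corresponding netdev callback
 * simply returns EOPNOTSUPP. */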
050c60bf DDP |
4928 | static int |
4929 | netdev_dpdk_reconfigure(struct netdev *netdev) | |
4930 | { | |
4931 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
4932 | int err = 0; | |
4933 | ||
050c60bf DDP |
4934 | ovs_mutex_lock(&dev->mutex); |
4935 | ||
4936 | if (netdev->n_txq == dev->requested_n_txq | |
0072e931 | 4937 | && netdev->n_rxq == dev->requested_n_rxq |
b685696b | 4938 | && dev->mtu == dev->requested_mtu |
f8b64a61 | 4939 | && dev->lsc_interrupt_mode == dev->requested_lsc_interrupt_mode |
b685696b | 4940 | && dev->rxq_size == dev->requested_rxq_size |
bd4e172b | 4941 | && dev->txq_size == dev->requested_txq_size |
606f6650 | 4942 | && dev->socket_id == dev->requested_socket_id |
988fd463 | 4943 | && dev->started && !dev->reset_needed) { |
050c60bf DDP |
4944 | /* Reconfiguration is unnecessary */ |
4945 | ||
4946 | goto out; | |
4947 | } | |
4948 | ||
988fd463 EC |
4949 | if (dev->reset_needed) { |
4950 | rte_eth_dev_reset(dev->port_id); | |
4951 | if_notifier_manual_report(); | |
4952 | dev->reset_needed = false; | |
4953 | } else { | |
4954 | rte_eth_dev_stop(dev->port_id); | |
4955 | } | |
4956 | ||
606f6650 | 4957 | dev->started = false; |
050c60bf | 4958 | |
d555d9bd | 4959 | err = netdev_dpdk_mempool_configure(dev); |
b6b26021 | 4960 | if (err && err != EEXIST) { |
d555d9bd | 4961 | goto out; |
0072e931 MK |
4962 | } |
4963 | ||
f8b64a61 RM |
4964 | dev->lsc_interrupt_mode = dev->requested_lsc_interrupt_mode; |
4965 | ||
050c60bf DDP |
4966 | netdev->n_txq = dev->requested_n_txq; |
4967 | netdev->n_rxq = dev->requested_n_rxq; | |
4968 | ||
b685696b CL |
4969 | dev->rxq_size = dev->requested_rxq_size; |
4970 | dev->txq_size = dev->requested_txq_size; | |
4971 | ||
050c60bf DDP |
4972 | rte_free(dev->tx_q); |
4973 | err = dpdk_eth_dev_init(dev); | |
29cf9c1b FL |
4974 | if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) { |
4975 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_TSO; | |
4976 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_CKSUM; | |
8c5163fe | 4977 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_UDP_CKSUM; |
29cf9c1b | 4978 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_IPV4_CKSUM; |
35b5586b FL |
4979 | if (dev->hw_ol_features & NETDEV_TX_SCTP_CHECKSUM_OFFLOAD) { |
4980 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_SCTP_CKSUM; | |
4981 | } | |
29cf9c1b FL |
4982 | } |
4983 | ||
eff23640 DDP |
4984 | dev->tx_q = netdev_dpdk_alloc_txq(netdev->n_txq); |
4985 | if (!dev->tx_q) { | |
4986 | err = ENOMEM; | |
4987 | } | |
050c60bf | 4988 | |
0072e931 MK |
4989 | netdev_change_seq_changed(netdev); |
4990 | ||
050c60bf | 4991 | out: |
050c60bf | 4992 | ovs_mutex_unlock(&dev->mutex); |
050c60bf DDP |
4993 | return err; |
4994 | } | |
4995 | ||
7f381c2e | 4996 | static int |
2d24d165 | 4997 | dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev) |
2d24d165 | 4998 | OVS_REQUIRES(dev->mutex) |
050c60bf | 4999 | { |
2d24d165 CL |
5000 | dev->up.n_txq = dev->requested_n_txq; |
5001 | dev->up.n_rxq = dev->requested_n_rxq; | |
96e9b168 | 5002 | int err; |
050c60bf | 5003 | |
35c91567 DM |
5004 | /* Always keep RX queue 0 enabled for implementations that won't |
5005 | * report vring states. */ | |
5006 | dev->vhost_rxq_enabled[0] = true; | |
5007 | ||
81acebda IM |
5008 | /* Enable TX queue 0 by default if it wasn't disabled. */ |
5009 | if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) { | |
5010 | dev->tx_q[0].map = 0; | |
5011 | } | |
5012 | ||
29cf9c1b FL |
5013 | if (userspace_tso_enabled()) { |
5014 | dev->hw_ol_features |= NETDEV_TX_TSO_OFFLOAD; | |
5015 | VLOG_DBG("%s: TSO enabled on vhost port", netdev_get_name(&dev->up)); | |
5016 | } | |
5017 | ||
81acebda IM |
5018 | netdev_dpdk_remap_txqs(dev); |
5019 | ||
d555d9bd | 5020 | err = netdev_dpdk_mempool_configure(dev); |
b6b26021 | 5021 | if (!err) { |
43307ad0 | 5022 | /* A new mempool was created or re-used. */ |
d555d9bd | 5023 | netdev_change_seq_changed(&dev->up); |
03f3f9c0 | 5024 | } else if (err != EEXIST) { |
b6b26021 | 5025 | return err; |
db8f13b0 | 5026 | } |
0a0f39df | 5027 | if (netdev_dpdk_get_vid(dev) >= 0) { |
894af647 | 5028 | if (dev->vhost_reconfigured == false) { |
5029 | dev->vhost_reconfigured = true; | |
5030 | /* Carrier status may need updating. */ | |
5031 | netdev_change_seq_changed(&dev->up); | |
5032 | } | |
81acebda | 5033 | } |
7f381c2e DDP |
5034 | |
5035 | return 0; | |
2d24d165 CL |
5036 | } |
5037 | ||
5038 | static int | |
5039 | netdev_dpdk_vhost_reconfigure(struct netdev *netdev) | |
5040 | { | |
5041 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
7f381c2e | 5042 | int err; |
2d24d165 | 5043 | |
2d24d165 | 5044 | ovs_mutex_lock(&dev->mutex); |
7f381c2e | 5045 | err = dpdk_vhost_reconfigure_helper(dev); |
2d24d165 | 5046 | ovs_mutex_unlock(&dev->mutex); |
7f381c2e DDP |
5047 | |
5048 | return err; | |
2d24d165 CL |
5049 | } |
5050 | ||
5051 | static int | |
5052 | netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev) | |
5053 | { | |
5054 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
7f381c2e | 5055 | int err; |
a14d1cc8 | 5056 | uint64_t vhost_flags = 0; |
514950d3 | 5057 | uint64_t vhost_unsup_flags; |
2d24d165 | 5058 | |
2d24d165 CL |
5059 | ovs_mutex_lock(&dev->mutex); |
5060 | ||
c1ff66ac CL |
5061 | /* Configure vHost client mode if requested and if the following criteria |
5062 | * are met: | |
2d24d165 CL |
5063 | * 1. Device hasn't been registered yet. |
5064 | * 2. A path has been specified. | |
c1ff66ac | 5065 | */ |
bb9d2623 | 5066 | if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT) && dev->vhost_id) { |
a14d1cc8 MK |
5067 | /* Register client-mode device. */ |
5068 | vhost_flags |= RTE_VHOST_USER_CLIENT; | |
5069 | ||
e666e8e0 FL |
5070 | /* There is no support for multi-segment buffers. */ | |
5071 | vhost_flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT; | |
5072 | ||
a14d1cc8 MK |
5073 | /* Enable IOMMU support, if explicitly requested. */ |
5074 | if (dpdk_vhost_iommu_enabled()) { | |
5075 | vhost_flags |= RTE_VHOST_USER_IOMMU_SUPPORT; | |
5076 | } | |
10087cba | 5077 | |
30e834dc LB |
5078 | /* Enable POSTCOPY support, if explicitly requested. */ |
5079 | if (dpdk_vhost_postcopy_enabled()) { | |
5080 | vhost_flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT; | |
5081 | } | |
5082 | ||
29cf9c1b FL |
5083 | /* Enable External Buffers if TCP Segmentation Offload is enabled. */ |
5084 | if (userspace_tso_enabled()) { | |
5085 | vhost_flags |= RTE_VHOST_USER_EXTBUF_SUPPORT; | |
5086 | } | |
5087 | ||
a14d1cc8 | 5088 | err = rte_vhost_driver_register(dev->vhost_id, vhost_flags); |
c1ff66ac | 5089 | if (err) { |
2d24d165 CL |
5090 | VLOG_ERR("vhost-user device setup failure for device %s\n", |
5091 | dev->vhost_id); | |
7f381c2e | 5092 | goto unlock; |
c1ff66ac | 5093 | } else { |
2d24d165 | 5094 | /* Configuration successful */ |
a14d1cc8 | 5095 | dev->vhost_driver_flags |= vhost_flags; |
2d24d165 CL |
5096 | VLOG_INFO("vHost User device '%s' created in 'client' mode, " |
5097 | "using client socket '%s'", | |
5098 | dev->up.name, dev->vhost_id); | |
c1ff66ac | 5099 | } |
f3e7ec25 MW |
5100 | |
5101 | err = rte_vhost_driver_callback_register(dev->vhost_id, | |
5102 | &virtio_net_device_ops); | |
5103 | if (err) { | |
5104 | VLOG_ERR("rte_vhost_driver_callback_register failed for " | |
5105 | "vhost user client port: %s\n", dev->up.name); | |
5106 | goto unlock; | |
5107 | } | |
5108 | ||
29cf9c1b FL |
5109 | if (userspace_tso_enabled()) { |
5110 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_TSO; | |
5111 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_CKSUM; | |
8c5163fe | 5112 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_UDP_CKSUM; |
35b5586b | 5113 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_SCTP_CKSUM; |
29cf9c1b | 5114 | netdev->ol_flags |= NETDEV_TX_OFFLOAD_IPV4_CKSUM; |
514950d3 FL |
5115 | vhost_unsup_flags = 1ULL << VIRTIO_NET_F_HOST_ECN |
5116 | | 1ULL << VIRTIO_NET_F_HOST_UFO; | |
29cf9c1b | 5117 | } else { |
514950d3 FL |
5118 | /* This disables checksum offloading and all the features |
5119 | * that depend on it (TSO, UFO, ECN), according to the virtio | |
5120 | * specification. */ | |
5121 | vhost_unsup_flags = 1ULL << VIRTIO_NET_F_CSUM; | |
5122 | } | |
5123 | ||
5124 | err = rte_vhost_driver_disable_features(dev->vhost_id, | |
5125 | vhost_unsup_flags); | |
5126 | if (err) { | |
5127 | VLOG_ERR("rte_vhost_driver_disable_features failed for " | |
5128 | "vhost user client port: %s\n", dev->up.name); | |
5129 | goto unlock; | |
f3e7ec25 MW |
5130 | } |
5131 | ||
5132 | err = rte_vhost_driver_start(dev->vhost_id); | |
5133 | if (err) { | |
5134 | VLOG_ERR("rte_vhost_driver_start failed for vhost user " | |
5135 | "client port: %s\n", dev->up.name); | |
5136 | goto unlock; | |
5137 | } | |
c1ff66ac CL |
5138 | } |
5139 | ||
7f381c2e DDP |
5140 | err = dpdk_vhost_reconfigure_helper(dev); |
5141 | ||
5142 | unlock: | |
050c60bf | 5143 | ovs_mutex_unlock(&dev->mutex); |
050c60bf | 5144 | |
7f381c2e | 5145 | return err; |
050c60bf DDP |
5146 | } |
5147 | ||
2f7f9284 EB |
5148 | int |
5149 | netdev_dpdk_get_port_id(struct netdev *netdev) | |
5150 | { | |
5151 | struct netdev_dpdk *dev; | |
5152 | int ret = -1; | |
5153 | ||
5154 | if (!is_dpdk_class(netdev->netdev_class)) { | |
5155 | goto out; | |
5156 | } | |
5157 | ||
5158 | dev = netdev_dpdk_cast(netdev); | |
5159 | ovs_mutex_lock(&dev->mutex); | |
5160 | ret = dev->port_id; | |
5161 | ovs_mutex_unlock(&dev->mutex); | |
5162 | out: | |
5163 | return ret; | |
5164 | } | |
5165 | ||
5fc5c50f IM |
5166 | bool |
5167 | netdev_dpdk_flow_api_supported(struct netdev *netdev) | |
5168 | { | |
5169 | struct netdev_dpdk *dev; | |
5170 | bool ret = false; | |
5171 | ||
5172 | if (!is_dpdk_class(netdev->netdev_class)) { | |
5173 | goto out; | |
5174 | } | |
5175 | ||
5176 | dev = netdev_dpdk_cast(netdev); | |
5177 | ovs_mutex_lock(&dev->mutex); | |
5178 | if (dev->type == DPDK_DEV_ETH) { | |
5179 | /* TODO: Check whether we are able to offload some minimal flow. */ | |
5180 | ret = true; | |
5181 | } | |
5182 | ovs_mutex_unlock(&dev->mutex); | |
5183 | out: | |
5184 | return ret; | |
5185 | } | |
5186 | ||
6775bdfc RBY |
5187 | int |
5188 | netdev_dpdk_rte_flow_destroy(struct netdev *netdev, | |
5189 | struct rte_flow *rte_flow, | |
5190 | struct rte_flow_error *error) | |
5191 | { | |
5192 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
5193 | int ret; | |
5194 | ||
5195 | ovs_mutex_lock(&dev->mutex); | |
5196 | ret = rte_flow_destroy(dev->port_id, rte_flow, error); | |
5197 | ovs_mutex_unlock(&dev->mutex); | |
5198 | return ret; | |
5199 | } | |
5200 | ||
5201 | struct rte_flow * | |
5202 | netdev_dpdk_rte_flow_create(struct netdev *netdev, | |
5203 | const struct rte_flow_attr *attr, | |
5204 | const struct rte_flow_item *items, | |
5205 | const struct rte_flow_action *actions, | |
5206 | struct rte_flow_error *error) | |
5207 | { | |
5208 | struct rte_flow *flow; | |
5209 | struct netdev_dpdk *dev = netdev_dpdk_cast(netdev); | |
5210 | ||
5211 | ovs_mutex_lock(&dev->mutex); | |
5212 | flow = rte_flow_create(dev->port_id, attr, items, actions, error); | |
5213 | ovs_mutex_unlock(&dev->mutex); | |
5214 | return flow; | |
5215 | } | |
e8a2b5bf | 5216 | |
63556d85 EB |
5217 | int |
5218 | netdev_dpdk_rte_flow_query_count(struct netdev *netdev, | |
5219 | struct rte_flow *rte_flow, | |
5220 | struct rte_flow_query_count *query, | |
5221 | struct rte_flow_error *error) | |
5222 | { | |
5223 | struct rte_flow_action_count count = { .shared = 0, .id = 0 }; | |
5224 | const struct rte_flow_action actions[] = { | |
5225 | { | |
5226 | .type = RTE_FLOW_ACTION_TYPE_COUNT, | |
5227 | .conf = &count, | |
5228 | }, | |
5229 | { | |
5230 | .type = RTE_FLOW_ACTION_TYPE_END, | |
5231 | }, | |
5232 | }; | |
5233 | struct netdev_dpdk *dev; | |
5234 | int ret; | |
5235 | ||
5236 | if (!is_dpdk_class(netdev->netdev_class)) { | |
5237 | return -1; | |
5238 | } | |
5239 | ||
5240 | dev = netdev_dpdk_cast(netdev); | |
5241 | ovs_mutex_lock(&dev->mutex); | |
5242 | ret = rte_flow_query(dev->port_id, rte_flow, actions, query, error); | |
5243 | ovs_mutex_unlock(&dev->mutex); | |
5244 | return ret; | |
5245 | } | |
5246 | ||
89c09c1c BP |
5247 | #define NETDEV_DPDK_CLASS_COMMON \ |
5248 | .is_pmd = true, \ | |
5249 | .alloc = netdev_dpdk_alloc, \ | |
5250 | .dealloc = netdev_dpdk_dealloc, \ | |
5251 | .get_config = netdev_dpdk_get_config, \ | |
5252 | .get_numa_id = netdev_dpdk_get_numa_id, \ | |
5253 | .set_etheraddr = netdev_dpdk_set_etheraddr, \ | |
5254 | .get_etheraddr = netdev_dpdk_get_etheraddr, \ | |
5255 | .get_mtu = netdev_dpdk_get_mtu, \ | |
5256 | .set_mtu = netdev_dpdk_set_mtu, \ | |
5257 | .get_ifindex = netdev_dpdk_get_ifindex, \ | |
5258 | .get_carrier_resets = netdev_dpdk_get_carrier_resets, \ | |
5259 | .set_miimon_interval = netdev_dpdk_set_miimon, \ | |
5260 | .set_policing = netdev_dpdk_set_policing, \ | |
5261 | .get_qos_types = netdev_dpdk_get_qos_types, \ | |
5262 | .get_qos = netdev_dpdk_get_qos, \ | |
5263 | .set_qos = netdev_dpdk_set_qos, \ | |
23c01b19 EC |
5264 | .get_queue = netdev_dpdk_get_queue, \ |
5265 | .set_queue = netdev_dpdk_set_queue, \ | |
5266 | .delete_queue = netdev_dpdk_delete_queue, \ | |
5267 | .get_queue_stats = netdev_dpdk_get_queue_stats, \ | |
5268 | .queue_dump_start = netdev_dpdk_queue_dump_start, \ | |
5269 | .queue_dump_next = netdev_dpdk_queue_dump_next, \ | |
5270 | .queue_dump_done = netdev_dpdk_queue_dump_done, \ | |
89c09c1c BP |
5271 | .update_flags = netdev_dpdk_update_flags, \ |
5272 | .rxq_alloc = netdev_dpdk_rxq_alloc, \ | |
5273 | .rxq_construct = netdev_dpdk_rxq_construct, \ | |
5274 | .rxq_destruct = netdev_dpdk_rxq_destruct, \ | |
c0af6425 | 5275 | .rxq_dealloc = netdev_dpdk_rxq_dealloc |
89c09c1c BP |
5276 | |
5277 | #define NETDEV_DPDK_CLASS_BASE \ | |
5278 | NETDEV_DPDK_CLASS_COMMON, \ | |
5279 | .init = netdev_dpdk_class_init, \ | |
5280 | .destruct = netdev_dpdk_destruct, \ | |
5281 | .set_tx_multiq = netdev_dpdk_set_tx_multiq, \ | |
5282 | .get_carrier = netdev_dpdk_get_carrier, \ | |
5283 | .get_stats = netdev_dpdk_get_stats, \ | |
5284 | .get_custom_stats = netdev_dpdk_get_custom_stats, \ | |
5285 | .get_features = netdev_dpdk_get_features, \ | |
5286 | .get_status = netdev_dpdk_get_status, \ | |
5287 | .reconfigure = netdev_dpdk_reconfigure, \ | |
5fc5c50f | 5288 | .rxq_recv = netdev_dpdk_rxq_recv |
89c09c1c BP |
5289 | |
5290 | static const struct netdev_class dpdk_class = { | |
5291 | .type = "dpdk", | |
5292 | NETDEV_DPDK_CLASS_BASE, | |
5293 | .construct = netdev_dpdk_construct, | |
5294 | .set_config = netdev_dpdk_set_config, | |
5295 | .send = netdev_dpdk_eth_send, | |
5296 | }; | |
5297 | ||
89c09c1c BP |
5298 | static const struct netdev_class dpdk_vhost_class = { |
5299 | .type = "dpdkvhostuser", | |
5300 | NETDEV_DPDK_CLASS_COMMON, | |
5301 | .construct = netdev_dpdk_vhost_construct, | |
5302 | .destruct = netdev_dpdk_vhost_destruct, | |
5303 | .send = netdev_dpdk_vhost_send, | |
5304 | .get_carrier = netdev_dpdk_vhost_get_carrier, | |
5305 | .get_stats = netdev_dpdk_vhost_get_stats, | |
b99ab8aa | 5306 | .get_custom_stats = netdev_dpdk_get_sw_custom_stats, |
89c09c1c BP |
5307 | .get_status = netdev_dpdk_vhost_user_get_status, |
5308 | .reconfigure = netdev_dpdk_vhost_reconfigure, | |
35c91567 DM |
5309 | .rxq_recv = netdev_dpdk_vhost_rxq_recv, |
5310 | .rxq_enabled = netdev_dpdk_vhost_rxq_enabled, | |
89c09c1c BP |
5311 | }; |
5312 | ||
5313 | static const struct netdev_class dpdk_vhost_client_class = { | |
5314 | .type = "dpdkvhostuserclient", | |
5315 | NETDEV_DPDK_CLASS_COMMON, | |
5316 | .construct = netdev_dpdk_vhost_client_construct, | |
5317 | .destruct = netdev_dpdk_vhost_destruct, | |
5318 | .set_config = netdev_dpdk_vhost_client_set_config, | |
5319 | .send = netdev_dpdk_vhost_send, | |
5320 | .get_carrier = netdev_dpdk_vhost_get_carrier, | |
5321 | .get_stats = netdev_dpdk_vhost_get_stats, | |
b99ab8aa | 5322 | .get_custom_stats = netdev_dpdk_get_sw_custom_stats, |
89c09c1c BP |
5323 | .get_status = netdev_dpdk_vhost_user_get_status, |
5324 | .reconfigure = netdev_dpdk_vhost_client_reconfigure, | |
35c91567 DM |
5325 | .rxq_recv = netdev_dpdk_vhost_rxq_recv, |
5326 | .rxq_enabled = netdev_dpdk_vhost_rxq_enabled, | |
89c09c1c | 5327 | }; |
95fb793a | 5328 | |
8a9562d2 PS |
5329 | void |
5330 | netdev_dpdk_register(void) | |
5331 | { | |
bab69409 | 5332 | netdev_register_provider(&dpdk_class); |
53f50d24 | 5333 | netdev_register_provider(&dpdk_vhost_class); |
2d24d165 | 5334 | netdev_register_provider(&dpdk_vhost_client_class); |
8a9562d2 | 5335 | } |