ceph/src/spdk/dpdk/drivers/net/netvsc/hn_vf.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_log.h>
#include <rte_string_fns.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct ether_addr *mac = dev->data->mac_addrs;
	int i;

	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct ether_addr *vf_mac = vf_dev->data->mac_addrs;

		if (vf_dev == dev)
			continue;

		if (is_same_ether_addr(mac, vf_mac))
			return i;
	}
	return -ENOENT;
}

/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int ret;

	if (hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF already attached");
		return -EEXIST;
	}

	ret = rte_eth_dev_owner_get(port_id, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
		return ret;
	}

	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port_id, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port_id, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not set owner for port %d", port_id);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
	hv->vf_port = port_id;
	rte_smp_wmb();

	return 0;
}
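
/*
 * Note: the write barrier above is presumably meant to publish the new
 * vf_port value (read via hn_get_vf_dev()) before the caller, hn_vf_add(),
 * switches the NVS datapath over to the VF.
 */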

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int port, err;

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "No matching MAC found");
		return port;
	}

	rte_spinlock_lock(&hv->vf_lock);
	err = hn_vf_attach(hv, port);

	if (err == 0) {
		dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
		hv->vf_intr = (struct rte_intr_handle) {
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		dev->intr_handle = &hv->vf_intr;
		hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
	}
	rte_spinlock_unlock(&hv->vf_lock);

	return err;
}

/* Remove VF device from the synthetic device */
static void hn_vf_remove(struct hn_data *hv)
{
	rte_spinlock_lock(&hv->vf_lock);

	if (!hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF path not active");
	} else {
		/* Remember the VF port before it is invalidated below */
		uint16_t vf_port = hv->vf_port;

		/* Stop incoming packets from arriving on VF */
		hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);

		/* Stop transmission over VF */
		hv->vf_port = HN_INVALID_PORT;
		rte_smp_wmb();

		/* Give back ownership */
		rte_eth_dev_owner_unset(vf_port, hv->owner.id);
	}
	rte_spinlock_unlock(&hv->vf_lock);
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid vf association NVS");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	hv->vf_present = vf_assoc->allocated;

	if (dev->state != RTE_ETH_DEV_ATTACHED)
		return;

	if (vf_assoc->allocated)
		hn_vf_add(dev, hv);
	else
		hn_vf_remove(hv);
}
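
/*
 * In short: a VF association message with "allocated" set makes hn_vf_add()
 * claim the port with the matching MAC and move the NVS datapath to the VF;
 * clearing it makes hn_vf_remove() fall back to the synthetic path and
 * return ownership of the port.
 */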

/*
 * Merge the info from the VF and synthetic path.
 * Use the default config of the VF
 * and the minimum number of queues and buffer sizes.
 */
static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			     struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;

	rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen,
				      info->max_rx_pktlen);
}

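/* Merge the VF device info, if a VF is attached, into the reported device info */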
void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		hn_vf_info_merge(vf_dev, info);
	rte_spinlock_unlock(&hv->vf_lock);
}

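/* Pass link state queries through to the VF device, if one is attached */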
int hn_vf_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->link_update)
		ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}

/* called when VF has link state interrupts enabled */
static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
			   enum rte_eth_event_type event,
			   void *cb_arg, void *out __rte_unused)
{
	struct rte_eth_dev *dev = cb_arg;

	if (event != RTE_ETH_EVENT_INTR_LSC)
		return 0;

	/* if link state has changed pass on */
	if (hn_dev_link_update(dev, 0) == 0)
		return 0;	/* no change */

	return _rte_eth_dev_callback_process(dev,
					     RTE_ETH_EVENT_INTR_LSC,
					     NULL);
}

static int _hn_vf_configure(struct rte_eth_dev *dev,
			    uint16_t vf_port,
			    const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_conf vf_conf = *dev_conf;
	struct rte_eth_dev *vf_dev;
	int ret;

	vf_dev = &rte_eth_devices[vf_port];
	if (dev_conf->intr_conf.lsc &&
	    (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 1;
	} else {
		PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 0;
	}

	ret = rte_eth_dev_configure(vf_port,
				    dev->data->nb_rx_queues,
				    dev->data->nb_tx_queues,
				    &vf_conf);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "VF configuration failed: %d", ret);
	} else if (vf_conf.intr_conf.lsc) {
		/* Register for link state change events on the VF */
		ret = rte_eth_dev_callback_register(vf_port,
						    RTE_ETH_EVENT_INTR_LSC,
						    hn_vf_lsc_event, dev);
		if (ret)
			PMD_DRV_LOG(ERR,
				    "Failed to register LSC callback for VF %u",
				    vf_port);
	}
	return ret;
}

/*
 * Configure VF if present.
 * Force VF to have same number of queues as synthetic device
 */
int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	if (hv->vf_port != HN_INVALID_PORT)
		ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
	rte_spinlock_unlock(&hv->vf_lock);

	return ptypes;
}

int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		rte_eth_dev_stop(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_spinlock_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_spinlock_unlock(&hv->vf_lock);		\
	}
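
/*
 * For illustration, VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_enable), as used
 * by hn_vf_promiscuous_enable() below, expands to roughly:
 *
 *	struct hn_data *hv = (dev)->data->dev_private;
 *	struct rte_eth_dev *vf_dev;
 *	rte_spinlock_lock(&hv->vf_lock);
 *	vf_dev = hn_get_vf_dev(hv);
 *	if (vf_dev)
 *		rte_eth_promiscuous_enable(vf_dev->data->port_id);
 *	rte_spinlock_unlock(&hv->vf_lock);
 *
 * i.e. the call is forwarded to the VF port only while a VF is attached.
 */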

void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

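/* Close the VF device, if one is attached, and forget its port id */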
void hn_vf_close(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	uint16_t vf_port;

	rte_spinlock_lock(&hv->vf_lock);
	vf_port = hv->vf_port;
	if (vf_port != HN_INVALID_PORT)
		rte_eth_dev_close(vf_port);

	hv->vf_port = HN_INVALID_PORT;
	rte_spinlock_unlock(&hv->vf_lock);
}

void hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_stats_reset);
}

void hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_enable);
}

void hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_disable);
}

void hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_enable);
}

void hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_disable);
}

int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
		       struct ether_addr *mc_addr_set,
		       uint32_t nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

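/* Release the matching transmit queue on the VF device, if one is attached */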
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
		void *subq = vf_dev->data->tx_queues[queue_id];

		(*vf_dev->dev_ops->tx_queue_release)(subq);
	}

	rte_spinlock_unlock(&hv->vf_lock);
}

int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

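/* Release the matching receive queue on the VF device, if one is attached */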
void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
		void *subq = vf_dev->data->rx_queues[queue_id];

		(*vf_dev->dev_ops->rx_queue_release)(subq);
	}
	rte_spinlock_unlock(&hv->vf_lock);
}

int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

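/*
 * Forward extended statistics requests to the VF. Names reported by the VF
 * are re-exported with a "vf_" prefix, e.g. a VF counter named
 * "rx_good_packets" shows up here as "vf_rx_good_packets".
 */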
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;
	char tmp[RTE_ETH_XSTATS_NAME_SIZE];

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->xstats_get_names)
		count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n);
	rte_spinlock_unlock(&hv->vf_lock);

	/* add vf_ prefix to xstat names */
	if (names) {
		for (i = 0; i < count; i++) {
			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}

int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int count = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->xstats_get)
		count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n);
	rte_spinlock_unlock(&hv->vf_lock);

	return count;
}

void hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->xstats_reset)
		vf_dev->dev_ops->xstats_reset(vf_dev);
	rte_spinlock_unlock(&hv->vf_lock);
}