/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_log.h>
#include <rte_string_fns.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct ether_addr *mac = dev->data->mac_addrs;
	int i;

	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct ether_addr *vf_mac = vf_dev->data->mac_addrs;

		/* Skip the synthetic device itself */
		if (vf_dev == dev)
			continue;

		if (is_same_ether_addr(mac, vf_mac))
			return i;
	}
	return -ENOENT;
}

/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int ret;

	if (hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF already attached");
		return -EEXIST;
	}

	ret = rte_eth_dev_owner_get(port_id, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
		return ret;
	}

	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port_id, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port_id, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not set owner for port %d", port_id);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
	hv->vf_port = port_id;
	return 0;
}

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int port, err;

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "No matching MAC found");
		return port;
	}

	rte_spinlock_lock(&hv->vf_lock);
	err = hn_vf_attach(hv, port);

	if (err == 0) {
		dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
		hv->vf_intr = (struct rte_intr_handle) {
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		dev->intr_handle = &hv->vf_intr;
		hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
	}
	rte_spinlock_unlock(&hv->vf_lock);

	return err;
}

/* Remove VF device and fall back to the synthetic path */
static void hn_vf_remove(struct hn_data *hv)
{
	uint16_t vf_port;

	rte_spinlock_lock(&hv->vf_lock);

	if (!hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF path not active");
	} else {
		/* Stop incoming packets from arriving on VF */
		hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);

		/* Stop transmission over VF */
		vf_port = hv->vf_port;
		hv->vf_port = HN_INVALID_PORT;

		/* Give back ownership */
		rte_eth_dev_owner_unset(vf_port, hv->owner.id);
	}
	rte_spinlock_unlock(&hv->vf_lock);
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid vf association NVS");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	hv->vf_present = vf_assoc->allocated;

	if (dev->state != RTE_ETH_DEV_ATTACHED)
		return;

	if (vf_assoc->allocated)
		hn_vf_add(dev, hv);
	else
		hn_vf_remove(hv);
}

/*
 * Merge the info from the VF and synthetic path.
 * Use the default config of the VF
 * and the minimum number of queues and buffer sizes.
 */
static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			     struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;

	rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen,
				      info->max_rx_pktlen);
}

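/* Fold the VF's capabilities into the synthetic device's reported info */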
void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		hn_vf_info_merge(vf_dev, info);
	rte_spinlock_unlock(&hv->vf_lock);
}

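/* Report link state from the VF device when one is attached */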
int hn_vf_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->link_update)
		ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}

/* called when VF has link state interrupts enabled */
static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
			   enum rte_eth_event_type event,
			   void *cb_arg, void *out __rte_unused)
{
	struct rte_eth_dev *dev = cb_arg;

	if (event != RTE_ETH_EVENT_INTR_LSC)
		return 0;

	/* if link state has changed pass on */
	if (hn_dev_link_update(dev, 0) == 0)
		return 0;	/* no change */

	return _rte_eth_dev_callback_process(dev,
					     RTE_ETH_EVENT_INTR_LSC,
					     NULL);
}

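/* Configure the VF to mirror the synthetic device; called with vf_lock held */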
static int _hn_vf_configure(struct rte_eth_dev *dev,
			    uint16_t vf_port,
			    const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_conf vf_conf = *dev_conf;
	struct rte_eth_dev *vf_dev;
	int ret;

	vf_dev = &rte_eth_devices[vf_port];
	if (dev_conf->intr_conf.lsc &&
	    (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 1;
	} else {
		PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 0;
	}

	ret = rte_eth_dev_configure(vf_port,
				    dev->data->nb_rx_queues,
				    dev->data->nb_tx_queues,
				    &vf_conf);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "VF configuration failed: %d", ret);
	} else if (vf_conf.intr_conf.lsc) {
		ret = rte_eth_dev_callback_register(vf_port,
						    RTE_ETH_EVENT_INTR_LSC,
						    hn_vf_lsc_event, dev);
		if (ret)
			PMD_DRV_LOG(ERR,
				    "Failed to register LSC callback for VF %u",
				    vf_port);
	}

	return ret;
}

/*
 * Configure VF if present.
 * Force VF to have same number of queues as synthetic device
 */
int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	if (hv->vf_port != HN_INVALID_PORT)
		ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

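/* Report the packet types supported by the VF device */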
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
	rte_spinlock_unlock(&hv->vf_lock);

	return ptypes;
}

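/* Start the VF device (if attached) when the synthetic device starts */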
int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

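/* Stop the VF device (if attached) when the synthetic device stops */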
void hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		rte_eth_dev_stop(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_spinlock_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_spinlock_unlock(&hv->vf_lock);		\
	}

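/*
 * For example, VF_ETHDEV_FUNC(dev, rte_eth_dev_reset) expands to a block
 * that takes vf_lock, forwards rte_eth_dev_reset() to the VF port if one
 * is attached, then releases the lock.
 */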
void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

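/* Close the VF port (if any) and mark it invalid */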
void hn_vf_close(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	uint16_t vf_port;

	rte_spinlock_lock(&hv->vf_lock);
	vf_port = hv->vf_port;
	if (vf_port != HN_INVALID_PORT)
		rte_eth_dev_close(vf_port);

	hv->vf_port = HN_INVALID_PORT;
	rte_spinlock_unlock(&hv->vf_lock);
}

void hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_stats_reset);
}

void hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_enable);
}

void hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_disable);
}

void hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_enable);
}

void hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_disable);
}

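/* Forward the multicast address list to the VF */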
int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
		       struct ether_addr *mc_addr_set,
		       int nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

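/* Create the matching transmit queue on the VF */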
int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

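/* Release the matching transmit queue on the VF */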
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
		void *subq = vf_dev->data->tx_queues[queue_id];

		(*vf_dev->dev_ops->tx_queue_release)(subq);
	}

	rte_spinlock_unlock(&hv->vf_lock);
}

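/* Create the matching receive queue on the VF */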
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

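/* Release the matching receive queue on the VF */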
void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
		void *subq = vf_dev->data->rx_queues[queue_id];

		(*vf_dev->dev_ops->rx_queue_release)(subq);
	}
	rte_spinlock_unlock(&hv->vf_lock);
}

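/* Read basic statistics from the VF */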
int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

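/* Get extended statistic names from the VF, prefixed with "vf_" */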
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;
	char tmp[RTE_ETH_XSTATS_NAME_SIZE];

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->xstats_get_names)
		count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n);
	rte_spinlock_unlock(&hv->vf_lock);

	/* add vf_ prefix to xstat names */
	if (names) {
		for (i = 0; i < count; i++) {
			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}

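/* Get extended statistic values from the VF */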
int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int count = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->xstats_get)
		count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n);
	rte_spinlock_unlock(&hv->vf_lock);

	return count;
}

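/* Reset extended statistic counters on the VF */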
void hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->xstats_reset)
		vf_dev->dev_ops->xstats_reset(vf_dev);
	rte_spinlock_unlock(&hv->vf_lock);
}