/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_log.h>
#include <rte_string_fns.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct rte_ether_addr *mac = dev->data->mac_addrs;
	int i;

	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;

		if (vf_dev == dev)
			continue;

		if (rte_is_same_ether_addr(mac, vf_mac))
			return i;
	}
	return -ENOENT;
}
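
/*
 * Note: Hyper-V accelerated networking exposes the SR-IOV VF with the
 * same MAC address as the paired synthetic NIC, which is why matching
 * on MAC address above is sufficient to find the companion port.
 */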


/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int ret;

	if (hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF already attached");
		return -EEXIST;
	}

	ret = rte_eth_dev_owner_get(port_id, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
		return ret;
	}

	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port_id, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port_id, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not set owner for port %d", port_id);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
	hv->vf_port = port_id;
	return 0;
}
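
/*
 * The ethdev ownership API keeps the attached VF out of
 * RTE_ETH_FOREACH_DEV iteration (which only visits ports with no
 * owner), so applications see a single port while this driver
 * controls the VF underneath.
 */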

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int port, err;

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "No matching MAC found");
		return port;
	}

	err = hn_vf_attach(hv, port);
	if (err == 0) {
		dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
		hv->vf_intr = (struct rte_intr_handle) {
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		dev->intr_handle = &hv->vf_intr;
		hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
	}

	return err;
}

/* Remove VF device from the synthetic device */
static void hn_vf_remove(struct hn_data *hv)
{
	if (!hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF path not active");
	} else {
		/* Stop incoming packets from arriving on VF */
		hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);

		/* Give back ownership while vf_port is still valid;
		 * doing this after invalidating it would pass
		 * HN_INVALID_PORT to the unset call.
		 */
		rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id);

		/* Stop transmission over VF */
		hv->vf_port = HN_INVALID_PORT;
	}
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid vf association NVS");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	rte_rwlock_write_lock(&hv->vf_lock);
	hv->vf_present = vf_assoc->allocated;

	if (dev->state == RTE_ETH_DEV_ATTACHED) {
		if (vf_assoc->allocated)
			hn_vf_add(dev, hv);
		else
			hn_vf_remove(hv);
	}
	rte_rwlock_write_unlock(&hv->vf_lock);
}
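
/*
 * The host may associate or dissociate the VF at any time after the
 * channel is up (e.g. around live migration). vf_lock is taken as
 * writer here because vf_port and vf_present change; the other entry
 * points in this file take it only as reader.
 */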

static void
hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
		     const struct rte_eth_desc_lim *vf_lim)
{
	lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
	lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
	lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
	lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
	lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max,
				      lim->nb_mtu_seg_max);
}
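
/*
 * Illustrative example of the merge above: with a synthetic limit of
 * nb_max = 4096 and a VF limit of nb_max = 2048, the merged nb_max is
 * 2048 (RTE_MIN), while minimums and alignment take the larger value
 * (RTE_MAX), so the merged limits satisfy both devices.
 */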

/*
 * Merge the info from the VF and synthetic path.
 * Use the default config of the VF
 * and the minimum number of queues and buffer sizes.
 */
static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			    struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;
	int ret;

	ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
	if (ret != 0)
		return ret;

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
	hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen,
				      info->max_rx_pktlen);
	hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);

	return 0;
}

int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = hn_vf_info_merge(vf_dev, info);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->link_update)
		ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

/* Called when VF has link state interrupts enabled */
static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
			   enum rte_eth_event_type event,
			   void *cb_arg, void *out __rte_unused)
{
	struct rte_eth_dev *dev = cb_arg;

	if (event != RTE_ETH_EVENT_INTR_LSC)
		return 0;

	/* if link state has changed pass on */
	if (hn_dev_link_update(dev, 0) == 0)
		return 0; /* no change */

	return _rte_eth_dev_callback_process(dev,
					     RTE_ETH_EVENT_INTR_LSC,
					     NULL);
}
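
/*
 * This callback is registered on the VF port in _hn_vf_configure()
 * below; it forwards the VF's link-state change to callbacks that
 * were registered on the synthetic port, but only when the combined
 * link state actually changed.
 */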

static int _hn_vf_configure(struct rte_eth_dev *dev,
			    uint16_t vf_port,
			    const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_conf vf_conf = *dev_conf;
	struct rte_eth_dev *vf_dev;
	int ret;

	vf_dev = &rte_eth_devices[vf_port];
	if (dev_conf->intr_conf.lsc &&
	    (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 1;
	} else {
		PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 0;
	}

	ret = rte_eth_dev_configure(vf_port,
				    dev->data->nb_rx_queues,
				    dev->data->nb_tx_queues,
				    &vf_conf);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "VF configuration failed: %d", ret);
	} else if (vf_conf.intr_conf.lsc) {
		/* Callback registration takes an event type, not the
		 * RTE_ETH_DEV_INTR_LSC device flag checked above.
		 */
		ret = rte_eth_dev_callback_register(vf_port,
						    RTE_ETH_EVENT_INTR_LSC,
						    hn_vf_lsc_event, dev);
		if (ret)
			PMD_DRV_LOG(ERR,
				    "Failed to register LSC callback for VF %u",
				    vf_port);
	}
	return ret;
}

/*
 * Configure VF if present.
 * Force VF to have same number of queues as synthetic device.
 */
int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	if (hv->vf_port != HN_INVALID_PORT)
		ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ptypes;
}

int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		rte_eth_dev_stop(vf_dev->data->port_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
	}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)			\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		int ret = 0;					\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			ret = func(vf_dev->data->port_id);	\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
		return ret;					\
	}
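
/*
 * For example, hn_vf_reset() below is written as
 *	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
 * which expands to looking up the VF under vf_lock and, if one is
 * attached, calling rte_eth_dev_reset(vf_dev->data->port_id).
 */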

void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

void hn_vf_close(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	uint16_t vf_port;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_port = hv->vf_port;
	if (vf_port != HN_INVALID_PORT)
		rte_eth_dev_close(vf_port);

	hv->vf_port = HN_INVALID_PORT;
	rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
}

int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
}

int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
}

int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
}

int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
}

int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
		       struct rte_ether_addr *mc_addr_set,
		       uint32_t nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
		void *subq = vf_dev->data->tx_queues[queue_id];

		(*vf_dev->dev_ops->tx_queue_release)(subq);
	}

	rte_rwlock_read_unlock(&hv->vf_lock);
}
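
/*
 * There is no public ethdev call for releasing a single queue, so the
 * VF's dev_ops are invoked directly above. The queue index is valid
 * for the VF because hn_vf_configure() forces the VF to use the same
 * queue counts as the synthetic device.
 */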

int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
		void *subq = vf_dev->data->rx_queues[queue_id];

		(*vf_dev->dev_ops->rx_queue_release)(subq);
	}
	rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
						 names, n);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* add vf_ prefix to xstat names */
	if (names) {
		for (i = 0; i < count; i++) {
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}

int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int offset,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get(vf_dev->data->port_id,
					   xstats + offset, n - offset);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* Offset ids for VF stats */
	if (count > 0) {
		for (i = 0; i < count; i++)
			xstats[i + offset].id += offset;
	}

	return count;
}
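
/*
 * Example of the id re-basing above: with offset = 4 synthetic xstats,
 * VF stats that the VF PMD reports with ids 0..count-1 end up at ids
 * 4..count+3, matching the "vf_"-prefixed names emitted by
 * hn_vf_xstats_get_names().
 */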

int hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
	else
		ret = -EINVAL;
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->reta_update)
		ret = vf_dev->dev_ops->reta_update(vf_dev,
						   reta_conf, reta_size);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}