/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"

#include "i40e_rxtx.h"
#include "i40e_ethdev.h"
#include "i40e_pf.h"

/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
#define I40EVF_BUSY_WAIT_COUNT 50
#define MAX_RESET_WAIT_CNT 20

#define I40EVF_ALARM_INTERVAL 50000 /* us */

struct i40evf_arq_msg_info {
	enum virtchnl_ops ops;
	enum i40e_status_code result;
	uint16_t buf_len;
	uint16_t msg_len;
	uint8_t *msg;
};

struct vf_cmd_info {
	enum virtchnl_ops ops;
	uint8_t *in_args;
	uint32_t in_args_size;
	uint8_t *out_buffer;
	/* Input & output type: pass in the buffer size and pass out
	 * the actual returned result length.
	 */
	uint32_t out_size;
};

enum i40evf_aq_result {
	I40EVF_MSG_ERR = -1, /* Met an error when accessing the admin queue */
	I40EVF_MSG_NON, /* Read nothing from the admin queue */
	I40EVF_MSG_SYS, /* Read a system msg from the admin queue */
	I40EVF_MSG_CMD, /* Read an async command result */
};

static int i40evf_dev_configure(struct rte_eth_dev *dev);
static int i40evf_dev_start(struct rte_eth_dev *dev);
static void i40evf_dev_stop(struct rte_eth_dev *dev);
static void i40evf_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int i40evf_dev_link_update(struct rte_eth_dev *dev,
				  int wait_to_complete);
static int i40evf_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);
static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
				 struct rte_eth_xstat *xstats, unsigned n);
static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev,
				       struct rte_eth_xstat_name *xstats_names,
				       unsigned limit);
static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
				  uint16_t vlan_id, int on);
static int i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40evf_dev_close(struct rte_eth_dev *dev);
static int i40evf_dev_reset(struct rte_eth_dev *dev);
static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40evf_init_vlan(struct rte_eth_dev *dev);
static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
				     uint16_t rx_queue_id);
static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
				    uint16_t rx_queue_id);
static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
				     uint16_t tx_queue_id);
static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
				    uint16_t tx_queue_id);
static int i40evf_add_mac_addr(struct rte_eth_dev *dev,
			       struct ether_addr *addr,
			       uint32_t index,
			       uint32_t pool);
static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
				      struct rte_eth_rss_reta_entry64 *reta_conf,
				      uint16_t reta_size);
static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
				     struct rte_eth_rss_reta_entry64 *reta_conf,
				     uint16_t reta_size);
static int i40evf_config_rss(struct i40e_vf *vf);
static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
					struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
				       struct ether_addr *mac_addr);
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static void i40evf_handle_pf_event(struct rte_eth_dev *dev,
				   uint8_t *msg,
				   uint16_t msglen);

static int
i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
			    struct ether_addr *mc_addr_set,
			    uint32_t nb_mc_addr, bool add);
static int
i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
			uint32_t nb_mc_addr);

/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];

struct rte_i40evf_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
	{"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)},
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	{"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
	{"tx_error_packets", offsetof(struct i40e_eth_stats, tx_errors)},
};

#define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
		sizeof(rte_i40evf_stats_strings[0]))

static const struct eth_dev_ops i40evf_eth_dev_ops = {
	.dev_configure = i40evf_dev_configure,
	.dev_start = i40evf_dev_start,
	.dev_stop = i40evf_dev_stop,
	.promiscuous_enable = i40evf_dev_promiscuous_enable,
	.promiscuous_disable = i40evf_dev_promiscuous_disable,
	.allmulticast_enable = i40evf_dev_allmulticast_enable,
	.allmulticast_disable = i40evf_dev_allmulticast_disable,
	.link_update = i40evf_dev_link_update,
	.stats_get = i40evf_dev_stats_get,
	.stats_reset = i40evf_dev_xstats_reset,
	.xstats_get = i40evf_dev_xstats_get,
	.xstats_get_names = i40evf_dev_xstats_get_names,
	.xstats_reset = i40evf_dev_xstats_reset,
	.dev_close = i40evf_dev_close,
	.dev_reset = i40evf_dev_reset,
	.dev_infos_get = i40evf_dev_info_get,
	.dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
	.vlan_filter_set = i40evf_vlan_filter_set,
	.vlan_offload_set = i40evf_vlan_offload_set,
	.rx_queue_start = i40evf_dev_rx_queue_start,
	.rx_queue_stop = i40evf_dev_rx_queue_stop,
	.tx_queue_start = i40evf_dev_tx_queue_start,
	.tx_queue_stop = i40evf_dev_tx_queue_stop,
	.rx_queue_setup = i40e_dev_rx_queue_setup,
	.rx_queue_release = i40e_dev_rx_queue_release,
	.rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
	.rx_descriptor_done = i40e_dev_rx_descriptor_done,
	.rx_descriptor_status = i40e_dev_rx_descriptor_status,
	.tx_descriptor_status = i40e_dev_tx_descriptor_status,
	.tx_queue_setup = i40e_dev_tx_queue_setup,
	.tx_queue_release = i40e_dev_tx_queue_release,
	.rx_queue_count = i40e_dev_rx_queue_count,
	.rxq_info_get = i40e_rxq_info_get,
	.txq_info_get = i40e_txq_info_get,
	.mac_addr_add = i40evf_add_mac_addr,
	.mac_addr_remove = i40evf_del_mac_addr,
	.set_mc_addr_list = i40evf_set_mc_addr_list,
	.reta_update = i40evf_dev_rss_reta_update,
	.reta_query = i40evf_dev_rss_reta_query,
	.rss_hash_update = i40evf_dev_rss_hash_update,
	.rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
	.mtu_set = i40evf_dev_mtu_set,
	.mac_addr_set = i40evf_set_default_mac_addr,
};
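
/* Note: .stats_reset deliberately points at i40evf_dev_xstats_reset as
 * well; both reset paths re-read the PF counters and store them as the
 * new offset baseline, so basic stats and xstats share one snapshot.
 */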

/*
 * Read data from the admin queue to get a msg from the pf driver
 */
static enum i40evf_aq_result
i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_arq_event_info event;
	enum virtchnl_ops opcode;
	enum i40e_status_code retval;
	int ret;
	enum i40evf_aq_result result = I40EVF_MSG_NON;

	event.buf_len = data->buf_len;
	event.msg_buf = data->msg;
	ret = i40e_clean_arq_element(hw, &event, NULL);
	/* Can't read any msg from adminQ */
	if (ret) {
		if (ret != I40E_ERR_ADMIN_QUEUE_NO_WORK)
			result = I40EVF_MSG_ERR;
		return result;
	}

	opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
	retval = (enum i40e_status_code)rte_le_to_cpu_32(event.desc.cookie_low);
	/* pf sys event */
	if (opcode == VIRTCHNL_OP_EVENT) {
		struct virtchnl_pf_event *vpe =
			(struct virtchnl_pf_event *)event.msg_buf;

		result = I40EVF_MSG_SYS;
		switch (vpe->event) {
		case VIRTCHNL_EVENT_LINK_CHANGE:
			vf->link_up =
				vpe->event_data.link_event.link_status;
			vf->link_speed =
				vpe->event_data.link_event.link_speed;
			vf->pend_msg |= PFMSG_LINK_CHANGE;
			PMD_DRV_LOG(INFO, "Link status update:%s",
				    vf->link_up ? "up" : "down");
			break;
		case VIRTCHNL_EVENT_RESET_IMPENDING:
			vf->vf_reset = true;
			vf->pend_msg |= PFMSG_RESET_IMPENDING;
			PMD_DRV_LOG(INFO, "VF is resetting");
			break;
		case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
			vf->dev_closed = true;
			vf->pend_msg |= PFMSG_DRIVER_CLOSE;
			PMD_DRV_LOG(INFO, "PF driver closed");
			break;
		default:
			PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
				    __func__, vpe->event);
		}
	} else {
		/* async reply msg on a command issued by the vf previously */
		result = I40EVF_MSG_CMD;
		/* Actual data length read from PF */
		data->msg_len = event.msg_len;
	}

	data->result = retval;
	data->ops = opcode;

	return result;
}

/**
 * Clear the current command. Only call this after
 * _atomic_set_cmd has executed successfully.
 */
static inline void
_clear_cmd(struct i40e_vf *vf)
{
	rte_wmb();
	vf->pend_cmd = VIRTCHNL_OP_UNKNOWN;
}

/*
 * Check whether there is a pending cmd in execution. If none, set the
 * new command.
 */
static inline int
_atomic_set_cmd(struct i40e_vf *vf, enum virtchnl_ops ops)
{
	int ret = rte_atomic32_cmpset(&vf->pend_cmd,
				      VIRTCHNL_OP_UNKNOWN, ops);

	if (!ret)
		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);

	return !ret;
}
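
/* vf->pend_cmd acts as a one-slot lock on the PF mailbox: a command may
 * only be issued while pend_cmd is VIRTCHNL_OP_UNKNOWN, and the cmpset
 * above claims the slot atomically. The slot is released by _clear_cmd()
 * once the matching reply (or a timeout) is observed, so at most one
 * virtchnl command is ever in flight.
 */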

#define MAX_TRY_TIMES 200
#define ASQ_DELAY_MS 10

static int
i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40evf_arq_msg_info info;
	enum i40evf_aq_result ret;
	int err, i = 0;

	if (_atomic_set_cmd(vf, args->ops))
		return -1;

	info.msg = args->out_buffer;
	info.buf_len = args->out_size;
	info.ops = VIRTCHNL_OP_UNKNOWN;
	info.result = I40E_SUCCESS;

	err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
				     args->in_args, args->in_args_size, NULL);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
		_clear_cmd(vf);
		return err;
	}

	switch (args->ops) {
	case VIRTCHNL_OP_RESET_VF:
		/* no need to process in this function */
		err = 0;
		break;
	case VIRTCHNL_OP_VERSION:
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		/* for init adminq commands, poll the response */
		err = -1;
		do {
			ret = i40evf_read_pfmsg(dev, &info);
			vf->cmd_retval = info.result;
			if (ret == I40EVF_MSG_CMD) {
				err = 0;
				break;
			} else if (ret == I40EVF_MSG_ERR)
				break;
			rte_delay_ms(ASQ_DELAY_MS);
			/* If no msg is read, or a sys event is read, continue */
		} while (i++ < MAX_TRY_TIMES);
		_clear_cmd(vf);
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		/**
		 * Ignore the async reply and wait only for the system
		 * message: vf_reset becomes true on
		 * VIRTCHNL_EVENT_RESET_IMPENDING; if that never arrives,
		 * the queue request failed.
		 */
		err = -1;
		do {
			ret = i40evf_read_pfmsg(dev, &info);
			vf->cmd_retval = info.result;
			if (ret == I40EVF_MSG_SYS && vf->vf_reset) {
				err = 0;
				break;
			} else if (ret == I40EVF_MSG_ERR ||
				   ret == I40EVF_MSG_CMD) {
				break;
			}
			rte_delay_ms(ASQ_DELAY_MS);
			/* If no msg is read, or a sys event is read, continue */
		} while (i++ < MAX_TRY_TIMES);
		_clear_cmd(vf);
		break;

	default:
		/* for other adminq commands at runtime, wait for the
		 * cmd-done flag, which the alarm handler clears when the
		 * reply arrives
		 */
		err = -1;
		do {
			if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) {
				err = 0;
				break;
			}
			rte_delay_ms(ASQ_DELAY_MS);
		} while (i++ < MAX_TRY_TIMES);
		/* If no response was received, clear the command */
		if (i >= MAX_TRY_TIMES) {
			PMD_DRV_LOG(WARNING, "No response for %d", args->ops);
			_clear_cmd(vf);
		}
		break;
	}

	return err | vf->cmd_retval;
}
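
/* Typical caller pattern (see e.g. i40evf_add_vlan() below): fill a
 * struct vf_cmd_info with the virtchnl opcode, the request buffer, and
 * vf->aq_resp/I40E_AQ_BUF_SZ for the reply, then call
 * i40evf_execute_vf_cmd(), which serializes the exchange and folds the
 * PF's return code into its result.
 */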

/*
 * Check the API version, waiting synchronously until the version is
 * read from the admin queue or the read fails.
 */
static int
i40evf_check_api_version(struct rte_eth_dev *dev)
{
	struct virtchnl_version_info version, *pver;
	int err;
	struct vf_cmd_info args;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	version.major = VIRTCHNL_VERSION_MAJOR;
	version.minor = VIRTCHNL_VERSION_MINOR;

	args.ops = VIRTCHNL_OP_VERSION;
	args.in_args = (uint8_t *)&version;
	args.in_args_size = sizeof(version);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;

	err = i40evf_execute_vf_cmd(dev, &args);
	if (err) {
		PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
		return err;
	}

	pver = (struct virtchnl_version_info *)args.out_buffer;
	vf->version_major = pver->major;
	vf->version_minor = pver->minor;
	if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
	    (vf->version_minor <= VIRTCHNL_VERSION_MINOR))
		PMD_DRV_LOG(INFO, "Peer is Linux PF host");
	else {
		PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
			     vf->version_major, vf->version_minor,
			     VIRTCHNL_VERSION_MAJOR,
			     VIRTCHNL_VERSION_MINOR);
		return -1;
	}

	return 0;
}
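
/* Compatibility rule used above: the major versions must match exactly,
 * and the minor version reported by the PF must not exceed the VF's
 * compiled-in minor; a newer PF minor is treated as a mismatch.
 */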

static int
i40evf_get_vf_resource(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int err;
	struct vf_cmd_info args;
	uint32_t caps, len;

	args.ops = VIRTCHNL_OP_GET_VF_RESOURCES;
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	if (PF_IS_V11(vf)) {
		caps = VIRTCHNL_VF_OFFLOAD_L2 |
		       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
		       VIRTCHNL_VF_OFFLOAD_RSS_REG |
		       VIRTCHNL_VF_OFFLOAD_VLAN |
		       VIRTCHNL_VF_OFFLOAD_RX_POLLING;
		args.in_args = (uint8_t *)&caps;
		args.in_args_size = sizeof(caps);
	} else {
		args.in_args = NULL;
		args.in_args_size = 0;
	}
	err = i40evf_execute_vf_cmd(dev, &args);

	if (err) {
		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
		return err;
	}

	len = sizeof(struct virtchnl_vf_resource) +
	      I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);

	rte_memcpy(vf->vf_res, args.out_buffer,
		   RTE_MIN(args.out_size, len));
	i40e_vf_parse_hw_config(hw, vf->vf_res);

	return 0;
}

static int
i40evf_config_promisc(struct rte_eth_dev *dev,
		      bool enable_unicast,
		      bool enable_multicast)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int err;
	struct vf_cmd_info args;
	struct virtchnl_promisc_info promisc;

	promisc.flags = 0;
	promisc.vsi_id = vf->vsi_res->vsi_id;

	if (enable_unicast)
		promisc.flags |= FLAG_VF_UNICAST_PROMISC;

	if (enable_multicast)
		promisc.flags |= FLAG_VF_MULTICAST_PROMISC;

	args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	args.in_args = (uint8_t *)&promisc;
	args.in_args_size = sizeof(promisc);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;

	err = i40evf_execute_vf_cmd(dev, &args);

	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command "
			    "CONFIG_PROMISCUOUS_MODE");
	return err;
}

static int
i40evf_enable_vlan_strip(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct vf_cmd_info args;
	int ret;

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
	args.in_args = NULL;
	args.in_args_size = 0;
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	ret = i40evf_execute_vf_cmd(dev, &args);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to execute command of "
			    "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING");

	return ret;
}

static int
i40evf_disable_vlan_strip(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct vf_cmd_info args;
	int ret;

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
	args.in_args = NULL;
	args.in_args_size = 0;
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	ret = i40evf_execute_vf_cmd(dev, &args);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to execute command of "
			    "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING");

	return ret;
}

static void
i40evf_fill_virtchnl_vsi_txq_info(struct virtchnl_txq_info *txq_info,
				  uint16_t vsi_id,
				  uint16_t queue_id,
				  uint16_t nb_txq,
				  struct i40e_tx_queue *txq)
{
	txq_info->vsi_id = vsi_id;
	txq_info->queue_id = queue_id;
	if (queue_id < nb_txq) {
		txq_info->ring_len = txq->nb_tx_desc;
		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
	}
}

static void
i40evf_fill_virtchnl_vsi_rxq_info(struct virtchnl_rxq_info *rxq_info,
				  uint16_t vsi_id,
				  uint16_t queue_id,
				  uint16_t nb_rxq,
				  uint32_t max_pkt_size,
				  struct i40e_rx_queue *rxq)
{
	rxq_info->vsi_id = vsi_id;
	rxq_info->queue_id = queue_id;
	rxq_info->max_pkt_size = max_pkt_size;
	if (queue_id < nb_rxq) {
		rxq_info->ring_len = rxq->nb_rx_desc;
		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
		rxq_info->databuffer_size =
			(rte_pktmbuf_data_room_size(rxq->mp) -
				RTE_PKTMBUF_HEADROOM);
	}
}

static int
i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_rx_queue **rxq =
		(struct i40e_rx_queue **)dev->data->rx_queues;
	struct i40e_tx_queue **txq =
		(struct i40e_tx_queue **)dev->data->tx_queues;
	struct virtchnl_vsi_queue_config_info *vc_vqci;
	struct virtchnl_queue_pair_info *vc_qpi;
	struct vf_cmd_info args;
	uint16_t i, nb_qp = vf->num_queue_pairs;
	const uint32_t size =
		I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
	uint8_t buff[size];
	int ret;

	memset(buff, 0, sizeof(buff));
	vc_vqci = (struct virtchnl_vsi_queue_config_info *)buff;
	vc_vqci->vsi_id = vf->vsi_res->vsi_id;
	vc_vqci->num_queue_pairs = nb_qp;

	for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
		i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
			vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
		i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
			vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
			vf->max_pkt_len, rxq[i]);
	}
	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	args.in_args = (uint8_t *)vc_vqci;
	args.in_args_size = size;
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	ret = i40evf_execute_vf_cmd(dev, &args);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to execute command of "
			    "VIRTCHNL_OP_CONFIG_VSI_QUEUES");

	return ret;
}

static int
i40evf_config_irq_map(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct vf_cmd_info args;
	uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \
			   sizeof(struct virtchnl_vector_map)];
	struct virtchnl_irq_map_info *map_info;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vector_id;
	int i, err;

	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_allow_others(intr_handle))
		vector_id = I40E_RX_VEC_START;
	else
		vector_id = I40E_MISC_VEC_ID;

	map_info = (struct virtchnl_irq_map_info *)cmd_buffer;
	map_info->num_vectors = 1;
	map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
	map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
	/* Always use the default dynamic MSI-X interrupt */
	map_info->vecmap[0].vector_id = vector_id;
	/* Don't map any tx queue */
	map_info->vecmap[0].txq_map = 0;
	map_info->vecmap[0].rxq_map = 0;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		map_info->vecmap[0].rxq_map |= 1 << i;
		if (rte_intr_dp_is_en(intr_handle))
			intr_handle->intr_vec[i] = vector_id;
	}

	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
	args.in_args = (u8 *)cmd_buffer;
	args.in_args_size = sizeof(cmd_buffer);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");

	return err;
}
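
/* Design note: a single vector services every Rx queue here. All rxq
 * bits are OR-ed into vecmap[0].rxq_map, so either the dedicated Rx
 * vector (I40E_RX_VEC_START) or the misc vector receives all queue
 * interrupts, and Tx queues are left unmapped entirely.
 */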

static int
i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
		    bool on)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct virtchnl_queue_select queue_select;
	int err;
	struct vf_cmd_info args;

	memset(&queue_select, 0, sizeof(queue_select));
	queue_select.vsi_id = vf->vsi_res->vsi_id;

	if (isrx)
		queue_select.rx_queues |= 1 << qid;
	else
		queue_select.tx_queues |= 1 << qid;

	if (on)
		args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
	else
		args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
	args.in_args = (u8 *)&queue_select;
	args.in_args_size = sizeof(queue_select);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
			    isrx ? "RX" : "TX", qid, on ? "on" : "off");

	return err;
}

static int
i40evf_start_queues(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	int i;
	struct i40e_rx_queue *rxq;
	struct i40e_tx_queue *txq;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev_data->rx_queues[i];
		if (rxq->rx_deferred_start)
			continue;
		if (i40evf_dev_rx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
			return -1;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev_data->tx_queues[i];
		if (txq->tx_deferred_start)
			continue;
		if (i40evf_dev_tx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
			return -1;
		}
	}

	return 0;
}

static int
i40evf_stop_queues(struct rte_eth_dev *dev)
{
	int i;

	/* Stop TX queues first */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
			return -1;
		}
	}

	/* Then stop RX queues */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
			return -1;
		}
	}

	return 0;
}

static int
i40evf_add_mac_addr(struct rte_eth_dev *dev,
		    struct ether_addr *addr,
		    __rte_unused uint32_t index,
		    __rte_unused uint32_t pool)
{
	struct virtchnl_ether_addr_list *list;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
			   sizeof(struct virtchnl_ether_addr)];
	int err;
	struct vf_cmd_info args;

	if (is_zero_ether_addr(addr)) {
		PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
			    addr->addr_bytes[0], addr->addr_bytes[1],
			    addr->addr_bytes[2], addr->addr_bytes[3],
			    addr->addr_bytes[4], addr->addr_bytes[5]);
		return I40E_ERR_INVALID_MAC_ADDR;
	}

	list = (struct virtchnl_ether_addr_list *)cmd_buffer;
	list->vsi_id = vf->vsi_res->vsi_id;
	list->num_elements = 1;
	rte_memcpy(list->list[0].addr, addr->addr_bytes,
		   sizeof(addr->addr_bytes));

	args.ops = VIRTCHNL_OP_ADD_ETH_ADDR;
	args.in_args = cmd_buffer;
	args.in_args_size = sizeof(cmd_buffer);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command "
			    "OP_ADD_ETHER_ADDRESS");
	else
		vf->vsi.mac_num++;

	return err;
}

static void
i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
			    struct ether_addr *addr)
{
	struct virtchnl_ether_addr_list *list;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \
			   sizeof(struct virtchnl_ether_addr)];
	int err;
	struct vf_cmd_info args;

	if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
			    addr->addr_bytes[0], addr->addr_bytes[1],
			    addr->addr_bytes[2], addr->addr_bytes[3],
			    addr->addr_bytes[4], addr->addr_bytes[5]);
		return;
	}

	list = (struct virtchnl_ether_addr_list *)cmd_buffer;
	list->vsi_id = vf->vsi_res->vsi_id;
	list->num_elements = 1;
	rte_memcpy(list->list[0].addr, addr->addr_bytes,
		   sizeof(addr->addr_bytes));

	args.ops = VIRTCHNL_OP_DEL_ETH_ADDR;
	args.in_args = cmd_buffer;
	args.in_args_size = sizeof(cmd_buffer);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command "
			    "OP_DEL_ETHER_ADDRESS");
	else
		vf->vsi.mac_num--;
}

static void
i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *addr;

	addr = &data->mac_addrs[index];

	i40evf_del_mac_addr_by_addr(dev, addr);
}

static int
i40evf_query_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct virtchnl_queue_select q_stats;
	int err;
	struct vf_cmd_info args;

	memset(&q_stats, 0, sizeof(q_stats));
	q_stats.vsi_id = vf->vsi_res->vsi_id;
	args.ops = VIRTCHNL_OP_GET_STATS;
	args.in_args = (u8 *)&q_stats;
	args.in_args_size = sizeof(q_stats);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;

	err = i40evf_execute_vf_cmd(dev, &args);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
		*pstats = NULL;
		return err;
	}
	*pstats = (struct i40e_eth_stats *)args.out_buffer;
	return 0;
}

static void
i40evf_stat_update_48(uint64_t *offset,
		      uint64_t *stat)
{
	if (*stat >= *offset)
		*stat = *stat - *offset;
	else
		*stat = (uint64_t)((*stat +
			((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);

	*stat &= I40E_48_BIT_MASK;
}
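
/* The hardware exposes 48-bit free-running counters, so a current
 * reading can be numerically smaller than the stored offset after a
 * wrap. Example: offset = 0xFFFF_FFFF_FFFE and stat = 0x1 yield
 * (0x1 + 2^48) - 0xFFFF_FFFF_FFFE = 3 counted events, and the final
 * mask keeps the result within 48 bits.
 */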

static void
i40evf_stat_update_32(uint64_t *offset,
		      uint64_t *stat)
{
	if (*stat >= *offset)
		*stat = (uint64_t)(*stat - *offset);
	else
		*stat = (uint64_t)((*stat +
			((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
}

static void
i40evf_update_stats(struct i40e_vsi *vsi,
		    struct i40e_eth_stats *nes)
{
	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;

	i40evf_stat_update_48(&oes->rx_bytes,
			      &nes->rx_bytes);
	i40evf_stat_update_48(&oes->rx_unicast,
			      &nes->rx_unicast);
	i40evf_stat_update_48(&oes->rx_multicast,
			      &nes->rx_multicast);
	i40evf_stat_update_48(&oes->rx_broadcast,
			      &nes->rx_broadcast);
	i40evf_stat_update_32(&oes->rx_discards,
			      &nes->rx_discards);
	i40evf_stat_update_32(&oes->rx_unknown_protocol,
			      &nes->rx_unknown_protocol);
	i40evf_stat_update_48(&oes->tx_bytes,
			      &nes->tx_bytes);
	i40evf_stat_update_48(&oes->tx_unicast,
			      &nes->tx_unicast);
	i40evf_stat_update_48(&oes->tx_multicast,
			      &nes->tx_multicast);
	i40evf_stat_update_48(&oes->tx_broadcast,
			      &nes->tx_broadcast);
	i40evf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
	i40evf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
}

static void
i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
{
	int ret;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_eth_stats *pstats = NULL;

	/* read stat values to clear hardware registers */
	ret = i40evf_query_stats(dev, &pstats);

	/* set the stats offset base on current values */
	if (ret == 0)
		vf->vsi.eth_stats_offset = *pstats;
}

static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
				       struct rte_eth_xstat_name *xstats_names,
				       __rte_unused unsigned limit)
{
	unsigned i;

	if (xstats_names != NULL)
		for (i = 0; i < I40EVF_NB_XSTATS; i++) {
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s", rte_i40evf_stats_strings[i].name);
		}
	return I40EVF_NB_XSTATS;
}

static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
				 struct rte_eth_xstat *xstats, unsigned n)
{
	int ret;
	unsigned i;
	struct i40e_eth_stats *pstats = NULL;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_vsi *vsi = &vf->vsi;

	if (n < I40EVF_NB_XSTATS)
		return I40EVF_NB_XSTATS;

	ret = i40evf_query_stats(dev, &pstats);
	if (ret != 0)
		return 0;

	if (!xstats)
		return 0;

	i40evf_update_stats(vsi, pstats);

	/* loop over the xstats array and copy values from pstats */
	for (i = 0; i < I40EVF_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)pstats) +
			rte_i40evf_stats_strings[i].offset);
	}

	return I40EVF_NB_XSTATS;
}

static int
i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct virtchnl_vlan_filter_list *vlan_list;
	uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
			   sizeof(uint16_t)];
	int err;
	struct vf_cmd_info args;

	vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
	vlan_list->vsi_id = vf->vsi_res->vsi_id;
	vlan_list->num_elements = 1;
	vlan_list->vlan_id[0] = vlanid;

	args.ops = VIRTCHNL_OP_ADD_VLAN;
	args.in_args = (u8 *)&cmd_buffer;
	args.in_args_size = sizeof(cmd_buffer);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");

	return err;
}

static int
i40evf_request_queues(struct rte_eth_dev *dev, uint16_t num)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct virtchnl_vf_res_request vfres;
	struct vf_cmd_info args;
	int err;

	vfres.num_queue_pairs = num;

	args.ops = VIRTCHNL_OP_REQUEST_QUEUES;
	args.in_args = (u8 *)&vfres;
	args.in_args_size = sizeof(vfres);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");

	return err;
}
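
/* When the PF grants a queue-count change it signals
 * VIRTCHNL_EVENT_RESET_IMPENDING rather than a normal reply, which is
 * why i40evf_execute_vf_cmd() treats VIRTCHNL_OP_REQUEST_QUEUES as
 * successful only once vf_reset is observed; the caller then completes
 * the change via a device reset (see i40evf_dev_configure()).
 */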

static int
i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct virtchnl_vlan_filter_list *vlan_list;
	uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
			   sizeof(uint16_t)];
	int err;
	struct vf_cmd_info args;

	vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
	vlan_list->vsi_id = vf->vsi_res->vsi_id;
	vlan_list->num_elements = 1;
	vlan_list->vlan_id[0] = vlanid;

	args.ops = VIRTCHNL_OP_DEL_VLAN;
	args.in_args = (u8 *)&cmd_buffer;
	args.in_args_size = sizeof(cmd_buffer);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");

	return err;
}

static const struct rte_pci_id pci_id_i40evf_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Disable IRQ0 */
static inline void
i40evf_disable_irq0(struct i40e_hw *hw)
{
	/* Disable all interrupt types */
	I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, 0);
	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
		       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	I40EVF_WRITE_FLUSH(hw);
}

/* Enable IRQ0 */
static inline void
i40evf_enable_irq0(struct i40e_hw *hw)
{
	/* Enable admin queue interrupt trigger */
	uint32_t val;

	i40evf_disable_irq0(hw);
	val = I40E_READ_REG(hw, I40E_VFINT_ICR0_ENA1);
	val |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK |
	       I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK;
	I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, val);

	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
		       I40E_VFINT_DYN_CTL01_INTENA_MASK |
		       I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
		       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);

	I40EVF_WRITE_FLUSH(hw);
}

static int
i40evf_check_vf_reset_done(struct rte_eth_dev *dev)
{
	int i, reset;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
		reset = I40E_READ_REG(hw, I40E_VFGEN_RSTAT) &
			I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;
		rte_delay_ms(50);
	}

	if (i >= MAX_RESET_WAIT_CNT)
		return -1;

	vf->vf_reset = false;
	vf->pend_msg &= ~PFMSG_RESET_IMPENDING;

	return 0;
}

static int
i40evf_reset_vf(struct rte_eth_dev *dev)
{
	int ret;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (i40e_vf_reset(hw) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Reset VF NIC failed");
		return -1;
	}
	/**
	 * After the vf issues the reset command to the pf, the pf won't
	 * necessarily reset the vf; it depends on what state the pf is in.
	 * If the pf is not initialized yet, it won't perform the vf reset.
	 * Otherwise it will try to reset. Even when the vf is reset, the pf
	 * sets I40E_VFGEN_RSTAT to COMPLETE first, then waits 10ms and sets
	 * it to ACTIVE. During that window the vf may miss the moment that
	 * COMPLETE is set, so the vf waits for a long time here.
	 */
	rte_delay_ms(200);

	ret = i40evf_check_vf_reset_done(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		return ret;
	}

	return 0;
}

static int
i40evf_init_vf(struct rte_eth_dev *dev)
{
	int i, err, bufsz;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint16_t interval =
		i40e_calc_itr_interval(0, 0);

	vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	vf->dev_data = dev->data;
	err = i40e_set_mac_type(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
		goto err;
	}

	err = i40evf_check_vf_reset_done(dev);
	if (err)
		goto err;

	i40e_init_adminq_parameter(hw);
	err = i40e_init_adminq(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
		goto err;
	}

	/* Reset VF and wait until it's complete */
	if (i40evf_reset_vf(dev)) {
		PMD_INIT_LOG(ERR, "reset NIC failed");
		goto err_aq;
	}

	/* VF reset, shutdown admin queue and initialize again */
	if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
		goto err;
	}

	i40e_init_adminq_parameter(hw);
	if (i40e_init_adminq(hw) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "init_adminq failed");
		goto err;
	}

	vf->aq_resp = rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ, 0);
	if (!vf->aq_resp) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
		goto err_aq;
	}
	if (i40evf_check_api_version(dev) != 0) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}
	bufsz = sizeof(struct virtchnl_vf_resource) +
		(I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
	vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
	if (!vf->vf_res) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}

	if (i40evf_get_vf_resource(dev) != 0) {
		PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");
		goto err_alloc;
	}

	/* got the VF config message back from the PF, now we can parse it */
	for (i = 0; i < vf->vf_res->num_vsis; i++) {
		if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			vf->vsi_res = &vf->vf_res->vsi_res[i];
	}

	if (!vf->vsi_res) {
		PMD_INIT_LOG(ERR, "no LAN VSI found");
		goto err_alloc;
	}

	if (hw->mac.type == I40E_MAC_X722_VF)
		vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
	vf->vsi.vsi_id = vf->vsi_res->vsi_id;

	switch (vf->vsi_res->vsi_type) {
	case VIRTCHNL_VSI_SRIOV:
		vf->vsi.type = I40E_VSI_SRIOV;
		break;
	default:
		vf->vsi.type = I40E_VSI_TYPE_UNKNOWN;
		break;
	}
	vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
	vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* Store the MAC address configured by the host, or generate a random one */
	if (is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
		vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
	else
		eth_random_addr(hw->mac.addr); /* Generate a random one */

	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
		       (I40E_ITR_INDEX_DEFAULT <<
			I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) |
		       (interval <<
			I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT));
	I40EVF_WRITE_FLUSH(hw);

	return 0;

err_alloc:
	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
err_api:
	rte_free(vf->aq_resp);
err_aq:
	i40e_shutdown_adminq(hw); /* ignore error */
err:
	return -1;
}

static int
i40evf_uninit_vf(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_closed == 0)
		i40evf_dev_close(dev);
	rte_free(vf->vf_res);
	vf->vf_res = NULL;
	rte_free(vf->aq_resp);
	vf->aq_resp = NULL;

	return 0;
}

static void
i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
		       __rte_unused uint16_t msglen)
{
	struct virtchnl_pf_event *pf_msg =
		(struct virtchnl_pf_event *)msg;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	switch (pf_msg->event) {
	case VIRTCHNL_EVENT_RESET_IMPENDING:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
					      NULL);
		break;
	case VIRTCHNL_EVENT_LINK_CHANGE:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
		vf->link_up = pf_msg->event_data.link_event.link_status;
		vf->link_speed = pf_msg->event_data.link_event.link_speed;
		break;
	case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown event received %u", pf_msg->event);
		break;
	}
}

static void
i40evf_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_arq_event_info info;
	uint16_t pending, aq_opc;
	enum virtchnl_ops msg_opc;
	enum i40e_status_code msg_ret;
	int ret;

	info.buf_len = I40E_AQ_BUF_SZ;
	if (!vf->aq_resp) {
		PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL");
		return;
	}
	info.msg_buf = vf->aq_resp;

	pending = 1;
	while (pending) {
		ret = i40e_clean_arq_element(hw, &info, &pending);

		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ,"
				    " ret: %d", ret);
			break;
		}
		aq_opc = rte_le_to_cpu_16(info.desc.opcode);
		/* For a message sent from pf to vf, the opcode is stored in
		 * cookie_high of struct i40e_aq_desc, while the return error
		 * code is stored in cookie_low. This is done by
		 * i40e_aq_send_msg_to_vf in the PF driver.
		 */
		msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
						info.desc.cookie_high);
		msg_ret = (enum i40e_status_code)rte_le_to_cpu_32(
						info.desc.cookie_low);
		switch (aq_opc) {
		case i40e_aqc_opc_send_msg_to_vf:
			if (msg_opc == VIRTCHNL_OP_EVENT)
				/* process event */
				i40evf_handle_pf_event(dev, info.msg_buf,
						       info.msg_len);
			else {
				/* read message and check it is the expected one */
				if (msg_opc == vf->pend_cmd) {
					vf->cmd_retval = msg_ret;
					/* prevent compiler reordering */
					rte_compiler_barrier();
					_clear_cmd(vf);
				} else
					PMD_DRV_LOG(ERR, "command mismatch,"
						    " expect %u, get %u",
						    vf->pend_cmd, msg_opc);
				PMD_DRV_LOG(DEBUG, "adminq response is received,"
					    " opcode = %d", msg_opc);
			}
			break;
		default:
			PMD_DRV_LOG(ERR, "Request %u is not supported yet",
				    aq_opc);
			break;
		}
	}
}

/**
 * Alarm callback that stands in for the device interrupt handler.
 * Only the adminq interrupt cause is processed in the VF.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
i40evf_dev_alarm_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t icr0;

	i40evf_disable_irq0(hw);

	/* read out interrupt causes */
	icr0 = I40E_READ_REG(hw, I40E_VFINT_ICR01);

	/* No interrupt event indicated */
	if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK))
		goto done;

	if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
		PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
		i40evf_handle_aq_msg(dev);
	}

	/* Link Status Change interrupt */
	if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK)
		PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported,"
			    " do nothing");

done:
	i40evf_enable_irq0(hw);
	rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
			  i40evf_dev_alarm_handler, dev);
}
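
/* Rather than registering a PCI interrupt callback, this driver polls:
 * i40evf_dev_init() arms an EAL alarm that fires every
 * I40EVF_ALARM_INTERVAL microseconds, and the handler re-arms itself
 * above, so admin-queue messages and link events are picked up even
 * when no interrupt is delivered.
 */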

static int
i40evf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct i40e_hw *hw
		= I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &i40evf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &i40e_recv_pkts;
	eth_dev->tx_pkt_burst = &i40e_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		i40e_set_rx_function(eth_dev);
		i40e_set_tx_function(eth_dev);
		return 0;
	}
	i40e_set_default_ptype_table(eth_dev);
	i40e_set_default_pctype_table(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->adapter_stopped = 0;
	hw->adapter_closed = 0;

	if (i40evf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
			  i40evf_dev_alarm_handler, eth_dev);

	/* configure and enable device interrupt */
	i40evf_enable_irq0(hw);

	/* copy mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
					       ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
					       0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
			     " store MAC addresses",
			     ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
		return -ENOMEM;
	}
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	return 0;
}

static int
i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (i40evf_uninit_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");
		return -1;
	}

	return 0;
}

static int eth_i40evf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct i40e_adapter), i40evf_dev_init);
}

static int eth_i40evf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, i40evf_dev_uninit);
}

/*
 * virtual function driver struct
 */
static struct rte_pci_driver rte_i40evf_pmd = {
	.id_table = pci_id_i40evf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_i40evf_pci_probe,
	.remove = eth_i40evf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio-pci");

static int
i40evf_dev_configure(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_adapter *ad =
		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
					   dev->data->nb_tx_queues);

	/* Initialize to TRUE. If any of the Rx queues doesn't meet the
	 * bulk allocation or vector Rx preconditions, we will reset it.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->rx_vec_allowed = true;
	ad->tx_simple_allowed = true;
	ad->tx_vec_allowed = true;

	if (num_queue_pairs > vf->vsi_res->num_queue_pairs) {
		int ret = 0;

		PMD_DRV_LOG(INFO, "change queue pairs from %u to %u",
			    vf->vsi_res->num_queue_pairs, num_queue_pairs);
		ret = i40evf_request_queues(dev, num_queue_pairs);
		if (ret != 0)
			return ret;

		ret = i40evf_dev_reset(dev);
		if (ret != 0)
			return ret;
	}

	return i40evf_init_vlan(dev);
}

static int
i40evf_init_vlan(struct rte_eth_dev *dev)
{
	/* Apply the vlan offload setting */
	i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);

	return 0;
}

static int
i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	/* Vlan stripping setting */
	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			i40evf_enable_vlan_strip(dev);
		else
			i40evf_disable_vlan_strip(dev);
	}

	return 0;
}

static int
i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct i40e_rx_queue *rxq;
	int err;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	rxq = dev->data->rx_queues[rx_queue_id];

	err = i40e_alloc_rx_queue_mbufs(rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
		return err;
	}

	rte_wmb();

	/* Init the RX tail register. */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	I40EVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
			    rx_queue_id);
		return err;
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct i40e_rx_queue *rxq;
	int err;

	rxq = dev->data->rx_queues[rx_queue_id];

	err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
			    rx_queue_id);
		return err;
	}

	i40e_rx_queue_release_mbufs(rxq);
	i40e_reset_rx_queue(rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	int err;

	PMD_INIT_FUNC_TRACE();

	/* Ready to switch the queue on */
	err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
			    tx_queue_id);
		return err;
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct i40e_tx_queue *txq;
	int err;

	txq = dev->data->tx_queues[tx_queue_id];

	err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
			    tx_queue_id);
		return err;
	}

	i40e_tx_queue_release_mbufs(txq);
	i40e_reset_tx_queue(txq);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}
1727
static int
i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;

	if (on)
		ret = i40evf_add_vlan(dev, vlan_id);
	else
		ret = i40evf_del_vlan(dev, vlan_id);

	return ret;
}

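/*
 * Usage sketch (the caller side is an assumption, not driver code):
 * per-VLAN filtering is driven through the generic API, e.g.
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);	(add VLAN ID 100)
 *	rte_eth_dev_vlan_filter(port_id, 100, 0);	(remove it again)
 *
 * which requires DEV_RX_OFFLOAD_VLAN_FILTER to be enabled and maps onto
 * the i40evf_add_vlan()/i40evf_del_vlan() virtchnl messages to the PF.
 */
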
static int
i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = dev->data;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t buf_size, len;

	rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id);
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	I40EVF_WRITE_FLUSH(hw);

	/* Calculate the maximum packet length allowed */
	mbp_priv = rte_mempool_get_priv(rxq->mp);
	buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
			      RTE_PKTMBUF_HEADROOM);
	rxq->hs_mode = i40e_header_split_none;
	rxq->rx_hdr_len = 0;
	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
	len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS;
	rxq->max_pkt_len = RTE_MIN(len,
				   dev_data->dev_conf.rxmode.max_rx_pkt_len);

	/**
	 * Check if the jumbo frame and maximum packet length are set correctly
	 */
	if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
		    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, as jumbo "
				    "frame is enabled", (uint32_t)ETHER_MAX_LEN,
				    (uint32_t)I40E_FRAME_SIZE_MAX);
			return I40E_ERR_CONFIG;
		}
	} else {
		if (rxq->max_pkt_len < ETHER_MIN_LEN ||
		    rxq->max_pkt_len > ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, as jumbo "
				    "frame is disabled", (uint32_t)ETHER_MIN_LEN,
				    (uint32_t)ETHER_MAX_LEN);
			return I40E_ERR_CONFIG;
		}
	}

	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    rxq->max_pkt_len > buf_size)
		dev_data->scattered_rx = 1;

	return 0;
}

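/*
 * Worked example for the sizing above (numbers are assumptions for a
 * common configuration, not values this driver mandates): with a mempool
 * created with RTE_MBUF_DEFAULT_BUF_SIZE (2048 bytes of data room plus
 * 128 bytes of headroom), buf_size = 2048, which is already a multiple
 * of 1 << I40E_RXQ_CTX_DBUFF_SHIFT (128), so rx_buf_len = 2048. Chaining
 * up to I40E_MAX_CHAINED_RX_BUFFERS (5) buffers gives len = 10240, and
 * max_pkt_len becomes min(10240, max_rx_pkt_len). Any max_pkt_len larger
 * than a single buffer forces scattered_rx = 1 so the multi-segment
 * receive path is selected.
 */
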
static int
i40evf_rx_init(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint16_t i;
	int ret = I40E_SUCCESS;
	struct i40e_rx_queue **rxq =
		(struct i40e_rx_queue **)dev->data->rx_queues;

	i40evf_config_rss(vf);
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
			continue;
		ret = i40evf_rxq_init(dev, rxq[i]);
		if (ret != I40E_SUCCESS)
			break;
	}
	if (ret == I40E_SUCCESS)
		i40e_set_rx_function(dev);

	return ret;
}

static void
i40evf_tx_init(struct rte_eth_dev *dev)
{
	uint16_t i;
	struct i40e_tx_queue **txq =
		(struct i40e_tx_queue **)dev->data->tx_queues;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);

	i40e_set_tx_function(dev);
}

static inline void
i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (!rte_intr_allow_others(intr_handle)) {
		I40E_WRITE_REG(hw,
			       I40E_VFINT_DYN_CTL01,
			       I40E_VFINT_DYN_CTL01_INTENA_MASK |
			       I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
			       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
		I40EVF_WRITE_FLUSH(hw);
		return;
	}

	I40EVF_WRITE_FLUSH(hw);
}

static inline void
i40evf_disable_queues_intr(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (!rte_intr_allow_others(intr_handle)) {
		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
			       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
		I40EVF_WRITE_FLUSH(hw);
		return;
	}

	I40EVF_WRITE_FLUSH(hw);
}

static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t interval =
		i40e_calc_itr_interval(0, 0);
	uint16_t msix_intr;

	msix_intr = intr_handle->intr_vec[queue_id];
	if (msix_intr == I40E_MISC_VEC_ID)
		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
			       I40E_VFINT_DYN_CTL01_INTENA_MASK |
			       I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
			       (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
			       (interval <<
				I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT));
	else
		I40E_WRITE_REG(hw,
			       I40E_VFINT_DYN_CTLN1(msix_intr -
						    I40E_RX_VEC_START),
			       I40E_VFINT_DYN_CTLN1_INTENA_MASK |
			       I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
			       (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
			       (interval <<
				I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT));

	I40EVF_WRITE_FLUSH(hw);

	return 0;
}

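/*
 * Register-layout note plus a usage sketch (the application lines are an
 * assumption): vector 0 (I40E_MISC_VEC_ID) is controlled through the
 * dedicated VFINT_DYN_CTL01 register, while queue vectors are numbered
 * from I40E_RX_VEC_START and index the VFINT_DYN_CTLN1[] array after
 * subtracting that base. An application in RX interrupt mode typically
 * does
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	(wait, e.g. via rte_epoll_wait())
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *
 * to re-arm and mask the per-queue vector around each service pass.
 */
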
static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t msix_intr;

	msix_intr = intr_handle->intr_vec[queue_id];
	if (msix_intr == I40E_MISC_VEC_ID)
		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
	else
		I40E_WRITE_REG(hw,
			       I40E_VFINT_DYN_CTLN1(msix_intr -
						    I40E_RX_VEC_START),
			       0);

	I40EVF_WRITE_FLUSH(hw);

	return 0;
}

static void
i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
{
	struct virtchnl_ether_addr_list *list;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int err, i, j;
	int next_begin = 0;
	int begin = 0;
	uint32_t len;
	struct ether_addr *addr;
	struct vf_cmd_info args;

	do {
		j = 0;
		len = sizeof(struct virtchnl_ether_addr_list);
		for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) {
			if (is_zero_ether_addr(&dev->data->mac_addrs[i]))
				continue;
			len += sizeof(struct virtchnl_ether_addr);
			if (len >= I40E_AQ_BUF_SZ) {
				next_begin = i + 1;
				break;
			}
		}

		list = rte_zmalloc("i40evf_del_mac_buffer", len, 0);
		if (!list) {
			PMD_DRV_LOG(ERR, "fail to allocate memory");
			return;
		}

		for (i = begin; i < next_begin; i++) {
			addr = &dev->data->mac_addrs[i];
			if (is_zero_ether_addr(addr))
				continue;
			rte_memcpy(list->list[j].addr, addr->addr_bytes,
				   sizeof(addr->addr_bytes));
			PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
				    addr->addr_bytes[0], addr->addr_bytes[1],
				    addr->addr_bytes[2], addr->addr_bytes[3],
				    addr->addr_bytes[4], addr->addr_bytes[5]);
			j++;
		}
		list->vsi_id = vf->vsi_res->vsi_id;
		list->num_elements = j;
		args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
				 VIRTCHNL_OP_DEL_ETH_ADDR;
		args.in_args = (uint8_t *)list;
		args.in_args_size = len;
		args.out_buffer = vf->aq_resp;
		args.out_size = I40E_AQ_BUF_SZ;
		err = i40evf_execute_vf_cmd(dev, &args);
		if (err) {
			PMD_DRV_LOG(ERR, "fail to execute command %s",
				    add ? "OP_ADD_ETHER_ADDRESS" :
				    "OP_DEL_ETHER_ADDRESS");
		} else {
			if (add)
				vf->vsi.mac_num++;
			else
				vf->vsi.mac_num--;
		}
		rte_free(list);
		begin = next_begin;
	} while (begin < I40E_NUM_MACADDR_MAX);
}

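/*
 * Sizing note for the loop above (struct sizes are assumptions based on
 * the virtchnl definitions, not values spelled out in this file): each
 * virtchnl_ether_addr carries a 6-byte MAC plus 2 bytes of padding, so
 * with a 4 KiB admin-queue buffer (I40E_AQ_BUF_SZ) one message holds
 * roughly (4096 - sizeof(struct virtchnl_ether_addr_list)) / 8, i.e.
 * around 500 addresses. The do/while therefore chunks the
 * I40E_NUM_MACADDR_MAX slots into as many VIRTCHNL_OP_ADD/DEL_ETH_ADDR
 * messages as needed.
 */
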
static int
i40evf_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_stopped = 0;

	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	if (i40evf_rx_init(dev) != 0) {
		PMD_DRV_LOG(ERR, "failed to do RX init");
		return -1;
	}

	i40evf_tx_init(dev);

	if (i40evf_configure_vsi_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		goto err_queue;
	}
	if (i40evf_config_irq_map(dev)) {
		PMD_DRV_LOG(ERR, "config_irq_map failed");
		goto err_queue;
	}

	/* Set all mac addrs */
	i40evf_add_del_all_mac_addr(dev, TRUE);
	/* Set all multicast addresses */
	i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
				    TRUE);

	if (i40evf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");
		goto err_mac;
	}

	/* only enable interrupt in rx interrupt mode */
	if (dev->data->dev_conf.intr_conf.rxq != 0)
		rte_intr_enable(intr_handle);

	i40evf_enable_queues_intr(dev);

	return 0;

err_mac:
	i40evf_add_del_all_mac_addr(dev, FALSE);
	i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
				    FALSE);
err_queue:
	return -1;
}

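/*
 * Caller-side sketch (an assumption about the typical application flow,
 * not something this file defines): dev_start is the last step of the
 * usual ethdev bring-up sequence,
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, q, nb_desc, socket_id, NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, q, nb_desc, socket_id, NULL);
 *	rte_eth_dev_start(port_id);
 *
 * so by the time the handler above runs, every queue it initializes has
 * already been set up and q_set is expected to be true.
 */
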
static void
i40evf_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.intr_conf.rxq != 0)
		rte_intr_disable(intr_handle);

	if (hw->adapter_stopped == 1)
		return;
	i40evf_stop_queues(dev);
	i40evf_disable_queues_intr(dev);
	i40e_dev_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
	/* remove all mac addrs */
	i40evf_add_del_all_mac_addr(dev, FALSE);
	/* remove all multicast addresses */
	i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
				    FALSE);
	hw->adapter_stopped = 1;
}

static int
i40evf_dev_link_update(struct rte_eth_dev *dev,
		       __rte_unused int wait_to_complete)
{
	struct rte_eth_link new_link;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	/*
	 * The DPDK PF host provides an interface to acquire link status,
	 * while the Linux driver does not.
	 */

	memset(&new_link, 0, sizeof(new_link));
	/* Linux driver PF host */
	switch (vf->link_speed) {
	case I40E_LINK_SPEED_100MB:
		new_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case I40E_LINK_SPEED_1GB:
		new_link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case I40E_LINK_SPEED_10GB:
		new_link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case I40E_LINK_SPEED_20GB:
		new_link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case I40E_LINK_SPEED_25GB:
		new_link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case I40E_LINK_SPEED_40GB:
		new_link.link_speed = ETH_SPEED_NUM_40G;
		break;
	default:
		new_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	}
	/* full duplex only */
	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_status = vf->link_up ? ETH_LINK_UP :
			       ETH_LINK_DOWN;
	new_link.link_autoneg =
		!(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);

	return rte_eth_linkstatus_set(dev, &new_link);
}

static void
i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int ret;

	/* If enabled, just return */
	if (vf->promisc_unicast_enabled)
		return;

	ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
	if (ret == 0)
		vf->promisc_unicast_enabled = TRUE;
}

static void
i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int ret;

	/* If disabled, just return */
	if (!vf->promisc_unicast_enabled)
		return;

	ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
	if (ret == 0)
		vf->promisc_unicast_enabled = FALSE;
}

static void
i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int ret;

	/* If enabled, just return */
	if (vf->promisc_multicast_enabled)
		return;

	ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
	if (ret == 0)
		vf->promisc_multicast_enabled = TRUE;
}

static void
i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int ret;

	/* If disabled, just return */
	if (!vf->promisc_multicast_enabled)
		return;

	ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
	if (ret == 0)
		vf->promisc_multicast_enabled = FALSE;
}

static void
i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	dev_info->max_rx_queues = I40E_MAX_QP_NUM_PER_VF;
	dev_info->max_tx_queues = I40E_MAX_QP_NUM_PER_VF;
	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
	dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
	dev_info->min_mtu = ETHER_MIN_MTU;
	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
	dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
	dev_info->rx_queue_offload_capa = 0;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_VLAN_FILTER;

	dev_info->tx_queue_offload_capa = 0;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO |
		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = I40E_DEFAULT_RX_PTHRESH,
			.hthresh = I40E_DEFAULT_RX_HTHRESH,
			.wthresh = I40E_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = I40E_DEFAULT_TX_PTHRESH,
			.hthresh = I40E_DEFAULT_TX_HTHRESH,
			.wthresh = I40E_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};
}

static int
i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	int ret;
	struct i40e_eth_stats *pstats = NULL;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_vsi *vsi = &vf->vsi;

	ret = i40evf_query_stats(dev, &pstats);
	if (ret == 0) {
		i40evf_update_stats(vsi, pstats);

		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
				  pstats->rx_broadcast;
		stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
				  pstats->tx_unicast;
		stats->imissed = pstats->rx_discards;
		stats->oerrors = pstats->tx_errors + pstats->tx_discards;
		stats->ibytes = pstats->rx_bytes;
		stats->obytes = pstats->tx_bytes;
	} else {
		PMD_DRV_LOG(ERR, "Get statistics failed");
	}
	return ret;
}

static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	i40evf_dev_stop(dev);
	i40e_dev_free_queues(dev);
	/*
	 * Disable promiscuous mode before resetting the VF. This is a
	 * workaround for operation with the kernel PF driver, not the
	 * normal teardown path.
	 */
	i40evf_dev_promiscuous_disable(dev);
	i40evf_dev_allmulticast_disable(dev);
	rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);

	i40evf_reset_vf(dev);
	i40e_shutdown_adminq(hw);
	i40evf_disable_irq0(hw);
	hw->adapter_closed = 1;
}

/*
 * Reset VF device only to re-initialize resources in PMD layer
 */
static int
i40evf_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = i40evf_dev_uninit(dev);
	if (ret)
		return ret;

	ret = i40evf_dev_init(dev);

	return ret;
}

static int
i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int ret;

	if (!lut)
		return -EINVAL;

	if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
		ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, FALSE,
					  lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
			return ret;
		}
	} else {
		uint32_t *lut_dw = (uint32_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			lut_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HLUT(i));
	}

	return 0;
}

static int
i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct i40e_vf *vf;
	struct i40e_hw *hw;
	int ret;

	if (!vsi || !lut)
		return -EINVAL;

	vf = I40E_VSI_TO_VF(vsi);
	hw = I40E_VSI_TO_HW(vsi);

	if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
		ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE,
					  lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
			return ret;
		}
	} else {
		uint32_t *lut_dw = (uint32_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
		I40EVF_WRITE_FLUSH(hw);
	}

	return 0;
}

static int
i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint8_t *lut;
	uint16_t i, idx, shift;
	int ret;

	if (reta_size != ETH_RSS_RETA_SIZE_64) {
		PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
			    "table (%d) doesn't match what the hardware "
			    "supports (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
		return -EINVAL;
	}

	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
	if (ret)
		goto out;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			lut[i] = reta_conf[idx].reta[shift];
	}
	ret = i40evf_set_rss_lut(&vf->vsi, lut, reta_size);

out:
	rte_free(lut);

	return ret;
}

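/*
 * Worked example for the indexing above (application values are
 * assumptions): with reta_size = 64 and RTE_RETA_GROUP_SIZE = 64, idx is
 * always 0 and shift equals i, so a caller spreading traffic over two
 * queues could do
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[1];
 *	reta_conf[0].mask = UINT64_MAX;
 *	for (i = 0; i < 64; i++)
 *		reta_conf[0].reta[i] = i % 2;
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, 64);
 *
 * The read-modify-write through i40evf_get_rss_lut() keeps entries whose
 * mask bit is clear untouched.
 */
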
static int
i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint16_t i, idx, shift;
	uint8_t *lut;
	int ret;

	if (reta_size != ETH_RSS_RETA_SIZE_64) {
		PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
			    "table (%d) doesn't match what the hardware "
			    "supports (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
		return -EINVAL;
	}

	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
	if (ret)
		goto out;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = lut[i];
	}

out:
	rte_free(lut);

	return ret;
}

static int
i40evf_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
{
	struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int ret = 0;

	if (!key || key_len == 0) {
		PMD_DRV_LOG(DEBUG, "No key to be configured");
		return 0;
	} else if (key_len != (I40E_VFQF_HKEY_MAX_INDEX + 1) *
		   sizeof(uint32_t)) {
		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
		return -EINVAL;
	}

	if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
		struct i40e_aqc_get_set_rss_key_data *key_dw =
			(struct i40e_aqc_get_set_rss_key_data *)key;

		ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
		if (ret)
			PMD_INIT_LOG(ERR, "Failed to configure RSS key "
				     "via AQ");
	} else {
		uint32_t *hash_key = (uint32_t *)key;
		uint16_t i;

		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HKEY(i), hash_key[i]);
		I40EVF_WRITE_FLUSH(hw);
	}

	return ret;
}

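/*
 * Key-length note (the register count is an assumption taken from the
 * i40e register map): I40E_VFQF_HKEY_MAX_INDEX is 12, so the VF hash key
 * is (12 + 1) * sizeof(uint32_t) = 52 bytes, split across 13 32-bit
 * VFQF_HKEY registers in the non-AQ path. Any other key_len is rejected
 * with -EINVAL above.
 */
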
static int
i40evf_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
{
	struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int ret;

	if (!key || !key_len)
		return -EINVAL;

	if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
		ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
			(struct i40e_aqc_get_set_rss_key_data *)key);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
			return ret;
		}
	} else {
		uint32_t *key_dw = (uint32_t *)key;
		uint16_t i;

		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
			key_dw[i] = i40e_read_rx_ctl(hw, I40E_VFQF_HKEY(i));
	}
	*key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	return 0;
}

static int
i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
{
	struct i40e_hw *hw = I40E_VF_TO_HW(vf);
	uint64_t hena;
	int ret;

	ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key,
				 rss_conf->rss_key_len);
	if (ret)
		return ret;

	hena = i40e_config_hena(vf->adapter, rss_conf->rss_hf);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
	I40EVF_WRITE_FLUSH(hw);

	return 0;
}

static void
i40evf_disable_rss(struct i40e_vf *vf)
{
	struct i40e_hw *hw = I40E_VF_TO_HW(vf);

	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), 0);
	I40EVF_WRITE_FLUSH(hw);
}

static int
i40evf_config_rss(struct i40e_vf *vf)
{
	struct i40e_hw *hw = I40E_VF_TO_HW(vf);
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
	uint16_t num;

	if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		i40evf_disable_rss(vf);
		PMD_DRV_LOG(DEBUG, "RSS not configured");
		return 0;
	}

	num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF);
	/* Fill out the look up table */
	for (i = 0, j = 0; i < nb_q; i++, j++) {
		if (j >= num)
			j = 0;
		lut = (lut << 8) | j;
		if ((i & 3) == 3)
			I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
	}

	rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & vf->adapter->flow_types_mask) == 0) {
		i40evf_disable_rss(vf);
		PMD_DRV_LOG(DEBUG, "No hash flag is set");
		return 0;
	}

	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
	    (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Calculate the default hash key */
		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
			rss_key_default[i] = (uint32_t)rte_rand();
		rss_conf.rss_key = (uint8_t *)rss_key_default;
		rss_conf.rss_key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
				       sizeof(uint32_t);
	}

	return i40evf_hw_rss_hash_set(vf, &rss_conf);
}

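/*
 * Worked example of the LUT packing above (the queue count is an
 * assumption): with num = 3 RX queues the queue indexes cycle
 * 0, 1, 2, 0, ... One byte is shifted in per iteration, so after i = 3
 * the accumulated value is
 *
 *	lut = (((0 << 8 | 1) << 8 | 2) << 8 | 0) = 0x00010200
 *
 * and that 32-bit word is written to I40E_VFQF_HLUT(0), packing four
 * one-byte LUT entries per register; (i & 3) == 3 triggers a write on
 * every fourth entry.
 */
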
static int
i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_conf *rss_conf)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t rss_hf = rss_conf->rss_hf & vf->adapter->flow_types_mask;
	uint64_t hena;

	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;

	if (!(hena & vf->adapter->pctypes_mask)) { /* RSS disabled */
		if (rss_hf != 0) /* Enable RSS */
			return -EINVAL;
		return 0;
	}

	/* RSS enabled */
	if (rss_hf == 0) /* Disable RSS */
		return -EINVAL;

	return i40evf_hw_rss_hash_set(vf, rss_conf);
}

static int
i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			     struct rte_eth_rss_conf *rss_conf)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t hena;

	i40evf_get_rss_key(&vf->vsi, rss_conf->rss_key,
			   &rss_conf->rss_key_len);

	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
	rss_conf->rss_hf = i40e_parse_hena(vf->adapter, hena);

	return 0;
}

static int
i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = vf->dev_data;
	uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
	int ret = 0;

	/* check if mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
		return -EINVAL;

	/* MTU setting is forbidden while the port is started */
	if (dev_data->dev_started) {
		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
			    dev_data->port_id);
		return -EBUSY;
	}

	if (frame_size > ETHER_MAX_LEN)
		dev_data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev_data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return ret;
}

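/*
 * Arithmetic note (the overhead value is an assumption taken from the
 * i40e headers): I40E_ETH_OVERHEAD accounts for the Ethernet header, CRC
 * and a double VLAN tag, 14 + 4 + 2 * 4 = 26 bytes. A standard 1500-byte
 * MTU therefore yields frame_size = 1526, which already exceeds
 * ETHER_MAX_LEN (1518), so DEV_RX_OFFLOAD_JUMBO_FRAME is turned on even
 * for the default MTU.
 */
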
static int
i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
			    struct ether_addr *mac_addr)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!is_valid_assigned_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
		return -EINVAL;
	}

	if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
		return -EPERM;

	i40evf_del_mac_addr_by_addr(dev, (struct ether_addr *)hw->mac.addr);

	if (i40evf_add_mac_addr(dev, mac_addr, 0, 0) != 0)
		return -EIO;

	ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
	return 0;
}

static int
i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
			    struct ether_addr *mc_addrs,
			    uint32_t mc_addrs_num, bool add)
{
	struct virtchnl_ether_addr_list *list;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) +
		(I40E_NUM_MACADDR_MAX * sizeof(struct virtchnl_ether_addr))];
	uint32_t i;
	int err;
	struct vf_cmd_info args;

	if (mc_addrs == NULL || mc_addrs_num == 0)
		return 0;

	if (mc_addrs_num > I40E_NUM_MACADDR_MAX)
		return -EINVAL;

	list = (struct virtchnl_ether_addr_list *)cmd_buffer;
	list->vsi_id = vf->vsi_res->vsi_id;
	list->num_elements = mc_addrs_num;

	for (i = 0; i < mc_addrs_num; i++) {
		if (!I40E_IS_MULTICAST(mc_addrs[i].addr_bytes)) {
			PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
				    mc_addrs[i].addr_bytes[0],
				    mc_addrs[i].addr_bytes[1],
				    mc_addrs[i].addr_bytes[2],
				    mc_addrs[i].addr_bytes[3],
				    mc_addrs[i].addr_bytes[4],
				    mc_addrs[i].addr_bytes[5]);
			return -EINVAL;
		}

		memcpy(list->list[i].addr, mc_addrs[i].addr_bytes,
		       sizeof(list->list[i].addr));
	}

	args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR;
	args.in_args = cmd_buffer;
	args.in_args_size = sizeof(struct virtchnl_ether_addr_list) +
		i * sizeof(struct virtchnl_ether_addr);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	err = i40evf_execute_vf_cmd(dev, &args);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to execute command %s",
			    add ? "OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR");
		return err;
	}

	return 0;
}

static int
i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addrs,
			uint32_t mc_addrs_num)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	int err;

	/* flush previous addresses */
	err = i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
					  FALSE);
	if (err)
		return err;

	vf->mc_addrs_num = 0;

	/* add new ones */
	err = i40evf_add_del_mc_addr_list(dev, mc_addrs, mc_addrs_num,
					  TRUE);
	if (err)
		return err;

	vf->mc_addrs_num = mc_addrs_num;
	memcpy(vf->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs));

	return 0;
}
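
/*
 * Usage sketch (the addresses and count are assumptions): applications
 * replace the whole multicast filter list in one call, e.g.
 *
 *	struct ether_addr mc[2] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
 *	};
 *	rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 *
 * The handler above implements the flush-then-add semantics that API
 * expects: the old list is removed first, so a failure part-way leaves
 * vf->mc_addrs_num consistent with what the PF actually holds.
 */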