]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | /*- |
2 | * BSD LICENSE | |
3 | * | |
4 | * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. | |
5 | * All rights reserved. | |
6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | |
10 | * | |
11 | * * Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * * Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in | |
15 | * the documentation and/or other materials provided with the | |
16 | * distribution. | |
17 | * * Neither the name of Intel Corporation nor the names of its | |
18 | * contributors may be used to endorse or promote products derived | |
19 | * from this software without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
22 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
24 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
25 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
26 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
27 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
28 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
29 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
32 | */ | |
33 | ||
34 | #include <sys/queue.h> | |
35 | #include <stdio.h> | |
36 | #include <errno.h> | |
37 | #include <stdint.h> | |
38 | #include <string.h> | |
39 | #include <unistd.h> | |
40 | #include <stdarg.h> | |
41 | #include <inttypes.h> | |
42 | #include <rte_byteorder.h> | |
43 | #include <rte_common.h> | |
44 | #include <rte_cycles.h> | |
45 | ||
46 | #include <rte_interrupts.h> | |
47 | #include <rte_log.h> | |
48 | #include <rte_debug.h> | |
49 | #include <rte_pci.h> | |
50 | #include <rte_atomic.h> | |
51 | #include <rte_branch_prediction.h> | |
52 | #include <rte_memory.h> | |
53 | #include <rte_memzone.h> | |
54 | #include <rte_eal.h> | |
55 | #include <rte_alarm.h> | |
56 | #include <rte_ether.h> | |
57 | #include <rte_ethdev.h> | |
58 | #include <rte_atomic.h> | |
59 | #include <rte_malloc.h> | |
60 | #include <rte_dev.h> | |
61 | ||
62 | #include "i40e_logs.h" | |
63 | #include "base/i40e_prototype.h" | |
64 | #include "base/i40e_adminq_cmd.h" | |
65 | #include "base/i40e_type.h" | |
66 | ||
67 | #include "i40e_rxtx.h" | |
68 | #include "i40e_ethdev.h" | |
69 | #include "i40e_pf.h" | |
/* MSI-X vector used for the VSI when the PF host is a DPDK PF. */
#define I40EVF_VSI_DEFAULT_MSIX_INTR 1
/* MSI-X vector used for the VSI when the PF host is a Linux kernel PF. */
#define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0

/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
#define I40EVF_BUSY_WAIT_COUNT 50
/* Max polls while waiting for a VF reset to complete. */
#define MAX_RESET_WAIT_CNT 20
77 | ||
/* Bookkeeping for one message read back from the PF over the admin
 * receive queue (see i40evf_read_pfmsg()).
 */
struct i40evf_arq_msg_info {
	enum i40e_virtchnl_ops ops;	/* virtchnl opcode of the reply */
	enum i40e_status_code result;	/* completion status from the PF */
	uint16_t buf_len;		/* capacity of the caller-supplied buffer */
	uint16_t msg_len;		/* actual payload length read from the PF */
	uint8_t *msg;			/* caller-supplied payload buffer */
};
85 | ||
/* Descriptor for one virtchnl command sent from VF to PF via
 * i40evf_execute_vf_cmd().
 */
struct vf_cmd_info {
	enum i40e_virtchnl_ops ops;	/* virtchnl opcode to send */
	uint8_t *in_args;		/* command payload (may be NULL) */
	uint32_t in_args_size;		/* payload length in bytes */
	uint8_t *out_buffer;		/* buffer receiving the PF's reply */
	/* Input & output type. pass in buffer size and pass out
	 * actual return result
	 */
	uint32_t out_size;
};
96 | ||
/* Outcome classification for a single admin-queue read attempt. */
enum i40evf_aq_result {
	I40EVF_MSG_ERR = -1, /* Meet error when accessing admin queue */
	I40EVF_MSG_NON,      /* Read nothing from admin queue */
	I40EVF_MSG_SYS,      /* Read system msg from admin queue */
	I40EVF_MSG_CMD,      /* Read async command result */
};
103 | ||
/* Forward declarations for the eth_dev_ops callbacks and local helpers
 * implemented below.
 */
static int i40evf_dev_configure(struct rte_eth_dev *dev);
static int i40evf_dev_start(struct rte_eth_dev *dev);
static void i40evf_dev_stop(struct rte_eth_dev *dev);
static void i40evf_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int i40evf_dev_link_update(struct rte_eth_dev *dev,
				  __rte_unused int wait_to_complete);
static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
				 struct rte_eth_stats *stats);
static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
				 struct rte_eth_xstat *xstats, unsigned n);
static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev,
				       struct rte_eth_xstat_name *xstats_names,
				       unsigned limit);
static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
				  uint16_t vlan_id, int on);
static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid,
				int on);
static void i40evf_dev_close(struct rte_eth_dev *dev);
static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40evf_init_vlan(struct rte_eth_dev *dev);
static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
				     uint16_t rx_queue_id);
static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
				    uint16_t rx_queue_id);
static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
				     uint16_t tx_queue_id);
static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
				    uint16_t tx_queue_id);
static void i40evf_add_mac_addr(struct rte_eth_dev *dev,
				struct ether_addr *addr,
				uint32_t index,
				uint32_t pool);
static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
static int i40evf_config_rss(struct i40e_vf *vf);
static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
					struct rte_eth_rss_conf *rss_conf);
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static void i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
				   uint8_t *msg,
				   uint16_t msglen);

/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];

/* Pairs an xstat display name with its byte offset inside
 * struct i40e_eth_stats.
 */
struct rte_i40evf_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
169 | ||
170 | static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = { | |
171 | {"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)}, | |
172 | {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)}, | |
173 | {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)}, | |
174 | {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)}, | |
175 | {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)}, | |
176 | {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats, | |
177 | rx_unknown_protocol)}, | |
178 | {"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)}, | |
179 | {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)}, | |
180 | {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)}, | |
181 | {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_bytes)}, | |
182 | {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_bytes)}, | |
183 | {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_bytes)}, | |
184 | }; | |
185 | ||
186 | #define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \ | |
187 | sizeof(rte_i40evf_stats_strings[0])) | |
188 | ||
/* ethdev callback table for the i40e VF driver. RX/TX queue setup,
 * release, count, descriptor-done and info callbacks are shared with the
 * PF driver (i40e_* symbols); the i40evf_* callbacks wrap virtchnl
 * commands to the PF host.
 */
static const struct eth_dev_ops i40evf_eth_dev_ops = {
	.dev_configure        = i40evf_dev_configure,
	.dev_start            = i40evf_dev_start,
	.dev_stop             = i40evf_dev_stop,
	.promiscuous_enable   = i40evf_dev_promiscuous_enable,
	.promiscuous_disable  = i40evf_dev_promiscuous_disable,
	.allmulticast_enable  = i40evf_dev_allmulticast_enable,
	.allmulticast_disable = i40evf_dev_allmulticast_disable,
	.link_update          = i40evf_dev_link_update,
	.stats_get            = i40evf_dev_stats_get,
	.xstats_get           = i40evf_dev_xstats_get,
	.xstats_get_names     = i40evf_dev_xstats_get_names,
	.xstats_reset         = i40evf_dev_xstats_reset,
	.dev_close            = i40evf_dev_close,
	.dev_infos_get        = i40evf_dev_info_get,
	.dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
	.vlan_filter_set      = i40evf_vlan_filter_set,
	.vlan_offload_set     = i40evf_vlan_offload_set,
	.vlan_pvid_set        = i40evf_vlan_pvid_set,
	.rx_queue_start       = i40evf_dev_rx_queue_start,
	.rx_queue_stop        = i40evf_dev_rx_queue_stop,
	.tx_queue_start       = i40evf_dev_tx_queue_start,
	.tx_queue_stop        = i40evf_dev_tx_queue_stop,
	.rx_queue_setup       = i40e_dev_rx_queue_setup,
	.rx_queue_release     = i40e_dev_rx_queue_release,
	.rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
	.rx_descriptor_done   = i40e_dev_rx_descriptor_done,
	.tx_queue_setup       = i40e_dev_tx_queue_setup,
	.tx_queue_release     = i40e_dev_tx_queue_release,
	.rx_queue_count       = i40e_dev_rx_queue_count,
	.rxq_info_get         = i40e_rxq_info_get,
	.txq_info_get         = i40e_txq_info_get,
	.mac_addr_add	      = i40evf_add_mac_addr,
	.mac_addr_remove      = i40evf_del_mac_addr,
	.reta_update          = i40evf_dev_rss_reta_update,
	.reta_query           = i40evf_dev_rss_reta_query,
	.rss_hash_update      = i40evf_dev_rss_hash_update,
	.rss_hash_conf_get    = i40evf_dev_rss_hash_conf_get,
};
229 | ||
/*
 * Read data in admin queue to get msg from pf driver
 *
 * Returns an i40evf_aq_result classifying what was read:
 *  - I40EVF_MSG_NON: nothing pending on the admin queue
 *  - I40EVF_MSG_ERR: admin queue access failed
 *  - I40EVF_MSG_SYS: an unsolicited PF event (link/reset/close) was
 *    consumed and applied to the vf state
 *  - I40EVF_MSG_CMD: a reply to a previously issued VF command;
 *    data->msg_len/result/ops are filled for the caller
 */
static enum i40evf_aq_result
i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_arq_event_info event;
	enum i40e_virtchnl_ops opcode;
	enum i40e_status_code retval;
	int ret;
	enum i40evf_aq_result result = I40EVF_MSG_NON;

	/* Let the shared code copy the payload straight into the
	 * caller's buffer.
	 */
	event.buf_len = data->buf_len;
	event.msg_buf = data->msg;
	ret = i40e_clean_arq_element(hw, &event, NULL);
	/* Can't read any msg from adminQ */
	if (ret) {
		/* "No work" just means the queue is empty, not an error. */
		if (ret != I40E_ERR_ADMIN_QUEUE_NO_WORK)
			result = I40EVF_MSG_ERR;
		return result;
	}

	/* Opcode and status ride in the descriptor cookie (little endian). */
	opcode = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
	retval = (enum i40e_status_code)rte_le_to_cpu_32(event.desc.cookie_low);
	/* pf sys event */
	if (opcode == I40E_VIRTCHNL_OP_EVENT) {
		struct i40e_virtchnl_pf_event *vpe =
			(struct i40e_virtchnl_pf_event *)event.msg_buf;

		result = I40EVF_MSG_SYS;
		switch (vpe->event) {
		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
			vf->link_up =
				vpe->event_data.link_event.link_status;
			vf->link_speed =
				vpe->event_data.link_event.link_speed;
			vf->pend_msg |= PFMSG_LINK_CHANGE;
			PMD_DRV_LOG(INFO, "Link status update:%s",
				    vf->link_up ? "up" : "down");
			break;
		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
			vf->vf_reset = true;
			vf->pend_msg |= PFMSG_RESET_IMPENDING;
			PMD_DRV_LOG(INFO, "vf is reseting");
			break;
		case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
			vf->dev_closed = true;
			vf->pend_msg |= PFMSG_DRIVER_CLOSE;
			PMD_DRV_LOG(INFO, "PF driver closed");
			break;
		default:
			PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
				    __func__, vpe->event);
		}
	} else {
		/* async reply msg on command issued by vf previously */
		result = I40EVF_MSG_CMD;
		/* Actual data length read from PF */
		data->msg_len = event.msg_len;
	}

	data->result = retval;
	data->ops = opcode;

	return result;
}
298 | ||
/**
 * clear current command. Only call in case execute
 * _atomic_set_cmd successfully.
 *
 * The write barrier orders any result data written before the clear, so
 * a poller that observes pend_cmd == UNKNOWN also sees the results.
 */
static inline void
_clear_cmd(struct i40e_vf *vf)
{
	rte_wmb();
	vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
}
309 | ||
/*
 * Check there is pending cmd in execution. If none, set new command.
 *
 * Returns 0 when the command slot was claimed, nonzero when another
 * command is still in flight (the CAS from OP_UNKNOWN failed).
 */
static inline int
_atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
{
	int ret = rte_atomic32_cmpset(&vf->pend_cmd,
			I40E_VIRTCHNL_OP_UNKNOWN, ops);

	if (!ret)
		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);

	return !ret;
}
324 | ||
325 | #define MAX_TRY_TIMES 200 | |
326 | #define ASQ_DELAY_MS 10 | |
327 | ||
328 | static int | |
329 | i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args) | |
330 | { | |
331 | struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
332 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
333 | struct i40evf_arq_msg_info info; | |
334 | enum i40evf_aq_result ret; | |
335 | int err, i = 0; | |
336 | ||
337 | if (_atomic_set_cmd(vf, args->ops)) | |
338 | return -1; | |
339 | ||
340 | info.msg = args->out_buffer; | |
341 | info.buf_len = args->out_size; | |
342 | info.ops = I40E_VIRTCHNL_OP_UNKNOWN; | |
343 | info.result = I40E_SUCCESS; | |
344 | ||
345 | err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS, | |
346 | args->in_args, args->in_args_size, NULL); | |
347 | if (err) { | |
348 | PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops); | |
349 | _clear_cmd(vf); | |
350 | return err; | |
351 | } | |
352 | ||
353 | switch (args->ops) { | |
354 | case I40E_VIRTCHNL_OP_RESET_VF: | |
355 | /*no need to process in this function */ | |
356 | err = 0; | |
357 | break; | |
358 | case I40E_VIRTCHNL_OP_VERSION: | |
359 | case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: | |
360 | /* for init adminq commands, need to poll the response */ | |
361 | err = -1; | |
362 | do { | |
363 | ret = i40evf_read_pfmsg(dev, &info); | |
364 | if (ret == I40EVF_MSG_CMD) { | |
365 | err = 0; | |
366 | break; | |
367 | } else if (ret == I40EVF_MSG_ERR) | |
368 | break; | |
369 | rte_delay_ms(ASQ_DELAY_MS); | |
370 | /* If don't read msg or read sys event, continue */ | |
371 | } while (i++ < MAX_TRY_TIMES); | |
372 | _clear_cmd(vf); | |
373 | break; | |
374 | ||
375 | default: | |
376 | /* for other adminq in running time, waiting the cmd done flag */ | |
377 | err = -1; | |
378 | do { | |
379 | if (vf->pend_cmd == I40E_VIRTCHNL_OP_UNKNOWN) { | |
380 | err = 0; | |
381 | break; | |
382 | } | |
383 | rte_delay_ms(ASQ_DELAY_MS); | |
384 | /* If don't read msg or read sys event, continue */ | |
385 | } while (i++ < MAX_TRY_TIMES); | |
386 | break; | |
387 | } | |
388 | ||
389 | return err | vf->cmd_retval; | |
390 | } | |
391 | ||
/*
 * Check API version with sync wait until version read or fail from admin queue
 *
 * Sends OP_VERSION with this driver's supported virtchnl version, stores
 * the PF's version in the vf state, and accepts either a DPDK PF host
 * (matching I40E_DPDK_VERSION_MAJOR) or a Linux PF host whose version is
 * not newer than ours. Returns 0 on success, negative on mismatch/error.
 */
static int
i40evf_check_api_version(struct rte_eth_dev *dev)
{
	struct i40e_virtchnl_version_info version, *pver;
	int err;
	struct vf_cmd_info args;
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	version.major = I40E_VIRTCHNL_VERSION_MAJOR;
	version.minor = I40E_VIRTCHNL_VERSION_MINOR;

	args.ops = I40E_VIRTCHNL_OP_VERSION;
	args.in_args = (uint8_t *)&version;
	args.in_args_size = sizeof(version);
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;

	err = i40evf_execute_vf_cmd(dev, &args);
	if (err) {
		PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
		return err;
	}

	/* The PF's version info is returned in the reply buffer. */
	pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
	vf->version_major = pver->major;
	vf->version_minor = pver->minor;
	if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
		PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
	else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
		(vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR))
		PMD_DRV_LOG(INFO, "Peer is Linux PF host");
	else {
		PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
			vf->version_major, vf->version_minor,
				I40E_VIRTCHNL_VERSION_MAJOR,
				I40E_VIRTCHNL_VERSION_MINOR);
		return -1;
	}

	return 0;
}
436 | ||
437 | static int | |
438 | i40evf_get_vf_resource(struct rte_eth_dev *dev) | |
439 | { | |
440 | struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
441 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
442 | int err; | |
443 | struct vf_cmd_info args; | |
444 | uint32_t caps, len; | |
445 | ||
446 | args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; | |
447 | args.out_buffer = vf->aq_resp; | |
448 | args.out_size = I40E_AQ_BUF_SZ; | |
449 | if (PF_IS_V11(vf)) { | |
450 | caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | | |
451 | I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | | |
452 | I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | | |
453 | I40E_VIRTCHNL_VF_OFFLOAD_VLAN | | |
454 | I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; | |
455 | args.in_args = (uint8_t *)∩︀ | |
456 | args.in_args_size = sizeof(caps); | |
457 | } else { | |
458 | args.in_args = NULL; | |
459 | args.in_args_size = 0; | |
460 | } | |
461 | err = i40evf_execute_vf_cmd(dev, &args); | |
462 | ||
463 | if (err) { | |
464 | PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE"); | |
465 | return err; | |
466 | } | |
467 | ||
468 | len = sizeof(struct i40e_virtchnl_vf_resource) + | |
469 | I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource); | |
470 | ||
471 | (void)rte_memcpy(vf->vf_res, args.out_buffer, | |
472 | RTE_MIN(args.out_size, len)); | |
473 | i40e_vf_parse_hw_config(hw, vf->vf_res); | |
474 | ||
475 | return 0; | |
476 | } | |
477 | ||
478 | static int | |
479 | i40evf_config_promisc(struct rte_eth_dev *dev, | |
480 | bool enable_unicast, | |
481 | bool enable_multicast) | |
482 | { | |
483 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
484 | int err; | |
485 | struct vf_cmd_info args; | |
486 | struct i40e_virtchnl_promisc_info promisc; | |
487 | ||
488 | promisc.flags = 0; | |
489 | promisc.vsi_id = vf->vsi_res->vsi_id; | |
490 | ||
491 | if (enable_unicast) | |
492 | promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC; | |
493 | ||
494 | if (enable_multicast) | |
495 | promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC; | |
496 | ||
497 | args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; | |
498 | args.in_args = (uint8_t *)&promisc; | |
499 | args.in_args_size = sizeof(promisc); | |
500 | args.out_buffer = vf->aq_resp; | |
501 | args.out_size = I40E_AQ_BUF_SZ; | |
502 | ||
503 | err = i40evf_execute_vf_cmd(dev, &args); | |
504 | ||
505 | if (err) | |
506 | PMD_DRV_LOG(ERR, "fail to execute command " | |
507 | "CONFIG_PROMISCUOUS_MODE"); | |
508 | return err; | |
509 | } | |
510 | ||
511 | /* Configure vlan and double vlan offload. Use flag to specify which part to configure */ | |
512 | static int | |
513 | i40evf_config_vlan_offload(struct rte_eth_dev *dev, | |
514 | bool enable_vlan_strip) | |
515 | { | |
516 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
517 | int err; | |
518 | struct vf_cmd_info args; | |
519 | struct i40e_virtchnl_vlan_offload_info offload; | |
520 | ||
521 | offload.vsi_id = vf->vsi_res->vsi_id; | |
522 | offload.enable_vlan_strip = enable_vlan_strip; | |
523 | ||
524 | args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD; | |
525 | args.in_args = (uint8_t *)&offload; | |
526 | args.in_args_size = sizeof(offload); | |
527 | args.out_buffer = vf->aq_resp; | |
528 | args.out_size = I40E_AQ_BUF_SZ; | |
529 | ||
530 | err = i40evf_execute_vf_cmd(dev, &args); | |
531 | if (err) | |
532 | PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD"); | |
533 | ||
534 | return err; | |
535 | } | |
536 | ||
537 | static int | |
538 | i40evf_config_vlan_pvid(struct rte_eth_dev *dev, | |
539 | struct i40e_vsi_vlan_pvid_info *info) | |
540 | { | |
541 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
542 | int err; | |
543 | struct vf_cmd_info args; | |
544 | struct i40e_virtchnl_pvid_info tpid_info; | |
545 | ||
546 | if (info == NULL) { | |
547 | PMD_DRV_LOG(ERR, "invalid parameters"); | |
548 | return I40E_ERR_PARAM; | |
549 | } | |
550 | ||
551 | memset(&tpid_info, 0, sizeof(tpid_info)); | |
552 | tpid_info.vsi_id = vf->vsi_res->vsi_id; | |
553 | (void)rte_memcpy(&tpid_info.info, info, sizeof(*info)); | |
554 | ||
555 | args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID; | |
556 | args.in_args = (uint8_t *)&tpid_info; | |
557 | args.in_args_size = sizeof(tpid_info); | |
558 | args.out_buffer = vf->aq_resp; | |
559 | args.out_size = I40E_AQ_BUF_SZ; | |
560 | ||
561 | err = i40evf_execute_vf_cmd(dev, &args); | |
562 | if (err) | |
563 | PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID"); | |
564 | ||
565 | return err; | |
566 | } | |
567 | ||
568 | static void | |
569 | i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info, | |
570 | uint16_t vsi_id, | |
571 | uint16_t queue_id, | |
572 | uint16_t nb_txq, | |
573 | struct i40e_tx_queue *txq) | |
574 | { | |
575 | txq_info->vsi_id = vsi_id; | |
576 | txq_info->queue_id = queue_id; | |
577 | if (queue_id < nb_txq) { | |
578 | txq_info->ring_len = txq->nb_tx_desc; | |
579 | txq_info->dma_ring_addr = txq->tx_ring_phys_addr; | |
580 | } | |
581 | } | |
582 | ||
583 | static void | |
584 | i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info, | |
585 | uint16_t vsi_id, | |
586 | uint16_t queue_id, | |
587 | uint16_t nb_rxq, | |
588 | uint32_t max_pkt_size, | |
589 | struct i40e_rx_queue *rxq) | |
590 | { | |
591 | rxq_info->vsi_id = vsi_id; | |
592 | rxq_info->queue_id = queue_id; | |
593 | rxq_info->max_pkt_size = max_pkt_size; | |
594 | if (queue_id < nb_rxq) { | |
595 | rxq_info->ring_len = rxq->nb_rx_desc; | |
596 | rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr; | |
597 | rxq_info->databuffer_size = | |
598 | (rte_pktmbuf_data_room_size(rxq->mp) - | |
599 | RTE_PKTMBUF_HEADROOM); | |
600 | } | |
601 | } | |
602 | ||
/* It configures VSI queues to co-work with Linux PF host
 *
 * Builds a variable-length OP_CONFIG_VSI_QUEUES message on the stack
 * (VLA sized by the number of queue pairs) describing every RX/TX queue
 * pair of the VSI, then sends it to the PF. Returns 0 on success.
 */
static int
i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_rx_queue **rxq =
		(struct i40e_rx_queue **)dev->data->rx_queues;
	struct i40e_tx_queue **txq =
		(struct i40e_tx_queue **)dev->data->tx_queues;
	struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
	struct i40e_virtchnl_queue_pair_info *vc_qpi;
	struct vf_cmd_info args;
	uint16_t i, nb_qp = vf->num_queue_pairs;
	const uint32_t size =
		I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
	uint8_t buff[size];
	int ret;

	memset(buff, 0, sizeof(buff));
	vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff;
	vc_vqci->vsi_id = vf->vsi_res->vsi_id;
	vc_vqci->num_queue_pairs = nb_qp;

	/* One queue-pair entry per pair; the fill helpers only attach ring
	 * details for queues that were actually set up.
	 */
	for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
		i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
			vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
		i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
			vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
					vf->max_pkt_len, rxq[i]);
	}
	memset(&args, 0, sizeof(args));
	args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	args.in_args = (uint8_t *)vc_vqci;
	args.in_args_size = size;
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	ret = i40evf_execute_vf_cmd(dev, &args);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to execute command of "
			"I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");

	return ret;
}
646 | ||
/* It configures VSI queues to co-work with DPDK PF host
 *
 * Same as i40evf_configure_vsi_queues(), but uses the extended
 * OP_CONFIG_VSI_QUEUES_EXT message a DPDK PF understands, which adds
 * the per-RX-queue CRC-strip setting taken from the device config.
 */
static int
i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_rx_queue **rxq =
		(struct i40e_rx_queue **)dev->data->rx_queues;
	struct i40e_tx_queue **txq =
		(struct i40e_tx_queue **)dev->data->tx_queues;
	struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei;
	struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
	struct vf_cmd_info args;
	uint16_t i, nb_qp = vf->num_queue_pairs;
	const uint32_t size =
		I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, nb_qp);
	uint8_t buff[size];
	int ret;

	memset(buff, 0, sizeof(buff));
	vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff;
	vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
	vc_vqcei->num_queue_pairs = nb_qp;
	vc_qpei = vc_vqcei->qpair;
	for (i = 0; i < nb_qp; i++, vc_qpei++) {
		i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq,
			vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
		i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq,
			vc_vqcei->vsi_id, i, dev->data->nb_rx_queues,
				vf->max_pkt_len, rxq[i]);
		if (i < dev->data->nb_rx_queues)
			/*
			 * It adds extra info for configuring VSI queues, which
			 * is needed to enable the configurable crc stripping
			 * in VF.
			 */
			vc_qpei->rxq_ext.crcstrip =
				dev->data->dev_conf.rxmode.hw_strip_crc;
	}
	memset(&args, 0, sizeof(args));
	args.ops =
		(enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
	args.in_args = (uint8_t *)vc_vqcei;
	args.in_args_size = size;
	args.out_buffer = vf->aq_resp;
	args.out_size = I40E_AQ_BUF_SZ;
	ret = i40evf_execute_vf_cmd(dev, &args);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to execute command of "
			"I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");

	return ret;
}
699 | ||
700 | static int | |
701 | i40evf_configure_queues(struct rte_eth_dev *dev) | |
702 | { | |
703 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
704 | ||
705 | if (vf->version_major == I40E_DPDK_VERSION_MAJOR) | |
706 | /* To support DPDK PF host */ | |
707 | return i40evf_configure_vsi_queues_ext(dev); | |
708 | else | |
709 | /* To support Linux PF host */ | |
710 | return i40evf_configure_vsi_queues(dev); | |
711 | } | |
712 | ||
713 | static int | |
714 | i40evf_config_irq_map(struct rte_eth_dev *dev) | |
715 | { | |
716 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
717 | struct vf_cmd_info args; | |
718 | uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \ | |
719 | sizeof(struct i40e_virtchnl_vector_map)]; | |
720 | struct i40e_virtchnl_irq_map_info *map_info; | |
721 | struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; | |
722 | uint32_t vector_id; | |
723 | int i, err; | |
724 | ||
725 | if (rte_intr_allow_others(intr_handle)) { | |
726 | if (vf->version_major == I40E_DPDK_VERSION_MAJOR) | |
727 | vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR; | |
728 | else | |
729 | vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX; | |
730 | } else { | |
731 | vector_id = I40E_MISC_VEC_ID; | |
732 | } | |
733 | ||
734 | map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer; | |
735 | map_info->num_vectors = 1; | |
736 | map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT; | |
737 | map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id; | |
738 | /* Alway use default dynamic MSIX interrupt */ | |
739 | map_info->vecmap[0].vector_id = vector_id; | |
740 | /* Don't map any tx queue */ | |
741 | map_info->vecmap[0].txq_map = 0; | |
742 | map_info->vecmap[0].rxq_map = 0; | |
743 | for (i = 0; i < dev->data->nb_rx_queues; i++) { | |
744 | map_info->vecmap[0].rxq_map |= 1 << i; | |
745 | if (rte_intr_dp_is_en(intr_handle)) | |
746 | intr_handle->intr_vec[i] = vector_id; | |
747 | } | |
748 | ||
749 | args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP; | |
750 | args.in_args = (u8 *)cmd_buffer; | |
751 | args.in_args_size = sizeof(cmd_buffer); | |
752 | args.out_buffer = vf->aq_resp; | |
753 | args.out_size = I40E_AQ_BUF_SZ; | |
754 | err = i40evf_execute_vf_cmd(dev, &args); | |
755 | if (err) | |
756 | PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES"); | |
757 | ||
758 | return err; | |
759 | } | |
760 | ||
761 | static int | |
762 | i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid, | |
763 | bool on) | |
764 | { | |
765 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
766 | struct i40e_virtchnl_queue_select queue_select; | |
767 | int err; | |
768 | struct vf_cmd_info args; | |
769 | memset(&queue_select, 0, sizeof(queue_select)); | |
770 | queue_select.vsi_id = vf->vsi_res->vsi_id; | |
771 | ||
772 | if (isrx) | |
773 | queue_select.rx_queues |= 1 << qid; | |
774 | else | |
775 | queue_select.tx_queues |= 1 << qid; | |
776 | ||
777 | if (on) | |
778 | args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES; | |
779 | else | |
780 | args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES; | |
781 | args.in_args = (u8 *)&queue_select; | |
782 | args.in_args_size = sizeof(queue_select); | |
783 | args.out_buffer = vf->aq_resp; | |
784 | args.out_size = I40E_AQ_BUF_SZ; | |
785 | err = i40evf_execute_vf_cmd(dev, &args); | |
786 | if (err) | |
787 | PMD_DRV_LOG(ERR, "fail to switch %s %u %s", | |
788 | isrx ? "RX" : "TX", qid, on ? "on" : "off"); | |
789 | ||
790 | return err; | |
791 | } | |
792 | ||
793 | static int | |
794 | i40evf_start_queues(struct rte_eth_dev *dev) | |
795 | { | |
796 | struct rte_eth_dev_data *dev_data = dev->data; | |
797 | int i; | |
798 | struct i40e_rx_queue *rxq; | |
799 | struct i40e_tx_queue *txq; | |
800 | ||
801 | for (i = 0; i < dev->data->nb_rx_queues; i++) { | |
802 | rxq = dev_data->rx_queues[i]; | |
803 | if (rxq->rx_deferred_start) | |
804 | continue; | |
805 | if (i40evf_dev_rx_queue_start(dev, i) != 0) { | |
806 | PMD_DRV_LOG(ERR, "Fail to start queue %u", i); | |
807 | return -1; | |
808 | } | |
809 | } | |
810 | ||
811 | for (i = 0; i < dev->data->nb_tx_queues; i++) { | |
812 | txq = dev_data->tx_queues[i]; | |
813 | if (txq->tx_deferred_start) | |
814 | continue; | |
815 | if (i40evf_dev_tx_queue_start(dev, i) != 0) { | |
816 | PMD_DRV_LOG(ERR, "Fail to start queue %u", i); | |
817 | return -1; | |
818 | } | |
819 | } | |
820 | ||
821 | return 0; | |
822 | } | |
823 | ||
824 | static int | |
825 | i40evf_stop_queues(struct rte_eth_dev *dev) | |
826 | { | |
827 | int i; | |
828 | ||
829 | /* Stop TX queues first */ | |
830 | for (i = 0; i < dev->data->nb_tx_queues; i++) { | |
831 | if (i40evf_dev_tx_queue_stop(dev, i) != 0) { | |
832 | PMD_DRV_LOG(ERR, "Fail to stop queue %u", i); | |
833 | return -1; | |
834 | } | |
835 | } | |
836 | ||
837 | /* Then stop RX queues */ | |
838 | for (i = 0; i < dev->data->nb_rx_queues; i++) { | |
839 | if (i40evf_dev_rx_queue_stop(dev, i) != 0) { | |
840 | PMD_DRV_LOG(ERR, "Fail to stop queue %u", i); | |
841 | return -1; | |
842 | } | |
843 | } | |
844 | ||
845 | return 0; | |
846 | } | |
847 | ||
848 | static void | |
849 | i40evf_add_mac_addr(struct rte_eth_dev *dev, | |
850 | struct ether_addr *addr, | |
851 | __rte_unused uint32_t index, | |
852 | __rte_unused uint32_t pool) | |
853 | { | |
854 | struct i40e_virtchnl_ether_addr_list *list; | |
855 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
856 | uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \ | |
857 | sizeof(struct i40e_virtchnl_ether_addr)]; | |
858 | int err; | |
859 | struct vf_cmd_info args; | |
860 | ||
861 | if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) { | |
862 | PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x", | |
863 | addr->addr_bytes[0], addr->addr_bytes[1], | |
864 | addr->addr_bytes[2], addr->addr_bytes[3], | |
865 | addr->addr_bytes[4], addr->addr_bytes[5]); | |
866 | return; | |
867 | } | |
868 | ||
869 | list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer; | |
870 | list->vsi_id = vf->vsi_res->vsi_id; | |
871 | list->num_elements = 1; | |
872 | (void)rte_memcpy(list->list[0].addr, addr->addr_bytes, | |
873 | sizeof(addr->addr_bytes)); | |
874 | ||
875 | args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS; | |
876 | args.in_args = cmd_buffer; | |
877 | args.in_args_size = sizeof(cmd_buffer); | |
878 | args.out_buffer = vf->aq_resp; | |
879 | args.out_size = I40E_AQ_BUF_SZ; | |
880 | err = i40evf_execute_vf_cmd(dev, &args); | |
881 | if (err) | |
882 | PMD_DRV_LOG(ERR, "fail to execute command " | |
883 | "OP_ADD_ETHER_ADDRESS"); | |
884 | ||
885 | return; | |
886 | } | |
887 | ||
888 | static void | |
889 | i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index) | |
890 | { | |
891 | struct i40e_virtchnl_ether_addr_list *list; | |
892 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
893 | struct rte_eth_dev_data *data = dev->data; | |
894 | struct ether_addr *addr; | |
895 | uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \ | |
896 | sizeof(struct i40e_virtchnl_ether_addr)]; | |
897 | int err; | |
898 | struct vf_cmd_info args; | |
899 | ||
900 | addr = &(data->mac_addrs[index]); | |
901 | ||
902 | if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) { | |
903 | PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x", | |
904 | addr->addr_bytes[0], addr->addr_bytes[1], | |
905 | addr->addr_bytes[2], addr->addr_bytes[3], | |
906 | addr->addr_bytes[4], addr->addr_bytes[5]); | |
907 | return; | |
908 | } | |
909 | ||
910 | list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer; | |
911 | list->vsi_id = vf->vsi_res->vsi_id; | |
912 | list->num_elements = 1; | |
913 | (void)rte_memcpy(list->list[0].addr, addr->addr_bytes, | |
914 | sizeof(addr->addr_bytes)); | |
915 | ||
916 | args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS; | |
917 | args.in_args = cmd_buffer; | |
918 | args.in_args_size = sizeof(cmd_buffer); | |
919 | args.out_buffer = vf->aq_resp; | |
920 | args.out_size = I40E_AQ_BUF_SZ; | |
921 | err = i40evf_execute_vf_cmd(dev, &args); | |
922 | if (err) | |
923 | PMD_DRV_LOG(ERR, "fail to execute command " | |
924 | "OP_DEL_ETHER_ADDRESS"); | |
925 | return; | |
926 | } | |
927 | ||
928 | static int | |
929 | i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats) | |
930 | { | |
931 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
932 | struct i40e_virtchnl_queue_select q_stats; | |
933 | int err; | |
934 | struct vf_cmd_info args; | |
935 | ||
936 | memset(&q_stats, 0, sizeof(q_stats)); | |
937 | q_stats.vsi_id = vf->vsi_res->vsi_id; | |
938 | args.ops = I40E_VIRTCHNL_OP_GET_STATS; | |
939 | args.in_args = (u8 *)&q_stats; | |
940 | args.in_args_size = sizeof(q_stats); | |
941 | args.out_buffer = vf->aq_resp; | |
942 | args.out_size = I40E_AQ_BUF_SZ; | |
943 | ||
944 | err = i40evf_execute_vf_cmd(dev, &args); | |
945 | if (err) { | |
946 | PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS"); | |
947 | *pstats = NULL; | |
948 | return err; | |
949 | } | |
950 | *pstats = (struct i40e_eth_stats *)args.out_buffer; | |
951 | return 0; | |
952 | } | |
953 | ||
954 | static int | |
955 | i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats) | |
956 | { | |
957 | int ret; | |
958 | struct i40e_eth_stats *pstats = NULL; | |
959 | ||
960 | ret = i40evf_update_stats(dev, &pstats); | |
961 | if (ret != 0) | |
962 | return 0; | |
963 | ||
964 | stats->ipackets = pstats->rx_unicast + pstats->rx_multicast + | |
965 | pstats->rx_broadcast; | |
966 | stats->opackets = pstats->tx_broadcast + pstats->tx_multicast + | |
967 | pstats->tx_unicast; | |
968 | stats->ierrors = pstats->rx_discards; | |
969 | stats->oerrors = pstats->tx_errors + pstats->tx_discards; | |
970 | stats->ibytes = pstats->rx_bytes; | |
971 | stats->obytes = pstats->tx_bytes; | |
972 | ||
973 | return 0; | |
974 | } | |
975 | ||
976 | static void | |
977 | i40evf_dev_xstats_reset(struct rte_eth_dev *dev) | |
978 | { | |
979 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
980 | struct i40e_eth_stats *pstats = NULL; | |
981 | ||
982 | /* read stat values to clear hardware registers */ | |
983 | i40evf_update_stats(dev, &pstats); | |
984 | ||
985 | /* set stats offset base on current values */ | |
986 | vf->vsi.eth_stats_offset = vf->vsi.eth_stats; | |
987 | } | |
988 | ||
989 | static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, | |
990 | struct rte_eth_xstat_name *xstats_names, | |
991 | __rte_unused unsigned limit) | |
992 | { | |
993 | unsigned i; | |
994 | ||
995 | if (xstats_names != NULL) | |
996 | for (i = 0; i < I40EVF_NB_XSTATS; i++) { | |
997 | snprintf(xstats_names[i].name, | |
998 | sizeof(xstats_names[i].name), | |
999 | "%s", rte_i40evf_stats_strings[i].name); | |
1000 | } | |
1001 | return I40EVF_NB_XSTATS; | |
1002 | } | |
1003 | ||
1004 | static int i40evf_dev_xstats_get(struct rte_eth_dev *dev, | |
1005 | struct rte_eth_xstat *xstats, unsigned n) | |
1006 | { | |
1007 | int ret; | |
1008 | unsigned i; | |
1009 | struct i40e_eth_stats *pstats = NULL; | |
1010 | ||
1011 | if (n < I40EVF_NB_XSTATS) | |
1012 | return I40EVF_NB_XSTATS; | |
1013 | ||
1014 | ret = i40evf_update_stats(dev, &pstats); | |
1015 | if (ret != 0) | |
1016 | return 0; | |
1017 | ||
1018 | if (!xstats) | |
1019 | return 0; | |
1020 | ||
1021 | /* loop over xstats array and values from pstats */ | |
1022 | for (i = 0; i < I40EVF_NB_XSTATS; i++) { | |
1023 | xstats[i].id = i; | |
1024 | xstats[i].value = *(uint64_t *)(((char *)pstats) + | |
1025 | rte_i40evf_stats_strings[i].offset); | |
1026 | } | |
1027 | ||
1028 | return I40EVF_NB_XSTATS; | |
1029 | } | |
1030 | ||
1031 | static int | |
1032 | i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid) | |
1033 | { | |
1034 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
1035 | struct i40e_virtchnl_vlan_filter_list *vlan_list; | |
1036 | uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) + | |
1037 | sizeof(uint16_t)]; | |
1038 | int err; | |
1039 | struct vf_cmd_info args; | |
1040 | ||
1041 | vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer; | |
1042 | vlan_list->vsi_id = vf->vsi_res->vsi_id; | |
1043 | vlan_list->num_elements = 1; | |
1044 | vlan_list->vlan_id[0] = vlanid; | |
1045 | ||
1046 | args.ops = I40E_VIRTCHNL_OP_ADD_VLAN; | |
1047 | args.in_args = (u8 *)&cmd_buffer; | |
1048 | args.in_args_size = sizeof(cmd_buffer); | |
1049 | args.out_buffer = vf->aq_resp; | |
1050 | args.out_size = I40E_AQ_BUF_SZ; | |
1051 | err = i40evf_execute_vf_cmd(dev, &args); | |
1052 | if (err) | |
1053 | PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN"); | |
1054 | ||
1055 | return err; | |
1056 | } | |
1057 | ||
1058 | static int | |
1059 | i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid) | |
1060 | { | |
1061 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
1062 | struct i40e_virtchnl_vlan_filter_list *vlan_list; | |
1063 | uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) + | |
1064 | sizeof(uint16_t)]; | |
1065 | int err; | |
1066 | struct vf_cmd_info args; | |
1067 | ||
1068 | vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer; | |
1069 | vlan_list->vsi_id = vf->vsi_res->vsi_id; | |
1070 | vlan_list->num_elements = 1; | |
1071 | vlan_list->vlan_id[0] = vlanid; | |
1072 | ||
1073 | args.ops = I40E_VIRTCHNL_OP_DEL_VLAN; | |
1074 | args.in_args = (u8 *)&cmd_buffer; | |
1075 | args.in_args_size = sizeof(cmd_buffer); | |
1076 | args.out_buffer = vf->aq_resp; | |
1077 | args.out_size = I40E_AQ_BUF_SZ; | |
1078 | err = i40evf_execute_vf_cmd(dev, &args); | |
1079 | if (err) | |
1080 | PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN"); | |
1081 | ||
1082 | return err; | |
1083 | } | |
1084 | ||
/* PCI device IDs this VF driver claims: the i40e VF variants (including
 * Hyper-V flavors and X722 VFs). Terminated by a zero vendor_id sentinel.
 */
static const struct rte_pci_id pci_id_i40evf_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};
1093 | ||
/* Atomically publish *link into dev->data->dev_link as a single 64-bit
 * store, so concurrent readers never observe a half-written link struct.
 * The compare value is re-read from *dst at call time, so the swap only
 * fails if another writer races in between the read and the cmpset.
 * NOTE(review): this relies on struct rte_eth_link fitting in 64 bits —
 * confirmed elsewhere in DPDK by a build-time check, not visible here.
 * Returns 0 on success, -1 if the compare-and-set lost a race.
 */
static inline int
i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
1107 | ||
/* Disable IRQ0 */
static inline void
i40evf_disable_irq0(struct i40e_hw *hw)
{
	/* Disable all interrupt types */
	I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, 0);
	/* Clear INTENA; keep the ITR index field set so the cause is
	 * associated with the no-ITR index.
	 */
	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
		       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	/* Flush posted register writes before returning. */
	I40EVF_WRITE_FLUSH(hw);
}
1118 | ||
1119 | /* Enable IRQ0 */ | |
1120 | static inline void | |
1121 | i40evf_enable_irq0(struct i40e_hw *hw) | |
1122 | { | |
1123 | /* Enable admin queue interrupt trigger */ | |
1124 | uint32_t val; | |
1125 | ||
1126 | i40evf_disable_irq0(hw); | |
1127 | val = I40E_READ_REG(hw, I40E_VFINT_ICR0_ENA1); | |
1128 | val |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK | | |
1129 | I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK; | |
1130 | I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, val); | |
1131 | ||
1132 | I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, | |
1133 | I40E_VFINT_DYN_CTL01_INTENA_MASK | | |
1134 | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK | | |
1135 | I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); | |
1136 | ||
1137 | I40EVF_WRITE_FLUSH(hw); | |
1138 | } | |
1139 | ||
/* Issue a VF reset request to the PF and poll VFGEN_RSTAT until the PF
 * reports the reset COMPLETED (or the VF back ACTIVE), or the retry
 * budget (MAX_RESET_WAIT_CNT polls, 50ms apart) is exhausted.
 * Returns 0 on success, -1 on request failure or timeout.
 */
static int
i40evf_reset_vf(struct i40e_hw *hw)
{
	int i, reset;

	if (i40e_vf_reset(hw) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Reset VF NIC failed");
		return -1;
	}
	/**
	  * After issuing vf reset command to pf, pf won't necessarily
	  * reset vf, it depends on what state it exactly is. If it's not
	  * initialized yet, it won't have vf reset since it's in a certain
	  * state. If not, it will try to reset. Even vf is reset, pf will
	  * set I40E_VFGEN_RSTAT to COMPLETE first, then wait 10ms and set
	  * it to ACTIVE. In this duration, vf may not catch the moment that
	  * COMPLETE is set. So, for vf, we'll try to wait a long time.
	  */
	rte_delay_ms(200);

	for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
		/* Extract the VFR_STATE field from VFGEN_RSTAT. */
		reset = rd32(hw, I40E_VFGEN_RSTAT) &
			I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
		if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
			break;
		else
			rte_delay_ms(50);
	}

	/* Loop ran out without ever seeing COMPLETED/VFACTIVE: timeout. */
	if (i >= MAX_RESET_WAIT_CNT) {
		PMD_INIT_LOG(ERR, "Reset VF NIC failed");
		return -1;
	}

	return 0;
}
1177 | ||
1178 | static int | |
1179 | i40evf_init_vf(struct rte_eth_dev *dev) | |
1180 | { | |
1181 | int i, err, bufsz; | |
1182 | struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1183 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
1184 | struct ether_addr *p_mac_addr; | |
1185 | uint16_t interval = | |
1186 | i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX); | |
1187 | ||
1188 | vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); | |
1189 | vf->dev_data = dev->data; | |
1190 | err = i40e_set_mac_type(hw); | |
1191 | if (err) { | |
1192 | PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err); | |
1193 | goto err; | |
1194 | } | |
1195 | ||
1196 | i40e_init_adminq_parameter(hw); | |
1197 | err = i40e_init_adminq(hw); | |
1198 | if (err) { | |
1199 | PMD_INIT_LOG(ERR, "init_adminq failed: %d", err); | |
1200 | goto err; | |
1201 | } | |
1202 | ||
1203 | /* Reset VF and wait until it's complete */ | |
1204 | if (i40evf_reset_vf(hw)) { | |
1205 | PMD_INIT_LOG(ERR, "reset NIC failed"); | |
1206 | goto err_aq; | |
1207 | } | |
1208 | ||
1209 | /* VF reset, shutdown admin queue and initialize again */ | |
1210 | if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) { | |
1211 | PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed"); | |
1212 | return -1; | |
1213 | } | |
1214 | ||
1215 | i40e_init_adminq_parameter(hw); | |
1216 | if (i40e_init_adminq(hw) != I40E_SUCCESS) { | |
1217 | PMD_INIT_LOG(ERR, "init_adminq failed"); | |
1218 | return -1; | |
1219 | } | |
1220 | vf->aq_resp = rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ, 0); | |
1221 | if (!vf->aq_resp) { | |
1222 | PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory"); | |
1223 | goto err_aq; | |
1224 | } | |
1225 | if (i40evf_check_api_version(dev) != 0) { | |
1226 | PMD_INIT_LOG(ERR, "check_api version failed"); | |
1227 | goto err_aq; | |
1228 | } | |
1229 | bufsz = sizeof(struct i40e_virtchnl_vf_resource) + | |
1230 | (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource)); | |
1231 | vf->vf_res = rte_zmalloc("vf_res", bufsz, 0); | |
1232 | if (!vf->vf_res) { | |
1233 | PMD_INIT_LOG(ERR, "unable to allocate vf_res memory"); | |
1234 | goto err_aq; | |
1235 | } | |
1236 | ||
1237 | if (i40evf_get_vf_resource(dev) != 0) { | |
1238 | PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed"); | |
1239 | goto err_alloc; | |
1240 | } | |
1241 | ||
1242 | /* got VF config message back from PF, now we can parse it */ | |
1243 | for (i = 0; i < vf->vf_res->num_vsis; i++) { | |
1244 | if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) | |
1245 | vf->vsi_res = &vf->vf_res->vsi_res[i]; | |
1246 | } | |
1247 | ||
1248 | if (!vf->vsi_res) { | |
1249 | PMD_INIT_LOG(ERR, "no LAN VSI found"); | |
1250 | goto err_alloc; | |
1251 | } | |
1252 | ||
1253 | if (hw->mac.type == I40E_MAC_X722_VF) | |
1254 | vf->flags = I40E_FLAG_RSS_AQ_CAPABLE; | |
1255 | vf->vsi.vsi_id = vf->vsi_res->vsi_id; | |
1256 | vf->vsi.type = vf->vsi_res->vsi_type; | |
1257 | vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs; | |
1258 | vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); | |
1259 | ||
1260 | /* Store the MAC address configured by host, or generate random one */ | |
1261 | p_mac_addr = (struct ether_addr *)(vf->vsi_res->default_mac_addr); | |
1262 | if (is_valid_assigned_ether_addr(p_mac_addr)) /* Configured by host */ | |
1263 | ether_addr_copy(p_mac_addr, (struct ether_addr *)hw->mac.addr); | |
1264 | else | |
1265 | eth_random_addr(hw->mac.addr); /* Generate a random one */ | |
1266 | ||
1267 | /* If the PF host is not DPDK, set the interval of ITR0 to max*/ | |
1268 | if (vf->version_major != I40E_DPDK_VERSION_MAJOR) { | |
1269 | I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, | |
1270 | (I40E_ITR_INDEX_DEFAULT << | |
1271 | I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) | | |
1272 | (interval << | |
1273 | I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)); | |
1274 | I40EVF_WRITE_FLUSH(hw); | |
1275 | } | |
1276 | ||
1277 | return 0; | |
1278 | ||
1279 | err_alloc: | |
1280 | rte_free(vf->vf_res); | |
1281 | err_aq: | |
1282 | i40e_shutdown_adminq(hw); /* ignore error */ | |
1283 | err: | |
1284 | return -1; | |
1285 | } | |
1286 | ||
1287 | static int | |
1288 | i40evf_uninit_vf(struct rte_eth_dev *dev) | |
1289 | { | |
1290 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
1291 | struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1292 | ||
1293 | PMD_INIT_FUNC_TRACE(); | |
1294 | ||
1295 | if (hw->adapter_stopped == 0) | |
1296 | i40evf_dev_close(dev); | |
1297 | rte_free(vf->vf_res); | |
1298 | vf->vf_res = NULL; | |
1299 | rte_free(vf->aq_resp); | |
1300 | vf->aq_resp = NULL; | |
1301 | ||
1302 | return 0; | |
1303 | } | |
1304 | ||
1305 | static void | |
1306 | i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev, | |
1307 | uint8_t *msg, | |
1308 | __rte_unused uint16_t msglen) | |
1309 | { | |
1310 | struct i40e_virtchnl_pf_event *pf_msg = | |
1311 | (struct i40e_virtchnl_pf_event *)msg; | |
1312 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
1313 | ||
1314 | switch (pf_msg->event) { | |
1315 | case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: | |
1316 | PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event\n"); | |
1317 | _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL); | |
1318 | break; | |
1319 | case I40E_VIRTCHNL_EVENT_LINK_CHANGE: | |
1320 | PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event\n"); | |
1321 | vf->link_up = pf_msg->event_data.link_event.link_status; | |
1322 | vf->link_speed = pf_msg->event_data.link_event.link_speed; | |
1323 | break; | |
1324 | case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE: | |
1325 | PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event\n"); | |
1326 | break; | |
1327 | default: | |
1328 | PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event); | |
1329 | break; | |
1330 | } | |
1331 | } | |
1332 | ||
/* Drain the VF admin receive queue. Each element is either an unsolicited
 * PF event (forwarded to i40evf_handle_pf_event) or the response to the
 * command currently pending in vf->pend_cmd, whose retval is recorded and
 * the pending state cleared so the synchronous caller can proceed.
 */
static void
i40evf_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_arq_event_info info;
	struct i40e_virtchnl_msg *v_msg;
	uint16_t pending, opcode;
	int ret;

	info.buf_len = I40E_AQ_BUF_SZ;
	if (!vf->aq_resp) {
		PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL");
		return;
	}
	info.msg_buf = vf->aq_resp;
	/* The virtchnl header lives in the AQ descriptor's cookie fields. */
	v_msg = (struct i40e_virtchnl_msg *)&info.desc;

	pending = 1;
	while (pending) {
		/* pending is updated by the clean call: loop until empty. */
		ret = i40e_clean_arq_element(hw, &info, &pending);

		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ,"
				    "ret: %d", ret);
			break;
		}
		opcode = rte_le_to_cpu_16(info.desc.opcode);

		switch (opcode) {
		case i40e_aqc_opc_send_msg_to_vf:
			if (v_msg->v_opcode == I40E_VIRTCHNL_OP_EVENT)
				/* unsolicited event from the PF */
				i40evf_handle_pf_event(dev, info.msg_buf,
						       info.msg_len);
			else {
				/* response to the command we are waiting on */
				if (v_msg->v_opcode == vf->pend_cmd) {
					vf->cmd_retval = v_msg->v_retval;
					/* prevent compiler reordering: retval
					 * must be visible before pend_cmd is
					 * cleared for the waiting thread.
					 */
					rte_compiler_barrier();
					_clear_cmd(vf);
				} else
					PMD_DRV_LOG(ERR, "command mismatch,"
						    "expect %u, get %u",
						    vf->pend_cmd, v_msg->v_opcode);
				PMD_DRV_LOG(DEBUG, "adminq response is received,"
					    " opcode = %d\n", v_msg->v_opcode);
			}
			break;
		default:
			PMD_DRV_LOG(ERR, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
}
1390 | ||
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt. Only adminq interrupt is processed in VF.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
			     void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t icr0;

	/* Mask IRQ0 while servicing to avoid re-entry. */
	i40evf_disable_irq0(hw);

	/* read out interrupt causes (ICR01 is read-to-clear) */
	icr0 = I40E_READ_REG(hw, I40E_VFINT_ICR01);

	/* No interrupt event indicated */
	if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
		PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do\n");
		goto done;
	}

	/* Admin queue cause: drain pending PF messages/responses. */
	if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
		PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported\n");
		i40evf_handle_aq_msg(dev);
	}

	/* Link Status Change interrupt */
	if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK)
		PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported,"
			    " do nothing\n");

done:
	/* Re-arm the hardware cause and the EAL interrupt source. */
	i40evf_enable_irq0(hw);
	rte_intr_enable(&dev->pci_dev->intr_handle);
}
1436 | ||
/* ethdev init callback for the VF: wire up the ops/burst functions, and
 * (primary process only) initialize the VF through the PF, register the
 * admin-queue interrupt handler, and install the default MAC address.
 * Returns 0 on success, -1 or -ENOMEM on failure.
 */
static int
i40evf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
			eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev = eth_dev->pci_dev;

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &i40evf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &i40e_recv_pkts;
	eth_dev->tx_pkt_burst = &i40e_xmit_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
		/* Only pick the (possibly vectorized) burst functions that
		 * match the primary's configuration.
		 */
		i40e_set_rx_function(eth_dev);
		i40e_set_tx_function(eth_dev);
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);

	/* Seed the shared-code hw struct from PCI config/BAR info. */
	hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
	hw->device_id = eth_dev->pci_dev->id.device_id;
	hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
	hw->bus.device = eth_dev->pci_dev->addr.devid;
	hw->bus.func = eth_dev->pci_dev->addr.function;
	hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
	hw->adapter_stopped = 0;

	if(i40evf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* register callback func to eal lib */
	rte_intr_callback_register(&pci_dev->intr_handle,
		i40evf_dev_interrupt_handler, (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&pci_dev->intr_handle);

	/* configure and enable device interrupt */
	i40evf_enable_irq0(hw);

	/* copy mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
					ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
					0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
				" store MAC addresses",
				ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
		/* NOTE(review): this error path leaves the VF initialized
		 * and the interrupt callback registered — no rollback of
		 * i40evf_init_vf()/rte_intr_callback_register(). Confirm
		 * whether cleanup should be added here.
		 */
		return -ENOMEM;
	}
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	return 0;
}
1502 | ||
1503 | static int | |
1504 | i40evf_dev_uninit(struct rte_eth_dev *eth_dev) | |
1505 | { | |
1506 | PMD_INIT_FUNC_TRACE(); | |
1507 | ||
1508 | if (rte_eal_process_type() != RTE_PROC_PRIMARY) | |
1509 | return -EPERM; | |
1510 | ||
1511 | eth_dev->dev_ops = NULL; | |
1512 | eth_dev->rx_pkt_burst = NULL; | |
1513 | eth_dev->tx_pkt_burst = NULL; | |
1514 | ||
1515 | if (i40evf_uninit_vf(eth_dev) != 0) { | |
1516 | PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed"); | |
1517 | return -1; | |
1518 | } | |
1519 | ||
1520 | rte_free(eth_dev->data->mac_addrs); | |
1521 | eth_dev->data->mac_addrs = NULL; | |
1522 | ||
1523 | return 0; | |
1524 | } | |
/*
 * virtual function driver struct: binds the PCI ID table and the
 * per-device init/uninit callbacks; dev_private_size reserves room for
 * the driver's per-port i40e_adapter state.
 */
static struct eth_driver rte_i40evf_pmd = {
	.pci_drv = {
		.id_table = pci_id_i40evf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = i40evf_dev_init,
	.eth_dev_uninit = i40evf_dev_uninit,
	.dev_private_size = sizeof(struct i40e_adapter),
};
1539 | ||
/* Register the VF driver and export its PCI ID table to the EAL. */
RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
1542 | ||
1543 | static int | |
1544 | i40evf_dev_configure(struct rte_eth_dev *dev) | |
1545 | { | |
1546 | struct i40e_adapter *ad = | |
1547 | I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); | |
1548 | struct rte_eth_conf *conf = &dev->data->dev_conf; | |
1549 | struct i40e_vf *vf; | |
1550 | ||
1551 | /* Initialize to TRUE. If any of Rx queues doesn't meet the bulk | |
1552 | * allocation or vector Rx preconditions we will reset it. | |
1553 | */ | |
1554 | ad->rx_bulk_alloc_allowed = true; | |
1555 | ad->rx_vec_allowed = true; | |
1556 | ad->tx_simple_allowed = true; | |
1557 | ad->tx_vec_allowed = true; | |
1558 | ||
1559 | /* For non-DPDK PF drivers, VF has no ability to disable HW | |
1560 | * CRC strip, and is implicitly enabled by the PF. | |
1561 | */ | |
1562 | if (!conf->rxmode.hw_strip_crc) { | |
1563 | vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
1564 | if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && | |
1565 | (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR)) { | |
1566 | /* Peer is running non-DPDK PF driver. */ | |
1567 | PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip"); | |
1568 | return -EINVAL; | |
1569 | } | |
1570 | } | |
1571 | ||
1572 | return i40evf_init_vlan(dev); | |
1573 | } | |
1574 | ||
1575 | static int | |
1576 | i40evf_init_vlan(struct rte_eth_dev *dev) | |
1577 | { | |
1578 | struct rte_eth_dev_data *data = dev->data; | |
1579 | int ret; | |
1580 | ||
1581 | /* Apply vlan offload setting */ | |
1582 | i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK); | |
1583 | ||
1584 | /* Apply pvid setting */ | |
1585 | ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid, | |
1586 | data->dev_conf.txmode.hw_vlan_insert_pvid); | |
1587 | return ret; | |
1588 | } | |
1589 | ||
1590 | static void | |
1591 | i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask) | |
1592 | { | |
1593 | bool enable_vlan_strip = 0; | |
1594 | struct rte_eth_conf *dev_conf = &dev->data->dev_conf; | |
1595 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
1596 | ||
1597 | /* Linux pf host doesn't support vlan offload yet */ | |
1598 | if (vf->version_major == I40E_DPDK_VERSION_MAJOR) { | |
1599 | /* Vlan stripping setting */ | |
1600 | if (mask & ETH_VLAN_STRIP_MASK) { | |
1601 | /* Enable or disable VLAN stripping */ | |
1602 | if (dev_conf->rxmode.hw_vlan_strip) | |
1603 | enable_vlan_strip = 1; | |
1604 | else | |
1605 | enable_vlan_strip = 0; | |
1606 | ||
1607 | i40evf_config_vlan_offload(dev, enable_vlan_strip); | |
1608 | } | |
1609 | } | |
1610 | } | |
1611 | ||
1612 | static int | |
1613 | i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on) | |
1614 | { | |
1615 | struct rte_eth_conf *dev_conf = &dev->data->dev_conf; | |
1616 | struct i40e_vsi_vlan_pvid_info info; | |
1617 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
1618 | ||
1619 | memset(&info, 0, sizeof(info)); | |
1620 | info.on = on; | |
1621 | ||
1622 | /* Linux pf host don't support vlan offload yet */ | |
1623 | if (vf->version_major == I40E_DPDK_VERSION_MAJOR) { | |
1624 | if (info.on) | |
1625 | info.config.pvid = pvid; | |
1626 | else { | |
1627 | info.config.reject.tagged = | |
1628 | dev_conf->txmode.hw_vlan_reject_tagged; | |
1629 | info.config.reject.untagged = | |
1630 | dev_conf->txmode.hw_vlan_reject_untagged; | |
1631 | } | |
1632 | return i40evf_config_vlan_pvid(dev, &info); | |
1633 | } | |
1634 | ||
1635 | return 0; | |
1636 | } | |
1637 | ||
1638 | static int | |
1639 | i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) | |
1640 | { | |
1641 | struct i40e_rx_queue *rxq; | |
1642 | int err = 0; | |
1643 | struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1644 | ||
1645 | PMD_INIT_FUNC_TRACE(); | |
1646 | ||
1647 | if (rx_queue_id < dev->data->nb_rx_queues) { | |
1648 | rxq = dev->data->rx_queues[rx_queue_id]; | |
1649 | ||
1650 | err = i40e_alloc_rx_queue_mbufs(rxq); | |
1651 | if (err) { | |
1652 | PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf"); | |
1653 | return err; | |
1654 | } | |
1655 | ||
1656 | rte_wmb(); | |
1657 | ||
1658 | /* Init the RX tail register. */ | |
1659 | I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); | |
1660 | I40EVF_WRITE_FLUSH(hw); | |
1661 | ||
1662 | /* Ready to switch the queue on */ | |
1663 | err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE); | |
1664 | ||
1665 | if (err) | |
1666 | PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", | |
1667 | rx_queue_id); | |
1668 | else | |
1669 | dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; | |
1670 | } | |
1671 | ||
1672 | return err; | |
1673 | } | |
1674 | ||
1675 | static int | |
1676 | i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) | |
1677 | { | |
1678 | struct i40e_rx_queue *rxq; | |
1679 | int err; | |
1680 | ||
1681 | if (rx_queue_id < dev->data->nb_rx_queues) { | |
1682 | rxq = dev->data->rx_queues[rx_queue_id]; | |
1683 | ||
1684 | err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE); | |
1685 | ||
1686 | if (err) { | |
1687 | PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", | |
1688 | rx_queue_id); | |
1689 | return err; | |
1690 | } | |
1691 | ||
1692 | i40e_rx_queue_release_mbufs(rxq); | |
1693 | i40e_reset_rx_queue(rxq); | |
1694 | dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; | |
1695 | } | |
1696 | ||
1697 | return 0; | |
1698 | } | |
1699 | ||
1700 | static int | |
1701 | i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) | |
1702 | { | |
1703 | int err = 0; | |
1704 | ||
1705 | PMD_INIT_FUNC_TRACE(); | |
1706 | ||
1707 | if (tx_queue_id < dev->data->nb_tx_queues) { | |
1708 | ||
1709 | /* Ready to switch the queue on */ | |
1710 | err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE); | |
1711 | ||
1712 | if (err) | |
1713 | PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on", | |
1714 | tx_queue_id); | |
1715 | else | |
1716 | dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; | |
1717 | } | |
1718 | ||
1719 | return err; | |
1720 | } | |
1721 | ||
1722 | static int | |
1723 | i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) | |
1724 | { | |
1725 | struct i40e_tx_queue *txq; | |
1726 | int err; | |
1727 | ||
1728 | if (tx_queue_id < dev->data->nb_tx_queues) { | |
1729 | txq = dev->data->tx_queues[tx_queue_id]; | |
1730 | ||
1731 | err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE); | |
1732 | ||
1733 | if (err) { | |
1734 | PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off", | |
1735 | tx_queue_id); | |
1736 | return err; | |
1737 | } | |
1738 | ||
1739 | i40e_tx_queue_release_mbufs(txq); | |
1740 | i40e_reset_tx_queue(txq); | |
1741 | dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; | |
1742 | } | |
1743 | ||
1744 | return 0; | |
1745 | } | |
1746 | ||
1747 | static int | |
1748 | i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) | |
1749 | { | |
1750 | int ret; | |
1751 | ||
1752 | if (on) | |
1753 | ret = i40evf_add_vlan(dev, vlan_id); | |
1754 | else | |
1755 | ret = i40evf_del_vlan(dev,vlan_id); | |
1756 | ||
1757 | return ret; | |
1758 | } | |
1759 | ||
/* Program per-queue RX parameters: tail register, buffer sizes and the
 * maximum packet length, validated against the jumbo-frame setting.
 * Returns 0 on success or I40E_ERR_CONFIG when max_rx_pkt_len is out of
 * range for the current jumbo configuration.
 */
static int
i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = dev->data;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t buf_size, len;

	/* Map this queue's tail register and expose all descriptors. */
	rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id);
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	I40EVF_WRITE_FLUSH(hw);

	/* Calculate the maximum packet length allowed */
	mbp_priv = rte_mempool_get_priv(rxq->mp);
	/* Usable data room per mbuf, excluding the reserved headroom. */
	buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
			      RTE_PKTMBUF_HEADROOM);
	/* Header split is not used on the VF datapath. */
	rxq->hs_mode = i40e_header_split_none;
	rxq->rx_hdr_len = 0;
	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
	/* Largest frame representable by a full chain of RX buffers. */
	len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS;
	rxq->max_pkt_len = RTE_MIN(len,
				   dev_data->dev_conf.rxmode.max_rx_pkt_len);

	/**
	 * Check if the jumbo frame and maximum packet length are set correctly
	 */
	if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
		    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				"larger than %u and smaller than %u, as jumbo "
				"frame is enabled", (uint32_t)ETHER_MAX_LEN,
					(uint32_t)I40E_FRAME_SIZE_MAX);
			return I40E_ERR_CONFIG;
		}
	} else {
		if (rxq->max_pkt_len < ETHER_MIN_LEN ||
		    rxq->max_pkt_len > ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				"larger than %u and smaller than %u, as jumbo "
				"frame is disabled", (uint32_t)ETHER_MIN_LEN,
					(uint32_t)ETHER_MAX_LEN);
			return I40E_ERR_CONFIG;
		}
	}

	/* Fall back to scattered RX when one buffer cannot hold a frame. */
	if (dev_data->dev_conf.rxmode.enable_scatter ||
	    (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
	}

	return 0;
}
1813 | ||
1814 | static int | |
1815 | i40evf_rx_init(struct rte_eth_dev *dev) | |
1816 | { | |
1817 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
1818 | uint16_t i; | |
1819 | int ret = I40E_SUCCESS; | |
1820 | struct i40e_rx_queue **rxq = | |
1821 | (struct i40e_rx_queue **)dev->data->rx_queues; | |
1822 | ||
1823 | i40evf_config_rss(vf); | |
1824 | for (i = 0; i < dev->data->nb_rx_queues; i++) { | |
1825 | if (!rxq[i] || !rxq[i]->q_set) | |
1826 | continue; | |
1827 | ret = i40evf_rxq_init(dev, rxq[i]); | |
1828 | if (ret != I40E_SUCCESS) | |
1829 | break; | |
1830 | } | |
1831 | if (ret == I40E_SUCCESS) | |
1832 | i40e_set_rx_function(dev); | |
1833 | ||
1834 | return ret; | |
1835 | } | |
1836 | ||
1837 | static void | |
1838 | i40evf_tx_init(struct rte_eth_dev *dev) | |
1839 | { | |
1840 | uint16_t i; | |
1841 | struct i40e_tx_queue **txq = | |
1842 | (struct i40e_tx_queue **)dev->data->tx_queues; | |
1843 | struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1844 | ||
1845 | for (i = 0; i < dev->data->nb_tx_queues; i++) | |
1846 | txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i); | |
1847 | ||
1848 | i40e_set_tx_function(dev); | |
1849 | } | |
1850 | ||
/* Enable RX queue interrupts.  With a single shared vector (no MSI-X
 * "others") the queues share DYN_CTL01; with a DPDK PF host the default
 * MSI-X vector's DYN_CTLN1 register is armed instead.
 */
static inline void
i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;

	/* Shared-vector case: enable + clear pending + no ITR throttling. */
	if (!rte_intr_allow_others(intr_handle)) {
		I40E_WRITE_REG(hw,
			       I40E_VFINT_DYN_CTL01,
			       I40E_VFINT_DYN_CTL01_INTENA_MASK |
			       I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
			       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
		I40EVF_WRITE_FLUSH(hw);
		return;
	}

	if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
		/* To support DPDK PF host */
		I40E_WRITE_REG(hw,
			I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
			I40E_VFINT_DYN_CTLN1_INTENA_MASK |
			I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
	/* If host driver is kernel driver, do nothing.
	 * Interrupt 0 is used for rx packets, but don't set
	 * I40E_VFINT_DYN_CTL01,
	 * because it is already done in i40evf_enable_irq0.
	 */

	I40EVF_WRITE_FLUSH(hw);
}
1882 | ||
/* Disable RX queue interrupts; mirror of i40evf_enable_queues_intr(). */
static inline void
i40evf_disable_queues_intr(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;

	/* Shared-vector case: keep ITR index bits, drop the enable bit. */
	if (!rte_intr_allow_others(intr_handle)) {
		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
			       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
		I40EVF_WRITE_FLUSH(hw);
		return;
	}

	/* DPDK PF host: clear the default MSI-X vector's control register. */
	if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
		I40E_WRITE_REG(hw,
			       I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR
						    - 1),
			       0);
	/* If host driver is kernel driver, do nothing.
	 * Interrupt 0 is used for rx packets, but don't zero
	 * I40E_VFINT_DYN_CTL01,
	 * because interrupt 0 is also used for adminq processing.
	 */

	I40EVF_WRITE_FLUSH(hw);
}
1910 | ||
/* Enable the interrupt vector backing @queue_id, programming the ITR
 * interval derived from RTE_LIBRTE_I40E_ITR_INTERVAL.  The misc vector
 * uses DYN_CTL01; data vectors use DYN_CTLN1 indexed relative to
 * I40E_RX_VEC_START.  Always returns 0.
 */
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t interval =
		i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
	uint16_t msix_intr;

	msix_intr = intr_handle->intr_vec[queue_id];
	if (msix_intr == I40E_MISC_VEC_ID)
		/* Enable + clear pending + ITR index 0 + throttle interval. */
		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
			       I40E_VFINT_DYN_CTL01_INTENA_MASK |
			       I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
			       (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
			       (interval <<
				I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT));
	else
		I40E_WRITE_REG(hw,
			       I40E_VFINT_DYN_CTLN1(msix_intr -
						    I40E_RX_VEC_START),
			       I40E_VFINT_DYN_CTLN1_INTENA_MASK |
			       I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
			       (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
			       (interval <<
				I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT));

	I40EVF_WRITE_FLUSH(hw);

	rte_intr_enable(&dev->pci_dev->intr_handle);

	return 0;
}
1944 | ||
1945 | static int | |
1946 | i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) | |
1947 | { | |
1948 | struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; | |
1949 | struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1950 | uint16_t msix_intr; | |
1951 | ||
1952 | msix_intr = intr_handle->intr_vec[queue_id]; | |
1953 | if (msix_intr == I40E_MISC_VEC_ID) | |
1954 | I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0); | |
1955 | else | |
1956 | I40E_WRITE_REG(hw, | |
1957 | I40E_VFINT_DYN_CTLN1(msix_intr - | |
1958 | I40E_RX_VEC_START), | |
1959 | 0); | |
1960 | ||
1961 | I40EVF_WRITE_FLUSH(hw); | |
1962 | ||
1963 | return 0; | |
1964 | } | |
1965 | ||
1966 | static void | |
1967 | i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add) | |
1968 | { | |
1969 | struct i40e_virtchnl_ether_addr_list *list; | |
1970 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
1971 | int err, i, j; | |
1972 | int next_begin = 0; | |
1973 | int begin = 0; | |
1974 | uint32_t len; | |
1975 | struct ether_addr *addr; | |
1976 | struct vf_cmd_info args; | |
1977 | ||
1978 | do { | |
1979 | j = 0; | |
1980 | len = sizeof(struct i40e_virtchnl_ether_addr_list); | |
1981 | for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) { | |
1982 | if (is_zero_ether_addr(&dev->data->mac_addrs[i])) | |
1983 | continue; | |
1984 | len += sizeof(struct i40e_virtchnl_ether_addr); | |
1985 | if (len >= I40E_AQ_BUF_SZ) { | |
1986 | next_begin = i + 1; | |
1987 | break; | |
1988 | } | |
1989 | } | |
1990 | ||
1991 | list = rte_zmalloc("i40evf_del_mac_buffer", len, 0); | |
1992 | ||
1993 | for (i = begin; i < next_begin; i++) { | |
1994 | addr = &dev->data->mac_addrs[i]; | |
1995 | if (is_zero_ether_addr(addr)) | |
1996 | continue; | |
1997 | (void)rte_memcpy(list->list[j].addr, addr->addr_bytes, | |
1998 | sizeof(addr->addr_bytes)); | |
1999 | PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x", | |
2000 | addr->addr_bytes[0], addr->addr_bytes[1], | |
2001 | addr->addr_bytes[2], addr->addr_bytes[3], | |
2002 | addr->addr_bytes[4], addr->addr_bytes[5]); | |
2003 | j++; | |
2004 | } | |
2005 | list->vsi_id = vf->vsi_res->vsi_id; | |
2006 | list->num_elements = j; | |
2007 | args.ops = add ? I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS : | |
2008 | I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS; | |
2009 | args.in_args = (uint8_t *)list; | |
2010 | args.in_args_size = len; | |
2011 | args.out_buffer = vf->aq_resp; | |
2012 | args.out_size = I40E_AQ_BUF_SZ; | |
2013 | err = i40evf_execute_vf_cmd(dev, &args); | |
2014 | if (err) | |
2015 | PMD_DRV_LOG(ERR, "fail to execute command %s", | |
2016 | add ? "OP_ADD_ETHER_ADDRESS" : | |
2017 | "OP_DEL_ETHER_ADDRESS"); | |
2018 | rte_free(list); | |
2019 | begin = next_begin; | |
2020 | } while (begin < I40E_NUM_MACADDR_MAX); | |
2021 | } | |
2022 | ||
/* Start the VF port: configure queue/interrupt-vector mapping, initialize
 * RX/TX rings, program MAC addresses, then enable the queues and their
 * interrupts.  Returns 0 on success, negative on failure.
 *
 * NOTE(review): the error paths below do not free intr_handle->intr_vec or
 * disable the event fds enabled earlier in this function — presumably they
 * are reclaimed by a later stop/close; confirm no leak across repeated
 * failed starts.
 */
static int
i40evf_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	uint32_t intr_vector = 0;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_stopped = 0;

	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	/* Queue pairs are sized by the larger of the RX and TX counts. */
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
					dev->data->nb_tx_queues);

	/* check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec\n", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	if (i40evf_rx_init(dev) != 0){
		PMD_DRV_LOG(ERR, "failed to do RX init");
		return -1;
	}

	i40evf_tx_init(dev);

	if (i40evf_configure_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		goto err_queue;
	}
	if (i40evf_config_irq_map(dev)) {
		PMD_DRV_LOG(ERR, "config_irq_map failed");
		goto err_queue;
	}

	/* Set all mac addrs */
	i40evf_add_del_all_mac_addr(dev, TRUE);

	if (i40evf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");
		goto err_mac;
	}

	i40evf_enable_queues_intr(dev);
	return 0;

err_mac:
	/* Undo the MAC programming done just above. */
	i40evf_add_del_all_mac_addr(dev, FALSE);
err_queue:
	return -1;
}
2089 | ||
2090 | static void | |
2091 | i40evf_dev_stop(struct rte_eth_dev *dev) | |
2092 | { | |
2093 | struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; | |
2094 | ||
2095 | PMD_INIT_FUNC_TRACE(); | |
2096 | ||
2097 | i40evf_stop_queues(dev); | |
2098 | i40evf_disable_queues_intr(dev); | |
2099 | i40e_dev_clear_queues(dev); | |
2100 | ||
2101 | /* Clean datapath event and queue/vec mapping */ | |
2102 | rte_intr_efd_disable(intr_handle); | |
2103 | if (intr_handle->intr_vec) { | |
2104 | rte_free(intr_handle->intr_vec); | |
2105 | intr_handle->intr_vec = NULL; | |
2106 | } | |
2107 | /* remove all mac addrs */ | |
2108 | i40evf_add_del_all_mac_addr(dev, FALSE); | |
2109 | ||
2110 | } | |
2111 | ||
2112 | static int | |
2113 | i40evf_dev_link_update(struct rte_eth_dev *dev, | |
2114 | __rte_unused int wait_to_complete) | |
2115 | { | |
2116 | struct rte_eth_link new_link; | |
2117 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
2118 | /* | |
2119 | * DPDK pf host provide interfacet to acquire link status | |
2120 | * while Linux driver does not | |
2121 | */ | |
2122 | ||
2123 | /* Linux driver PF host */ | |
2124 | switch (vf->link_speed) { | |
2125 | case I40E_LINK_SPEED_100MB: | |
2126 | new_link.link_speed = ETH_SPEED_NUM_100M; | |
2127 | break; | |
2128 | case I40E_LINK_SPEED_1GB: | |
2129 | new_link.link_speed = ETH_SPEED_NUM_1G; | |
2130 | break; | |
2131 | case I40E_LINK_SPEED_10GB: | |
2132 | new_link.link_speed = ETH_SPEED_NUM_10G; | |
2133 | break; | |
2134 | case I40E_LINK_SPEED_20GB: | |
2135 | new_link.link_speed = ETH_SPEED_NUM_20G; | |
2136 | break; | |
2137 | case I40E_LINK_SPEED_40GB: | |
2138 | new_link.link_speed = ETH_SPEED_NUM_40G; | |
2139 | break; | |
2140 | default: | |
2141 | new_link.link_speed = ETH_SPEED_NUM_100M; | |
2142 | break; | |
2143 | } | |
2144 | /* full duplex only */ | |
2145 | new_link.link_duplex = ETH_LINK_FULL_DUPLEX; | |
2146 | new_link.link_status = vf->link_up ? ETH_LINK_UP : | |
2147 | ETH_LINK_DOWN; | |
2148 | ||
2149 | i40evf_dev_atomic_write_link_status(dev, &new_link); | |
2150 | ||
2151 | return 0; | |
2152 | } | |
2153 | ||
2154 | static void | |
2155 | i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev) | |
2156 | { | |
2157 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
2158 | int ret; | |
2159 | ||
2160 | /* If enabled, just return */ | |
2161 | if (vf->promisc_unicast_enabled) | |
2162 | return; | |
2163 | ||
2164 | ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled); | |
2165 | if (ret == 0) | |
2166 | vf->promisc_unicast_enabled = TRUE; | |
2167 | } | |
2168 | ||
2169 | static void | |
2170 | i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev) | |
2171 | { | |
2172 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
2173 | int ret; | |
2174 | ||
2175 | /* If disabled, just return */ | |
2176 | if (!vf->promisc_unicast_enabled) | |
2177 | return; | |
2178 | ||
2179 | ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled); | |
2180 | if (ret == 0) | |
2181 | vf->promisc_unicast_enabled = FALSE; | |
2182 | } | |
2183 | ||
2184 | static void | |
2185 | i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev) | |
2186 | { | |
2187 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
2188 | int ret; | |
2189 | ||
2190 | /* If enabled, just return */ | |
2191 | if (vf->promisc_multicast_enabled) | |
2192 | return; | |
2193 | ||
2194 | ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1); | |
2195 | if (ret == 0) | |
2196 | vf->promisc_multicast_enabled = TRUE; | |
2197 | } | |
2198 | ||
2199 | static void | |
2200 | i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev) | |
2201 | { | |
2202 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
2203 | int ret; | |
2204 | ||
2205 | /* If enabled, just return */ | |
2206 | if (!vf->promisc_multicast_enabled) | |
2207 | return; | |
2208 | ||
2209 | ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0); | |
2210 | if (ret == 0) | |
2211 | vf->promisc_multicast_enabled = FALSE; | |
2212 | } | |
2213 | ||
/* Report VF capabilities and default queue configuration to the ethdev
 * layer.  Limits come from the resources negotiated with the host
 * (vf->vsi_res) and from i40e hardware constants.
 */
static void
i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
	/* RSS key spans I40E_VFQF_HKEY_MAX_INDEX+1 32-bit registers. */
	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
	dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM;

	/* Defaults used when the application passes NULL queue configs. */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = I40E_DEFAULT_RX_PTHRESH,
			.hthresh = I40E_DEFAULT_RX_HTHRESH,
			.wthresh = I40E_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = I40E_DEFAULT_TX_PTHRESH,
			.hthresh = I40E_DEFAULT_TX_HTHRESH,
			.wthresh = I40E_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = I40E_MAX_RING_DESC,
		.nb_min = I40E_MIN_RING_DESC,
		.nb_align = I40E_ALIGN_RING_DESC,
	};
}
2276 | ||
2277 | static void | |
2278 | i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) | |
2279 | { | |
2280 | if (i40evf_get_statics(dev, stats)) | |
2281 | PMD_DRV_LOG(ERR, "Get statics failed"); | |
2282 | } | |
2283 | ||
/* Close the VF port: stop the datapath, free queues, reset the VF and
 * shut down the admin queue, then detach the interrupt callback.  The
 * ordering matters: interrupts are disabled before the callback is
 * unregistered.
 */
static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = dev->pci_dev;

	i40evf_dev_stop(dev);
	hw->adapter_stopped = 1;
	i40e_dev_free_queues(dev);
	i40evf_reset_vf(hw);
	i40e_shutdown_adminq(hw);
	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(&pci_dev->intr_handle,
		i40evf_dev_interrupt_handler, (void *)dev);
	i40evf_disable_irq0(hw);
}
2303 | ||
2304 | static int | |
2305 | i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) | |
2306 | { | |
2307 | struct i40e_vf *vf = I40E_VSI_TO_VF(vsi); | |
2308 | struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); | |
2309 | int ret; | |
2310 | ||
2311 | if (!lut) | |
2312 | return -EINVAL; | |
2313 | ||
2314 | if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { | |
2315 | ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, FALSE, | |
2316 | lut, lut_size); | |
2317 | if (ret) { | |
2318 | PMD_DRV_LOG(ERR, "Failed to get RSS lookup table"); | |
2319 | return ret; | |
2320 | } | |
2321 | } else { | |
2322 | uint32_t *lut_dw = (uint32_t *)lut; | |
2323 | uint16_t i, lut_size_dw = lut_size / 4; | |
2324 | ||
2325 | for (i = 0; i < lut_size_dw; i++) | |
2326 | lut_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HLUT(i)); | |
2327 | } | |
2328 | ||
2329 | return 0; | |
2330 | } | |
2331 | ||
2332 | static int | |
2333 | i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) | |
2334 | { | |
2335 | struct i40e_vf *vf; | |
2336 | struct i40e_hw *hw; | |
2337 | int ret; | |
2338 | ||
2339 | if (!vsi || !lut) | |
2340 | return -EINVAL; | |
2341 | ||
2342 | vf = I40E_VSI_TO_VF(vsi); | |
2343 | hw = I40E_VSI_TO_HW(vsi); | |
2344 | ||
2345 | if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { | |
2346 | ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE, | |
2347 | lut, lut_size); | |
2348 | if (ret) { | |
2349 | PMD_DRV_LOG(ERR, "Failed to set RSS lookup table"); | |
2350 | return ret; | |
2351 | } | |
2352 | } else { | |
2353 | uint32_t *lut_dw = (uint32_t *)lut; | |
2354 | uint16_t i, lut_size_dw = lut_size / 4; | |
2355 | ||
2356 | for (i = 0; i < lut_size_dw; i++) | |
2357 | I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i), lut_dw[i]); | |
2358 | I40EVF_WRITE_FLUSH(hw); | |
2359 | } | |
2360 | ||
2361 | return 0; | |
2362 | } | |
2363 | ||
2364 | static int | |
2365 | i40evf_dev_rss_reta_update(struct rte_eth_dev *dev, | |
2366 | struct rte_eth_rss_reta_entry64 *reta_conf, | |
2367 | uint16_t reta_size) | |
2368 | { | |
2369 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
2370 | uint8_t *lut; | |
2371 | uint16_t i, idx, shift; | |
2372 | int ret; | |
2373 | ||
2374 | if (reta_size != ETH_RSS_RETA_SIZE_64) { | |
2375 | PMD_DRV_LOG(ERR, "The size of hash lookup table configured " | |
2376 | "(%d) doesn't match the number of hardware can " | |
2377 | "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64); | |
2378 | return -EINVAL; | |
2379 | } | |
2380 | ||
2381 | lut = rte_zmalloc("i40e_rss_lut", reta_size, 0); | |
2382 | if (!lut) { | |
2383 | PMD_DRV_LOG(ERR, "No memory can be allocated"); | |
2384 | return -ENOMEM; | |
2385 | } | |
2386 | ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size); | |
2387 | if (ret) | |
2388 | goto out; | |
2389 | for (i = 0; i < reta_size; i++) { | |
2390 | idx = i / RTE_RETA_GROUP_SIZE; | |
2391 | shift = i % RTE_RETA_GROUP_SIZE; | |
2392 | if (reta_conf[idx].mask & (1ULL << shift)) | |
2393 | lut[i] = reta_conf[idx].reta[shift]; | |
2394 | } | |
2395 | ret = i40evf_set_rss_lut(&vf->vsi, lut, reta_size); | |
2396 | ||
2397 | out: | |
2398 | rte_free(lut); | |
2399 | ||
2400 | return ret; | |
2401 | } | |
2402 | ||
2403 | static int | |
2404 | i40evf_dev_rss_reta_query(struct rte_eth_dev *dev, | |
2405 | struct rte_eth_rss_reta_entry64 *reta_conf, | |
2406 | uint16_t reta_size) | |
2407 | { | |
2408 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
2409 | uint16_t i, idx, shift; | |
2410 | uint8_t *lut; | |
2411 | int ret; | |
2412 | ||
2413 | if (reta_size != ETH_RSS_RETA_SIZE_64) { | |
2414 | PMD_DRV_LOG(ERR, "The size of hash lookup table configured " | |
2415 | "(%d) doesn't match the number of hardware can " | |
2416 | "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64); | |
2417 | return -EINVAL; | |
2418 | } | |
2419 | ||
2420 | lut = rte_zmalloc("i40e_rss_lut", reta_size, 0); | |
2421 | if (!lut) { | |
2422 | PMD_DRV_LOG(ERR, "No memory can be allocated"); | |
2423 | return -ENOMEM; | |
2424 | } | |
2425 | ||
2426 | ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size); | |
2427 | if (ret) | |
2428 | goto out; | |
2429 | for (i = 0; i < reta_size; i++) { | |
2430 | idx = i / RTE_RETA_GROUP_SIZE; | |
2431 | shift = i % RTE_RETA_GROUP_SIZE; | |
2432 | if (reta_conf[idx].mask & (1ULL << shift)) | |
2433 | reta_conf[idx].reta[shift] = lut[i]; | |
2434 | } | |
2435 | ||
2436 | out: | |
2437 | rte_free(lut); | |
2438 | ||
2439 | return ret; | |
2440 | } | |
2441 | ||
2442 | static int | |
2443 | i40evf_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) | |
2444 | { | |
2445 | struct i40e_vf *vf = I40E_VSI_TO_VF(vsi); | |
2446 | struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); | |
2447 | int ret = 0; | |
2448 | ||
2449 | if (!key || key_len == 0) { | |
2450 | PMD_DRV_LOG(DEBUG, "No key to be configured"); | |
2451 | return 0; | |
2452 | } else if (key_len != (I40E_VFQF_HKEY_MAX_INDEX + 1) * | |
2453 | sizeof(uint32_t)) { | |
2454 | PMD_DRV_LOG(ERR, "Invalid key length %u", key_len); | |
2455 | return -EINVAL; | |
2456 | } | |
2457 | ||
2458 | if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { | |
2459 | struct i40e_aqc_get_set_rss_key_data *key_dw = | |
2460 | (struct i40e_aqc_get_set_rss_key_data *)key; | |
2461 | ||
2462 | ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw); | |
2463 | if (ret) | |
2464 | PMD_INIT_LOG(ERR, "Failed to configure RSS key " | |
2465 | "via AQ"); | |
2466 | } else { | |
2467 | uint32_t *hash_key = (uint32_t *)key; | |
2468 | uint16_t i; | |
2469 | ||
2470 | for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) | |
2471 | i40e_write_rx_ctl(hw, I40E_VFQF_HKEY(i), hash_key[i]); | |
2472 | I40EVF_WRITE_FLUSH(hw); | |
2473 | } | |
2474 | ||
2475 | return ret; | |
2476 | } | |
2477 | ||
2478 | static int | |
2479 | i40evf_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len) | |
2480 | { | |
2481 | struct i40e_vf *vf = I40E_VSI_TO_VF(vsi); | |
2482 | struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); | |
2483 | int ret; | |
2484 | ||
2485 | if (!key || !key_len) | |
2486 | return -EINVAL; | |
2487 | ||
2488 | if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { | |
2489 | ret = i40e_aq_get_rss_key(hw, vsi->vsi_id, | |
2490 | (struct i40e_aqc_get_set_rss_key_data *)key); | |
2491 | if (ret) { | |
2492 | PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ"); | |
2493 | return ret; | |
2494 | } | |
2495 | } else { | |
2496 | uint32_t *key_dw = (uint32_t *)key; | |
2497 | uint16_t i; | |
2498 | ||
2499 | for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) | |
2500 | key_dw[i] = i40e_read_rx_ctl(hw, I40E_VFQF_HKEY(i)); | |
2501 | } | |
2502 | *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); | |
2503 | ||
2504 | return 0; | |
2505 | } | |
2506 | ||
/* Program the RSS hash key and the hash-enable (HENA) flow-type bits.
 * The 64-bit HENA value is split across two 32-bit registers; all RSS
 * bits valid for this MAC type are cleared before the requested ones
 * are set (X722 has a wider valid mask).
 */
static int
i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
{
	struct i40e_hw *hw = I40E_VF_TO_HW(vf);
	uint64_t rss_hf, hena;
	int ret;

	ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key,
				 rss_conf->rss_key_len);
	if (ret)
		return ret;

	rss_hf = rss_conf->rss_hf;
	/* Assemble the 64-bit HENA from its two 32-bit halves. */
	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
	if (hw->mac.type == I40E_MAC_X722)
		hena &= ~I40E_RSS_HENA_ALL_X722;
	else
		hena &= ~I40E_RSS_HENA_ALL;
	hena |= i40e_config_hena(rss_hf, hw->mac.type);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
	I40EVF_WRITE_FLUSH(hw);

	return 0;
}
2533 | ||
2534 | static void | |
2535 | i40evf_disable_rss(struct i40e_vf *vf) | |
2536 | { | |
2537 | struct i40e_hw *hw = I40E_VF_TO_HW(vf); | |
2538 | uint64_t hena; | |
2539 | ||
2540 | hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); | |
2541 | hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; | |
2542 | if (hw->mac.type == I40E_MAC_X722) | |
2543 | hena &= ~I40E_RSS_HENA_ALL_X722; | |
2544 | else | |
2545 | hena &= ~I40E_RSS_HENA_ALL; | |
2546 | i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena); | |
2547 | i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32)); | |
2548 | I40EVF_WRITE_FLUSH(hw); | |
2549 | } | |
2550 | ||
/* Configure RSS for the VF: fill the hash lookup table round-robin over
 * the active RX queues, then program the hash key and flow types.
 * RSS is disabled when the port is not in ETH_MQ_RX_RSS mode or no hash
 * flag is requested.
 * Note: rss_key_default is a file-scope buffer defined elsewhere in this
 * file; it is refilled with random dwords when the user supplies no key.
 */
static int
i40evf_config_rss(struct i40e_vf *vf)
{
	struct i40e_hw *hw = I40E_VF_TO_HW(vf);
	struct rte_eth_rss_conf rss_conf;
	/* nb_q = total LUT entries (4 per 32-bit HLUT register). */
	uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
	uint16_t num;

	if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		i40evf_disable_rss(vf);
		PMD_DRV_LOG(DEBUG, "RSS not configured\n");
		return 0;
	}

	num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF);
	/* Fill out the look up table */
	for (i = 0, j = 0; i < nb_q; i++, j++) {
		if (j >= num)
			j = 0;
		/* Pack four 8-bit queue indexes per HLUT register. */
		lut = (lut << 8) | j;
		if ((i & 3) == 3)
			I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
	}

	rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
		i40evf_disable_rss(vf);
		PMD_DRV_LOG(DEBUG, "No hash flag is set\n");
		return 0;
	}

	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
		(I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Calculate the default hash key */
		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
			rss_key_default[i] = (uint32_t)rte_rand();
		rss_conf.rss_key = (uint8_t *)rss_key_default;
		rss_conf.rss_key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
			sizeof(uint32_t);
	}

	return i40evf_hw_rss_hash_set(vf, &rss_conf);
}
2594 | ||
2595 | static int | |
2596 | i40evf_dev_rss_hash_update(struct rte_eth_dev *dev, | |
2597 | struct rte_eth_rss_conf *rss_conf) | |
2598 | { | |
2599 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
2600 | struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
2601 | uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL; | |
2602 | uint64_t hena; | |
2603 | ||
2604 | hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); | |
2605 | hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; | |
2606 | if (!(hena & ((hw->mac.type == I40E_MAC_X722) | |
2607 | ? I40E_RSS_HENA_ALL_X722 | |
2608 | : I40E_RSS_HENA_ALL))) { /* RSS disabled */ | |
2609 | if (rss_hf != 0) /* Enable RSS */ | |
2610 | return -EINVAL; | |
2611 | return 0; | |
2612 | } | |
2613 | ||
2614 | /* RSS enabled */ | |
2615 | if (rss_hf == 0) /* Disable RSS */ | |
2616 | return -EINVAL; | |
2617 | ||
2618 | return i40evf_hw_rss_hash_set(vf, rss_conf); | |
2619 | } | |
2620 | ||
2621 | static int | |
2622 | i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, | |
2623 | struct rte_eth_rss_conf *rss_conf) | |
2624 | { | |
2625 | struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); | |
2626 | struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
2627 | uint64_t hena; | |
2628 | ||
2629 | i40evf_get_rss_key(&vf->vsi, rss_conf->rss_key, | |
2630 | &rss_conf->rss_key_len); | |
2631 | ||
2632 | hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); | |
2633 | hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; | |
2634 | rss_conf->rss_hf = i40e_parse_hena(hena); | |
2635 | ||
2636 | return 0; | |
2637 | } |