/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>

#include "avf_log.h"
#include "base/avf_prototype.h"
#include "base/avf_adminq_cmd.h"
#include "base/avf_type.h"

#include "avf.h"
#include "avf_rxtx.h"

static int avf_dev_configure(struct rte_eth_dev *dev);
static int avf_dev_start(struct rte_eth_dev *dev);
static void avf_dev_stop(struct rte_eth_dev *dev);
static void avf_dev_close(struct rte_eth_dev *dev);
static void avf_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static const uint32_t *avf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int avf_dev_stats_get(struct rte_eth_dev *dev,
			     struct rte_eth_stats *stats);
static void avf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void avf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void avf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void avf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int avf_dev_add_mac_addr(struct rte_eth_dev *dev,
				struct ether_addr *addr,
				uint32_t index,
				uint32_t pool);
static void avf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int avf_dev_vlan_filter_set(struct rte_eth_dev *dev,
				   uint16_t vlan_id, int on);
static int avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int avf_dev_rss_reta_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int avf_dev_rss_reta_query(struct rte_eth_dev *dev,
				  struct rte_eth_rss_reta_entry64 *reta_conf,
				  uint16_t reta_size);
static int avf_dev_rss_hash_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);
static int avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				     struct rte_eth_rss_conf *rss_conf);
static int avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
					struct ether_addr *mac_addr);
static int avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					uint16_t queue_id);
static int avf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					 uint16_t queue_id);

int avf_logtype_init;
int avf_logtype_driver;

static const struct rte_pci_id pci_id_avf_map[] = {
	{ RTE_PCI_DEVICE(AVF_INTEL_VENDOR_ID, AVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops avf_eth_dev_ops = {
	.dev_configure              = avf_dev_configure,
	.dev_start                  = avf_dev_start,
	.dev_stop                   = avf_dev_stop,
	.dev_close                  = avf_dev_close,
	.dev_infos_get              = avf_dev_info_get,
	.dev_supported_ptypes_get   = avf_dev_supported_ptypes_get,
	.link_update                = avf_dev_link_update,
	.stats_get                  = avf_dev_stats_get,
	.promiscuous_enable         = avf_dev_promiscuous_enable,
	.promiscuous_disable        = avf_dev_promiscuous_disable,
	.allmulticast_enable        = avf_dev_allmulticast_enable,
	.allmulticast_disable       = avf_dev_allmulticast_disable,
	.mac_addr_add               = avf_dev_add_mac_addr,
	.mac_addr_remove            = avf_dev_del_mac_addr,
	.vlan_filter_set            = avf_dev_vlan_filter_set,
	.vlan_offload_set           = avf_dev_vlan_offload_set,
	.rx_queue_start             = avf_dev_rx_queue_start,
	.rx_queue_stop              = avf_dev_rx_queue_stop,
	.tx_queue_start             = avf_dev_tx_queue_start,
	.tx_queue_stop              = avf_dev_tx_queue_stop,
	.rx_queue_setup             = avf_dev_rx_queue_setup,
	.rx_queue_release           = avf_dev_rx_queue_release,
	.tx_queue_setup             = avf_dev_tx_queue_setup,
	.tx_queue_release           = avf_dev_tx_queue_release,
	.mac_addr_set               = avf_dev_set_default_mac_addr,
	.reta_update                = avf_dev_rss_reta_update,
	.reta_query                 = avf_dev_rss_reta_query,
	.rss_hash_update            = avf_dev_rss_hash_update,
	.rss_hash_conf_get          = avf_dev_rss_hash_conf_get,
	.rxq_info_get               = avf_dev_rxq_info_get,
	.txq_info_get               = avf_dev_txq_info_get,
	.rx_queue_count             = avf_dev_rxq_count,
	.rx_descriptor_status       = avf_dev_rx_desc_status,
	.tx_descriptor_status       = avf_dev_tx_desc_status,
	.mtu_set                    = avf_dev_mtu_set,
	.rx_queue_intr_enable       = avf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable      = avf_dev_rx_queue_intr_disable,
};

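/* Apply the configuration in dev->data->dev_conf: choose the
 * bulk-allocation/vector Rx and Tx paths and sync VLAN stripping on
 * the PF with the requested Rx offload flags.
 */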
static int
avf_dev_configure(struct rte_eth_dev *dev)
{
	struct avf_adapter *ad =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(ad);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;

	ad->rx_bulk_alloc_allowed = true;
#ifdef RTE_LIBRTE_AVF_INC_VECTOR
	/* Initialize to TRUE. If any of the Rx queues doesn't meet the
	 * vector Rx/Tx preconditions, it will be reset.
	 */
	ad->rx_vec_allowed = true;
	ad->tx_vec_allowed = true;
#else
	ad->rx_vec_allowed = false;
	ad->tx_vec_allowed = false;
#endif

	/* VLAN stripping setting */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			avf_enable_vlan_strip(ad);
		else
			avf_disable_vlan_strip(ad);
	}
	return 0;
}

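/* Program the RSS key and lookup table on the PF via virtchnl. If the
 * port is not configured with ETH_MQ_RX_RSS, every LUT entry is
 * pointed at queue 0, since RSS itself is enabled by the PF by
 * default and cannot be turned off here.
 */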
static int
avf_init_rss(struct avf_adapter *adapter)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_eth_rss_conf *rss_conf;
	uint8_t i, j, nb_q;
	int ret;

	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
		       AVF_MAX_NUM_QUEUES);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
		PMD_DRV_LOG(DEBUG, "RSS is not supported");
		return -ENOTSUP;
	}
	if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
		/* set all LUT items to the default queue */
		for (i = 0; i < vf->vf_res->rss_lut_size; i++)
			vf->rss_lut[i] = 0;
		ret = avf_configure_rss_lut(adapter);
		return ret;
	}

	/* In AVF, RSS enablement is set by the PF driver. It is not
	 * supported to set it based on rss_conf->rss_hf.
	 */

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate a random default hash key; iterate strictly
		 * below rss_key_size to stay within the key buffer.
		 */
		for (i = 0; i < vf->vf_res->rss_key_size; i++)
			vf->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vf->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vf->vf_res->rss_key_size));
	}

	/* init the RSS LUT: spread queues round-robin, e.g. with
	 * nb_q == 4 the LUT becomes 0,1,2,3,0,1,2,3,...
	 */
	for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
		if (j >= nb_q)
			j = 0;
		vf->rss_lut[i] = j;
	}
	/* send virtchnl ops to configure RSS */
	ret = avf_configure_rss_lut(adapter);
	if (ret)
		return ret;
	ret = avf_configure_rss_key(adapter);
	if (ret)
		return ret;

	return 0;
}

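/* Per-queue Rx setup: validate the maximum packet length against the
 * jumbo-frame setting, enable scattered Rx when a frame can exceed
 * one mbuf data buffer, and program the queue tail register to arm
 * the whole descriptor ring.
 */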
static int
avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
{
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t buf_size, max_pkt_len, len;

	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;

	/* Calculate the maximum packet length allowed */
	len = rxq->rx_buf_len * AVF_MAX_CHAINED_RX_BUFFERS;
	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);

	/* Check if the jumbo frame and maximum packet length are set
	 * correctly.
	 */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (max_pkt_len <= ETHER_MAX_LEN ||
		    max_pkt_len > AVF_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is enabled",
				    (uint32_t)ETHER_MAX_LEN,
				    (uint32_t)AVF_FRAME_SIZE_MAX);
			return -EINVAL;
		}
	} else {
		if (max_pkt_len < ETHER_MIN_LEN ||
		    max_pkt_len > ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is disabled",
				    (uint32_t)ETHER_MIN_LEN,
				    (uint32_t)ETHER_MAX_LEN);
			return -EINVAL;
		}
	}

	rxq->max_pkt_len = max_pkt_len;
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    (rxq->max_pkt_len + 2 * AVF_VLAN_TAG_SIZE) > buf_size) {
		dev_data->scattered_rx = 1;
	}
	AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	AVF_WRITE_FLUSH(hw);

	return 0;
}

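/* Initialize every configured Rx queue, then pick the Rx/Tx burst
 * functions (vector, scattered or single-segment) that match the
 * resulting queue parameters.
 */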
static int
avf_init_queues(struct rte_eth_dev *dev)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_rx_queue **rxq =
		(struct avf_rx_queue **)dev->data->rx_queues;
	struct avf_tx_queue **txq =
		(struct avf_tx_queue **)dev->data->tx_queues;
	int i, ret = AVF_SUCCESS;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!rxq[i] || !rxq[i]->q_set)
			continue;
		ret = avf_init_rxq(dev, rxq[i]);
		if (ret != AVF_SUCCESS)
			break;
	}
	/* set rx/tx function to vector/scatter/single-segment
	 * according to parameters
	 */
	avf_set_rx_function(dev);
	avf_set_tx_function(dev);

	return ret;
}

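/* Map Rx queues to MSI-X vectors. When per-queue Rx interrupts are
 * off, a single vector is used, preferring WB_ON_ITR for descriptor
 * write-back; otherwise the available event fds are distributed over
 * the Rx queues round-robin, starting from vector 1.
 */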
static int avf_config_rx_queues_irqs(struct rte_eth_dev *dev,
				     struct rte_intr_handle *intr_handle)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	uint16_t interval, i;
	int vec;

	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
				    dev->data->nb_rx_queues);
			return -1;
		}
	}

	if (!dev->data->dev_conf.intr_conf.rxq ||
	    !rte_intr_dp_is_en(intr_handle)) {
		/* Rx interrupt disabled, map interrupt only for writeback */
		vf->nb_msix = 1;
		if (vf->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
			/* If WB_ON_ITR is supported, enable it */
			vf->msix_base = AVF_RX_VEC_START;
			AVF_WRITE_REG(hw, AVFINT_DYN_CTLN1(vf->msix_base - 1),
				      AVFINT_DYN_CTLN1_ITR_INDX_MASK |
				      AVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
		} else {
			/* If the WB_ON_ITR offload flag is absent, an
			 * interrupt is needed for descriptor write-back.
			 */
			vf->msix_base = AVF_MISC_VEC_ID;

			/* set ITR to max */
			interval = avf_calc_itr_interval(
					AVF_QUEUE_ITR_INTERVAL_MAX);
			AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
				      AVFINT_DYN_CTL01_INTENA_MASK |
				      (AVF_ITR_INDEX_DEFAULT <<
				       AVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
				      (interval <<
				       AVFINT_DYN_CTL01_INTERVAL_SHIFT));
		}
		AVF_WRITE_FLUSH(hw);
		/* map all queues to the same interrupt */
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			vf->rxq_map[vf->msix_base] |= 1 << i;
	} else {
		if (!rte_intr_allow_others(intr_handle)) {
			vf->nb_msix = 1;
			vf->msix_base = AVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				vf->rxq_map[vf->msix_base] |= 1 << i;
				intr_handle->intr_vec[i] = AVF_MISC_VEC_ID;
			}
			PMD_DRV_LOG(DEBUG,
				    "vector %u is mapped to all Rx queues",
				    vf->msix_base);
		} else {
			/* If Rx interrupts are required and multiple
			 * interrupts are available, the vectors start from 1.
			 */
			vf->nb_msix = RTE_MIN(vf->vf_res->max_vectors,
					      intr_handle->nb_efd);
			vf->msix_base = AVF_RX_VEC_START;
			vec = AVF_RX_VEC_START;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				vf->rxq_map[vec] |= 1 << i;
				intr_handle->intr_vec[i] = vec++;
				if (vec >= vf->nb_msix)
					vec = AVF_RX_VEC_START;
			}
			PMD_DRV_LOG(DEBUG,
				    "%u vectors are mapped to %u Rx queues",
				    vf->nb_msix, dev->data->nb_rx_queues);
		}
	}

	if (avf_config_irq_map(adapter)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		return -1;
	}
	return 0;
}

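/* Start all Tx queues first, then all Rx queues, skipping any queue
 * marked for deferred start.
 */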
static int
avf_start_queues(struct rte_eth_dev *dev)
{
	struct avf_rx_queue *rxq;
	struct avf_tx_queue *txq;
	int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->tx_deferred_start)
			continue;
		if (avf_dev_tx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Tx queue %u", i);
			return -1;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq->rx_deferred_start)
			continue;
		if (avf_dev_rx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start Rx queue %u", i);
			return -1;
		}
	}

	return 0;
}

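/* Bring the port up: initialize the queues, program RSS, configure
 * queue and interrupt resources on the PF, install the MAC filters
 * and finally start the queues. MAC filters added here are rolled
 * back if starting the queues fails.
 */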
static int
avf_dev_start(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	hw->adapter_stopped = 0;

	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
				      dev->data->nb_tx_queues);

	if (avf_init_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "failed to do Queue init");
		return -1;
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		if (avf_init_rss(adapter) != 0) {
			PMD_DRV_LOG(ERR, "configure rss failed");
			goto err_rss;
		}
	}

	if (avf_configure_queues(adapter) != 0) {
		PMD_DRV_LOG(ERR, "configure queues failed");
		goto err_queue;
	}

	if (avf_config_rx_queues_irqs(dev, intr_handle) != 0) {
		PMD_DRV_LOG(ERR, "configure irq failed");
		goto err_queue;
	}
	/* Re-enable the interrupt, because the efd assignment may have
	 * changed.
	 */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		rte_intr_disable(intr_handle);
		rte_intr_enable(intr_handle);
	}

	/* Set all MAC addresses */
	avf_add_del_all_mac_addr(adapter, TRUE);

	if (avf_start_queues(dev) != 0) {
		PMD_DRV_LOG(ERR, "enable queues failed");
		goto err_mac;
	}

	return 0;

err_mac:
	avf_add_del_all_mac_addr(adapter, FALSE);
err_queue:
err_rss:
	return -1;
}

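/* Bring the port down: stop the queues, release the Rx interrupt
 * vector mapping and remove the MAC filters installed at start time.
 */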
static void
avf_dev_stop(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1)
		return;

	avf_stop_queues(dev);

	/* Disable the interrupt for Rx */
	rte_intr_efd_disable(intr_handle);
	/* Free the Rx interrupt vector mapping */
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	/* remove all MAC addresses */
	avf_add_del_all_mac_addr(adapter, FALSE);
	hw->adapter_stopped = 1;
}

static void
avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
	dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
	dev_info->hash_key_size = vf->vf_res->rss_key_size;
	dev_info->reta_size = vf->vf_res->rss_lut_size;
	dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
	dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_VLAN_FILTER;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO |
		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = AVF_MAX_RING_DESC,
		.nb_min = AVF_MIN_RING_DESC,
		.nb_align = AVF_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = AVF_MAX_RING_DESC,
		.nb_min = AVF_MIN_RING_DESC,
		.nb_align = AVF_ALIGN_RING_DESC,
	};
}

static const uint32_t *
avf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};
	return ptypes;
}

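/* Report the link status cached in the VF; the cache is only
 * refreshed when a LINK_CHANGE event arrives from the PF, so no
 * hardware access is needed here. The new state is swapped into
 * dev->data->dev_link with an atomic compare-and-set.
 */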
int
avf_dev_link_update(struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
{
	struct rte_eth_link new_link;
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	/* Only read the status info stored in the VF, which is updated
	 * when a LINK_CHANGE event is received from the PF via virtchnl.
	 */
	switch (vf->link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		new_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		new_link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		new_link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		new_link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		new_link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		new_link.link_speed = ETH_SPEED_NUM_40G;
		break;
	default:
		new_link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_status = vf->link_up ? ETH_LINK_UP :
					     ETH_LINK_DOWN;
	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);

	if (rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
				*(uint64_t *)&dev->data->dev_link,
				*(uint64_t *)&new_link) == 0)
		return -1;

	return 0;
}

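/* The four handlers below toggle unicast/multicast promiscuous mode
 * with a single virtchnl request each, preserving the other mode's
 * current setting and updating the cached flag only when the PF
 * accepts the request.
 */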
static void
avf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (vf->promisc_unicast_enabled)
		return;

	ret = avf_config_promisc(adapter, TRUE, vf->promisc_multicast_enabled);
	if (!ret)
		vf->promisc_unicast_enabled = TRUE;
}

static void
avf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (!vf->promisc_unicast_enabled)
		return;

	ret = avf_config_promisc(adapter, FALSE,
				 vf->promisc_multicast_enabled);
	if (!ret)
		vf->promisc_unicast_enabled = FALSE;
}

static void
avf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (vf->promisc_multicast_enabled)
		return;

	ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, TRUE);
	if (!ret)
		vf->promisc_multicast_enabled = TRUE;
}

static void
avf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int ret;

	if (!vf->promisc_multicast_enabled)
		return;

	ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, FALSE);
	if (!ret)
		vf->promisc_multicast_enabled = FALSE;
}

static int
avf_dev_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr,
		     __rte_unused uint32_t index,
		     __rte_unused uint32_t pool)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int err;

	if (is_zero_ether_addr(addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	err = avf_add_del_eth_addr(adapter, addr, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "failed to add MAC address");
		return -EIO;
	}

	vf->mac_num++;

	return 0;
}

static void
avf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct ether_addr *addr;
	int err;

	addr = &dev->data->mac_addrs[index];

	err = avf_add_del_eth_addr(adapter, addr, FALSE);
	if (err)
		PMD_DRV_LOG(ERR, "failed to delete MAC address");

	vf->mac_num--;
}

static int
avf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	int err;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	err = avf_add_del_vlan(adapter, vlan_id, on);
	if (err)
		return -EIO;
	return 0;
}

static int
avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	int err;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		return -ENOTSUP;

	/* VLAN stripping setting */
	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			err = avf_enable_vlan_strip(adapter);
		else
			err = avf_disable_vlan_strip(adapter);

		if (err)
			return -EIO;
	}
	return 0;
}

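/* Update the RSS lookup table. reta_conf is an array of 64-entry
 * groups, so LUT entry i comes from reta_conf[i / 64].reta[i % 64]
 * whenever the matching mask bit is set. The old LUT is kept in a
 * scratch buffer so the cached table can be restored if the PF
 * rejects the update.
 */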
static int
avf_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	uint8_t *lut;
	uint16_t i, idx, shift;
	int ret;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	if (reta_size != vf->vf_res->rss_lut_size) {
		PMD_DRV_LOG(ERR, "The size of the hash lookup table "
			    "configured (%d) doesn't match the number "
			    "hardware can support (%d)",
			    reta_size, vf->vf_res->rss_lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc("rss_lut", reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	/* store the old lut table temporarily */
	rte_memcpy(lut, vf->rss_lut, reta_size);

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			vf->rss_lut[i] = reta_conf[idx].reta[shift];
	}

	/* send virtchnl ops to configure RSS */
	ret = avf_configure_rss_lut(adapter);
	if (ret) /* revert to the old lut on failure */
		rte_memcpy(vf->rss_lut, lut, reta_size);
	rte_free(lut);

	return ret;
}

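/* Read the cached RSS lookup table back into reta_conf, using the
 * same group/mask layout as avf_dev_rss_reta_update().
 */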
static int
avf_dev_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	uint16_t i, idx, shift;

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	if (reta_size != vf->vf_res->rss_lut_size) {
		PMD_DRV_LOG(ERR, "The size of the hash lookup table "
			    "configured (%d) doesn't match the number "
			    "hardware can support (%d)",
			    reta_size, vf->vf_res->rss_lut_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = vf->rss_lut[i];
	}

	return 0;
}

static int
avf_dev_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	/* The HENA setting is enabled by default and is not changed here;
	 * only the hash key can be updated.
	 */
	if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
		PMD_DRV_LOG(DEBUG, "No key to be configured");
		return 0;
	} else if (rss_conf->rss_key_len != vf->vf_res->rss_key_size) {
		PMD_DRV_LOG(ERR, "The size of the hash key configured "
			    "(%d) doesn't match the size hardware can "
			    "support (%d)", rss_conf->rss_key_len,
			    vf->vf_res->rss_key_size);
		return -EINVAL;
	}

	rte_memcpy(vf->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);

	return avf_configure_rss_key(adapter);
}

static int
avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
		return -ENOTSUP;

	/* Just report the default value for now. */
	rss_conf->rss_hf = AVF_RSS_OFFLOAD_ALL;

	if (!rss_conf->rss_key)
		return 0;

	rss_conf->rss_key_len = vf->vf_res->rss_key_size;
	rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);

	return 0;
}

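/* Set a new MTU. The port must be stopped; the jumbo-frame offload
 * flag and max_rx_pkt_len are updated to match the resulting frame
 * size (mtu + AVF_ETH_OVERHEAD).
 */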
static int
avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint32_t frame_size = mtu + AVF_ETH_OVERHEAD;
	int ret = 0;

	if (mtu < ETHER_MIN_MTU || frame_size > AVF_FRAME_SIZE_MAX)
		return -EINVAL;

	/* MTU setting is forbidden while the port is started */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "port must be stopped before configuration");
		return -EBUSY;
	}

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return ret;
}

static int
avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
			     struct ether_addr *mac_addr)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct ether_addr *perm_addr, *old_addr;
	int ret;

	old_addr = (struct ether_addr *)hw->mac.addr;
	perm_addr = (struct ether_addr *)hw->mac.perm_addr;

	if (is_same_ether_addr(mac_addr, old_addr))
		return 0;

	/* If the MAC address is configured by the host, skip the setting */
	if (is_valid_assigned_ether_addr(perm_addr))
		return -EPERM;

	ret = avf_add_del_eth_addr(adapter, old_addr, FALSE);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to delete old MAC:"
			    " %02X:%02X:%02X:%02X:%02X:%02X",
			    old_addr->addr_bytes[0],
			    old_addr->addr_bytes[1],
			    old_addr->addr_bytes[2],
			    old_addr->addr_bytes[3],
			    old_addr->addr_bytes[4],
			    old_addr->addr_bytes[5]);

	ret = avf_add_del_eth_addr(adapter, mac_addr, TRUE);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to add new MAC:"
			    " %02X:%02X:%02X:%02X:%02X:%02X",
			    mac_addr->addr_bytes[0],
			    mac_addr->addr_bytes[1],
			    mac_addr->addr_bytes[2],
			    mac_addr->addr_bytes[3],
			    mac_addr->addr_bytes[4],
			    mac_addr->addr_bytes[5]);

	if (ret)
		return -EIO;

	ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
	return 0;
}

static int
avf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct virtchnl_eth_stats *pstats = NULL;
	int ret;

	ret = avf_query_stats(adapter, &pstats);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Get statistics failed");
		return -EIO;
	}

	stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
			  pstats->rx_broadcast;
	stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
			  pstats->tx_unicast;
	stats->imissed = pstats->rx_discards;
	stats->oerrors = pstats->tx_errors + pstats->tx_discards;
	stats->ibytes = pstats->rx_bytes;
	stats->obytes = pstats->tx_bytes;

	return 0;
}

static int
avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	uint16_t msix_intr;

	msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
	if (msix_intr == AVF_MISC_VEC_ID) {
		PMD_DRV_LOG(INFO, "MISC is also enabled for control");
		AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
			      AVFINT_DYN_CTL01_INTENA_MASK |
			      AVFINT_DYN_CTL01_ITR_INDX_MASK);
	} else {
		AVF_WRITE_REG(hw,
			      AVFINT_DYN_CTLN1(msix_intr - AVF_RX_VEC_START),
			      AVFINT_DYN_CTLN1_INTENA_MASK |
			      AVFINT_DYN_CTLN1_ITR_INDX_MASK);
	}

	AVF_WRITE_FLUSH(hw);

	rte_intr_enable(&pci_dev->intr_handle);

	return 0;
}

static int
avf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t msix_intr;

	msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
	if (msix_intr == AVF_MISC_VEC_ID) {
		PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
		return -EIO;
	}

	AVF_WRITE_REG(hw,
		      AVFINT_DYN_CTLN1(msix_intr - AVF_RX_VEC_START),
		      0);

	AVF_WRITE_FLUSH(hw);
	return 0;
}

static int
avf_check_vf_reset_done(struct avf_hw *hw)
{
	int i, reset;

	for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
		reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
			AVFGEN_RSTAT_VFR_STATE_MASK;
		reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
		if (reset == VIRTCHNL_VFR_VFACTIVE ||
		    reset == VIRTCHNL_VFR_COMPLETED)
			break;
		rte_delay_ms(20);
	}

	if (i >= AVF_RESET_WAIT_CNT)
		return -1;

	return 0;
}

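/* One-time VF bring-up: wait for the VF reset to complete, start the
 * admin queue, negotiate the virtchnl API version, fetch the VF
 * resources from the PF and allocate the RSS key/LUT buffers when
 * the PF handles RSS.
 */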
static int
avf_init_vf(struct rte_eth_dev *dev)
{
	int i, err, bufsz;
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	err = avf_set_mac_type(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
		goto err;
	}

	err = avf_check_vf_reset_done(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	avf_init_adminq_parameter(hw);
	err = avf_init_adminq(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
		goto err;
	}

	vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
	if (!vf->aq_resp) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
		goto err_aq;
	}
	if (avf_check_api_version(adapter) != 0) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}

	bufsz = sizeof(struct virtchnl_vf_resource) +
		(AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
	vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
	if (!vf->vf_res) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}
	if (avf_get_vf_resource(adapter) != 0) {
		PMD_INIT_LOG(ERR, "avf_get_vf_config failed");
		goto err_alloc;
	}
	/* Allocate memory for RSS info */
	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vf->rss_key = rte_zmalloc("rss_key",
					  vf->vf_res->rss_key_size, 0);
		if (!vf->rss_key) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
			goto err_rss;
		}
		vf->rss_lut = rte_zmalloc("rss_lut",
					  vf->vf_res->rss_lut_size, 0);
		if (!vf->rss_lut) {
			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
			goto err_rss;
		}
	}
	return 0;
err_rss:
	rte_free(vf->rss_key);
	rte_free(vf->rss_lut);
err_alloc:
	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
err_api:
	rte_free(vf->aq_resp);
err_aq:
	avf_shutdown_adminq(hw);
err:
	return -1;
}

/* Enable default admin queue interrupt setting */
static inline void
avf_enable_irq0(struct avf_hw *hw)
{
	/* Enable admin queue interrupt trigger */
	AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, AVFINT_ICR0_ENA1_ADMINQ_MASK);

	AVF_WRITE_REG(hw, AVFINT_DYN_CTL01, AVFINT_DYN_CTL01_INTENA_MASK |
		      AVFINT_DYN_CTL01_ITR_INDX_MASK);

	AVF_WRITE_FLUSH(hw);
}

static inline void
avf_disable_irq0(struct avf_hw *hw)
{
	/* Disable all interrupt types */
	AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, 0);
	AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
		      AVFINT_DYN_CTL01_ITR_INDX_MASK);
	AVF_WRITE_FLUSH(hw);
}

static void
avf_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Mask IRQ0 while the virtchnl message is handled, then re-enable */
	avf_disable_irq0(hw);

	avf_handle_virtchnl_msg(dev);

	avf_enable_irq0(hw);
}

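/* ethdev init hook: wire up the ops table and burst functions, and,
 * in the primary process only, initialize the VF over the admin
 * queue, set up the MAC address table and register the admin queue
 * interrupt handler.
 */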
static int
avf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avf_adapter *adapter =
		AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_FUNC_TRACE();

	/* assign ops func pointer */
	eth_dev->dev_ops = &avf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &avf_recv_pkts;
	eth_dev->tx_pkt_burst = &avf_xmit_pkts;
	eth_dev->tx_pkt_prepare = &avf_prep_pkts;

	/* For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check if we need a different RX
	 * and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		avf_set_rx_function(eth_dev);
		avf_set_tx_function(eth_dev);
		return 0;
	}
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.bus_id = pci_dev->addr.bus;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->back = AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
	adapter->eth_dev = eth_dev;

	if (avf_init_vf(eth_dev) != 0) {
		PMD_INIT_LOG(ERR, "Init vf failed");
		return -1;
	}

	/* copy mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc(
					"avf_mac",
					ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX,
					0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
			     " store MAC addresses",
			     ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX);
		return -ENOMEM;
	}
	/* If the MAC address is not configured by host,
	 * generate a random one.
	 */
	if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
		eth_random_addr(hw->mac.addr);
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* register callback func to eal lib */
	rte_intr_callback_register(&pci_dev->intr_handle,
				   avf_dev_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&pci_dev->intr_handle);

	/* configure and enable device interrupt */
	avf_enable_irq0(hw);

	return 0;
}

static void
avf_dev_close(struct rte_eth_dev *dev)
{
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	avf_dev_stop(dev);
	avf_shutdown_adminq(hw);
	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     avf_dev_interrupt_handler, dev);
	avf_disable_irq0(hw);
}

static int
avf_dev_uninit(struct rte_eth_dev *dev)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;
	if (hw->adapter_stopped == 0)
		avf_dev_close(dev);

	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
	vf->vf_res = NULL;

	rte_free(vf->aq_resp);
	vf->aq_resp = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	if (vf->rss_lut) {
		rte_free(vf->rss_lut);
		vf->rss_lut = NULL;
	}
	if (vf->rss_key) {
		rte_free(vf->rss_key);
		vf->rss_key = NULL;
	}

	return 0;
}

static int eth_avf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct avf_adapter), avf_dev_init);
}

static int eth_avf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, avf_dev_uninit);
}

/* Adaptive virtual function driver struct */
static struct rte_pci_driver rte_avf_pmd = {
	.id_table = pci_id_avf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_avf_pci_probe,
	.remove = eth_avf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci");
RTE_INIT(avf_init_log)
{
	avf_logtype_init = rte_log_register("pmd.net.avf.init");
	if (avf_logtype_init >= 0)
		rte_log_set_level(avf_logtype_init, RTE_LOG_NOTICE);
	avf_logtype_driver = rte_log_register("pmd.net.avf.driver");
	if (avf_logtype_driver >= 0)
		rte_log_set_level(avf_logtype_driver, RTE_LOG_NOTICE);
}

/* memory functions for base code */
enum avf_status_code
avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
		       struct avf_dma_mem *mem,
		       u64 size,
		       u32 alignment)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	if (!mem)
		return AVF_ERR_PARAM;

	snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand());
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG, alignment,
					 RTE_PGSIZE_2M);
	if (!mz)
		return AVF_ERR_NO_MEMORY;

	mem->size = size;
	mem->va = mz->addr;
	mem->pa = mz->phys_addr;
	mem->zone = (const void *)mz;
	PMD_DRV_LOG(DEBUG,
		    "memzone %s allocated with physical address: %"PRIu64,
		    mz->name, mem->pa);

	return AVF_SUCCESS;
}

enum avf_status_code
avf_free_dma_mem_d(__rte_unused struct avf_hw *hw,
		   struct avf_dma_mem *mem)
{
	if (!mem)
		return AVF_ERR_PARAM;

	PMD_DRV_LOG(DEBUG,
		    "memzone %s to be freed with physical address: %"PRIu64,
		    ((const struct rte_memzone *)mem->zone)->name, mem->pa);
	rte_memzone_free((const struct rte_memzone *)mem->zone);
	mem->zone = NULL;
	mem->va = NULL;
	mem->pa = (u64)0;

	return AVF_SUCCESS;
}

enum avf_status_code
avf_allocate_virt_mem_d(__rte_unused struct avf_hw *hw,
			struct avf_virt_mem *mem,
			u32 size)
{
	if (!mem)
		return AVF_ERR_PARAM;

	mem->size = size;
	mem->va = rte_zmalloc("avf", size, 0);

	if (mem->va)
		return AVF_SUCCESS;
	else
		return AVF_ERR_NO_MEMORY;
}

enum avf_status_code
avf_free_virt_mem_d(__rte_unused struct avf_hw *hw,
		    struct avf_virt_mem *mem)
{
	if (!mem)
		return AVF_ERR_PARAM;

	rte_free(mem->va);
	mem->va = NULL;

	return AVF_SUCCESS;
}