1 /*-
2 * BSD LICENSE
3 *
4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <rte_malloc.h>
35 #include <rte_tailq.h>
36
37 #include "base/i40e_prototype.h"
38 #include "i40e_ethdev.h"
39 #include "i40e_pf.h"
40 #include "i40e_rxtx.h"
41 #include "rte_pmd_i40e.h"
42
43 /* The max bandwidth of i40e is 40Gbps. */
44 #define I40E_QOS_BW_MAX 40000
45 /* The bandwidth should be a multiple of 50Mbps. */
46 #define I40E_QOS_BW_GRANULARITY 50
47 /* The min bandwidth weight is 1. */
48 #define I40E_QOS_BW_WEIGHT_MIN 1
49 /* The max bandwidth weight is 127. */
50 #define I40E_QOS_BW_WEIGHT_MAX 127
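/*
 * Illustrative sketch (not part of the driver): a requested bandwidth cap
 * must be at most I40E_QOS_BW_MAX Mbps and a multiple of
 * I40E_QOS_BW_GRANULARITY Mbps; the value handed to firmware is the cap
 * divided by that granularity, as done in rte_pmd_i40e_set_vf_max_bw()
 * below. For a hypothetical 300 Mbps cap:
 *
 *   uint32_t bw = 300;                 // caller-supplied cap in Mbps
 *   if (bw > I40E_QOS_BW_MAX ||        // 300 <= 40000: in range
 *       bw % I40E_QOS_BW_GRANULARITY)  // 300 % 50 == 0: valid
 *           return -EINVAL;
 *   bw /= I40E_QOS_BW_GRANULARITY;     // 6 credits of 50 Mbps each
 */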
51
52 int
53 rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
54 {
55 struct rte_eth_dev *dev;
56 struct i40e_pf *pf;
57
58 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
59
60 dev = &rte_eth_devices[port];
61
62 if (!is_i40e_supported(dev))
63 return -ENOTSUP;
64
65 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
66
67 if (vf >= pf->vf_num || !pf->vfs) {
68 PMD_DRV_LOG(ERR, "Invalid argument.");
69 return -EINVAL;
70 }
71
72 i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
73
74 return 0;
75 }
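/*
 * Usage sketch (illustrative, not from this file): after a PF-side link
 * change, a host application can ping every VF so each one refreshes its
 * link status. "port_id" and "nb_vfs" are hypothetical caller values.
 *
 *   uint16_t vf;
 *   for (vf = 0; vf < nb_vfs; vf++)
 *       (void)rte_pmd_i40e_ping_vfs(port_id, vf);
 */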
76
77 int
78 rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
79 {
80 struct rte_eth_dev *dev;
81 struct i40e_pf *pf;
82 struct i40e_vsi *vsi;
83 struct i40e_hw *hw;
84 struct i40e_vsi_context ctxt;
85 int ret;
86
87 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
88
89 dev = &rte_eth_devices[port];
90
91 if (!is_i40e_supported(dev))
92 return -ENOTSUP;
93
94 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
95
96 if (vf_id >= pf->vf_num || !pf->vfs) {
97 PMD_DRV_LOG(ERR, "Invalid argument.");
98 return -EINVAL;
99 }
100
101 vsi = pf->vfs[vf_id].vsi;
102 if (!vsi) {
103 PMD_DRV_LOG(ERR, "Invalid VSI.");
104 return -EINVAL;
105 }
106
107 /* Check if it is already in the requested state */
108 if (vsi->info.valid_sections &
109 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
110 if (on) {
111 if ((vsi->info.sec_flags &
112 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
113 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
114 return 0; /* already on */
115 } else {
116 if ((vsi->info.sec_flags &
117 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
118 return 0; /* already off */
119 }
120 }
121
122 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
123 if (on)
124 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
125 else
126 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
127
128 memset(&ctxt, 0, sizeof(ctxt));
129 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
130 ctxt.seid = vsi->seid;
131
132 hw = I40E_VSI_TO_HW(vsi);
133 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
134 if (ret != I40E_SUCCESS) {
135 ret = -ENOTSUP;
136 PMD_DRV_LOG(ERR, "Failed to update VSI params");
137 }
138
139 return ret;
140 }
141
142 static int
143 i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
144 {
145 uint32_t j, k;
146 uint16_t vlan_id;
147 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
148 struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
149 int ret;
150
151 for (j = 0; j < I40E_VFTA_SIZE; j++) {
152 if (!vsi->vfta[j])
153 continue;
154
155 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
156 if (!(vsi->vfta[j] & (1 << k)))
157 continue;
158
159 vlan_id = j * I40E_UINT32_BIT_SIZE + k;
160 if (!vlan_id)
161 continue;
162
163 vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
164 if (add)
165 ret = i40e_aq_add_vlan(hw, vsi->seid,
166 &vlan_data, 1, NULL);
167 else
168 ret = i40e_aq_remove_vlan(hw, vsi->seid,
169 &vlan_data, 1, NULL);
170 if (ret != I40E_SUCCESS) {
171 PMD_DRV_LOG(ERR,
172 "Failed to add/rm vlan filter");
173 return ret;
174 }
175 }
176 }
177
178 return I40E_SUCCESS;
179 }
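/*
 * Note on the walk above (explanatory, behaviour unchanged): vsi->vfta is
 * a bitmap of I40E_VFTA_SIZE 32-bit words, so VLAN id v is stored at word
 * v / 32, bit v % 32, and the loop reconstructs v as
 * j * I40E_UINT32_BIT_SIZE + k. Minimal sketch of the forward mapping:
 *
 *   uint16_t vlan_id = 100;
 *   uint32_t word = vlan_id / I40E_UINT32_BIT_SIZE;   // 100 / 32 == 3
 *   uint32_t bit  = vlan_id % I40E_UINT32_BIT_SIZE;   // 100 % 32 == 4
 *   int set = !!(vsi->vfta[word] & (1 << bit));       // filter present?
 */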
180
181 int
182 rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
183 {
184 struct rte_eth_dev *dev;
185 struct i40e_pf *pf;
186 struct i40e_vsi *vsi;
187 struct i40e_hw *hw;
188 struct i40e_vsi_context ctxt;
189 int ret;
190
191 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
192
193 dev = &rte_eth_devices[port];
194
195 if (!is_i40e_supported(dev))
196 return -ENOTSUP;
197
198 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
199
200 if (vf_id >= pf->vf_num || !pf->vfs) {
201 PMD_DRV_LOG(ERR, "Invalid argument.");
202 return -EINVAL;
203 }
204
205 vsi = pf->vfs[vf_id].vsi;
206 if (!vsi) {
207 PMD_DRV_LOG(ERR, "Invalid VSI.");
208 return -EINVAL;
209 }
210
211 /* Check if it is already in the requested state */
212 if (vsi->vlan_anti_spoof_on == on)
213 return 0; /* already on or off */
214
215 vsi->vlan_anti_spoof_on = on;
216 if (!vsi->vlan_filter_on) {
217 ret = i40e_add_rm_all_vlan_filter(vsi, on);
218 if (ret) {
219 PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
220 return -ENOTSUP;
221 }
222 }
223
224 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
225 if (on)
226 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
227 else
228 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
229
230 memset(&ctxt, 0, sizeof(ctxt));
231 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
232 ctxt.seid = vsi->seid;
233
234 hw = I40E_VSI_TO_HW(vsi);
235 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
236 if (ret != I40E_SUCCESS) {
237 ret = -ENOTSUP;
238 PMD_DRV_LOG(ERR, "Failed to update VSI params");
239 }
240
241 return ret;
242 }
243
244 static int
245 i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
246 {
247 struct i40e_mac_filter *f;
248 struct i40e_macvlan_filter *mv_f;
249 int i, vlan_num;
250 enum rte_mac_filter_type filter_type;
251 int ret = I40E_SUCCESS;
252 void *temp;
253
254 /* remove all the MACs */
255 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
256 vlan_num = vsi->vlan_num;
257 filter_type = f->mac_info.filter_type;
258 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
259 filter_type == RTE_MACVLAN_HASH_MATCH) {
260 if (vlan_num == 0) {
261 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
262 return I40E_ERR_PARAM;
263 }
264 } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
265 filter_type == RTE_MAC_HASH_MATCH)
266 vlan_num = 1;
267
268 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
269 if (!mv_f) {
270 PMD_DRV_LOG(ERR, "failed to allocate memory");
271 return I40E_ERR_NO_MEMORY;
272 }
273
274 for (i = 0; i < vlan_num; i++) {
275 mv_f[i].filter_type = filter_type;
276 (void)rte_memcpy(&mv_f[i].macaddr,
277 &f->mac_info.mac_addr,
278 ETH_ADDR_LEN);
279 }
280 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
281 filter_type == RTE_MACVLAN_HASH_MATCH) {
282 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
283 &f->mac_info.mac_addr);
284 if (ret != I40E_SUCCESS) {
285 rte_free(mv_f);
286 return ret;
287 }
288 }
289
290 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
291 if (ret != I40E_SUCCESS) {
292 rte_free(mv_f);
293 return ret;
294 }
295
296 rte_free(mv_f);
297 ret = I40E_SUCCESS;
298 }
299
300 return ret;
301 }
302
303 static int
304 i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
305 {
306 struct i40e_mac_filter *f;
307 struct i40e_macvlan_filter *mv_f;
308 int i, vlan_num = 0;
309 int ret = I40E_SUCCESS;
310 void *temp;
311
312 /* restore all the MACs */
313 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
314 if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
315 (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
316 /**
317 * If vlan_num is 0, this is the first time a MAC is added;
318 * set the mask for vlan_id 0.
319 */
320 if (vsi->vlan_num == 0) {
321 i40e_set_vlan_filter(vsi, 0, 1);
322 vsi->vlan_num = 1;
323 }
324 vlan_num = vsi->vlan_num;
325 } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
326 (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
327 vlan_num = 1;
328
329 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
330 if (!mv_f) {
331 PMD_DRV_LOG(ERR, "failed to allocate memory");
332 return I40E_ERR_NO_MEMORY;
333 }
334
335 for (i = 0; i < vlan_num; i++) {
336 mv_f[i].filter_type = f->mac_info.filter_type;
337 (void)rte_memcpy(&mv_f[i].macaddr,
338 &f->mac_info.mac_addr,
339 ETH_ADDR_LEN);
340 }
341
342 if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
343 f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
344 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
345 &f->mac_info.mac_addr);
346 if (ret != I40E_SUCCESS) {
347 rte_free(mv_f);
348 return ret;
349 }
350 }
351
352 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
353 if (ret != I40E_SUCCESS) {
354 rte_free(mv_f);
355 return ret;
356 }
357
358 rte_free(mv_f);
359 ret = I40E_SUCCESS;
360 }
361
362 return ret;
363 }
364
365 static int
366 i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
367 {
368 struct i40e_vsi_context ctxt;
369 struct i40e_hw *hw;
370 int ret;
371
372 if (!vsi)
373 return -EINVAL;
374
375 hw = I40E_VSI_TO_HW(vsi);
376
377 /* Use the FW API if FW >= v5.0 */
378 if (hw->aq.fw_maj_ver < 5) {
379 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
380 return -ENOTSUP;
381 }
382
383 /* Check if it is already in the requested state */
384 if (vsi->info.valid_sections &
385 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
386 if (on) {
387 if ((vsi->info.switch_id &
388 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
389 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
390 return 0; /* already on */
391 } else {
392 if ((vsi->info.switch_id &
393 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
394 return 0; /* already off */
395 }
396 }
397
398 /* remove all the MAC and VLAN first */
399 ret = i40e_vsi_rm_mac_filter(vsi);
400 if (ret) {
401 PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
402 return ret;
403 }
404 if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
405 ret = i40e_add_rm_all_vlan_filter(vsi, 0);
406 if (ret) {
407 PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
408 return ret;
409 }
410 }
411
412 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
413 if (on)
414 vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
415 else
416 vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
417
418 memset(&ctxt, 0, sizeof(ctxt));
419 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
420 ctxt.seid = vsi->seid;
421
422 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
423 if (ret != I40E_SUCCESS) {
424 PMD_DRV_LOG(ERR, "Failed to update VSI params");
425 return ret;
426 }
427
428 /* add all the MAC and VLAN back */
429 ret = i40e_vsi_restore_mac_filter(vsi);
430 if (ret)
431 return ret;
432 if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
433 ret = i40e_add_rm_all_vlan_filter(vsi, 1);
434 if (ret)
435 return ret;
436 }
437
438 return ret;
439 }
440
441 int
442 rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
443 {
444 struct rte_eth_dev *dev;
445 struct i40e_pf *pf;
446 struct i40e_pf_vf *vf;
447 struct i40e_vsi *vsi;
448 uint16_t vf_id;
449 int ret;
450
451 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
452
453 dev = &rte_eth_devices[port];
454
455 if (!is_i40e_supported(dev))
456 return -ENOTSUP;
457
458 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
459
460 /* setup PF TX loopback */
461 vsi = pf->main_vsi;
462 ret = i40e_vsi_set_tx_loopback(vsi, on);
463 if (ret)
464 return -ENOTSUP;
465
466 /* setup TX loopback for all the VFs */
467 if (!pf->vfs) {
468 /* if no VF, do nothing. */
469 return 0;
470 }
471
472 for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
473 vf = &pf->vfs[vf_id];
474 vsi = vf->vsi;
475
476 ret = i40e_vsi_set_tx_loopback(vsi, on);
477 if (ret)
478 return -ENOTSUP;
479 }
480
481 return ret;
482 }
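/*
 * Usage sketch (illustrative, "port_id" is a hypothetical caller value):
 * enabling TX loopback sets I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB on the main
 * VSI and every VF VSI, so VF-to-VF and VF-to-PF traffic can be switched
 * locally instead of leaving the port.
 *
 *   if (rte_pmd_i40e_set_tx_loopback(port_id, 1) != 0)
 *       printf("TX loopback not supported on port %u\n", port_id);
 */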
483
484 int
485 rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
486 {
487 struct rte_eth_dev *dev;
488 struct i40e_pf *pf;
489 struct i40e_vsi *vsi;
490 struct i40e_hw *hw;
491 int ret;
492
493 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
494
495 dev = &rte_eth_devices[port];
496
497 if (!is_i40e_supported(dev))
498 return -ENOTSUP;
499
500 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
501
502 if (vf_id >= pf->vf_num || !pf->vfs) {
503 PMD_DRV_LOG(ERR, "Invalid argument.");
504 return -EINVAL;
505 }
506
507 vsi = pf->vfs[vf_id].vsi;
508 if (!vsi) {
509 PMD_DRV_LOG(ERR, "Invalid VSI.");
510 return -EINVAL;
511 }
512
513 hw = I40E_VSI_TO_HW(vsi);
514
515 ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
516 on, NULL, true);
517 if (ret != I40E_SUCCESS) {
518 ret = -ENOTSUP;
519 PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
520 }
521
522 return ret;
523 }
524
525 int
526 rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
527 {
528 struct rte_eth_dev *dev;
529 struct i40e_pf *pf;
530 struct i40e_vsi *vsi;
531 struct i40e_hw *hw;
532 int ret;
533
534 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
535
536 dev = &rte_eth_devices[port];
537
538 if (!is_i40e_supported(dev))
539 return -ENOTSUP;
540
541 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
542
543 if (vf_id >= pf->vf_num || !pf->vfs) {
544 PMD_DRV_LOG(ERR, "Invalid argument.");
545 return -EINVAL;
546 }
547
548 vsi = pf->vfs[vf_id].vsi;
549 if (!vsi) {
550 PMD_DRV_LOG(ERR, "Invalid VSI.");
551 return -EINVAL;
552 }
553
554 hw = I40E_VSI_TO_HW(vsi);
555
556 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
557 on, NULL);
558 if (ret != I40E_SUCCESS) {
559 ret = -ENOTSUP;
560 PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
561 }
562
563 return ret;
564 }
565
566 int
567 rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
568 struct ether_addr *mac_addr)
569 {
570 struct i40e_mac_filter *f;
571 struct rte_eth_dev *dev;
572 struct i40e_pf_vf *vf;
573 struct i40e_vsi *vsi;
574 struct i40e_pf *pf;
575 void *temp;
576
577 if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
578 return -EINVAL;
579
580 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
581
582 dev = &rte_eth_devices[port];
583
584 if (!is_i40e_supported(dev))
585 return -ENOTSUP;
586
587 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
588
589 if (vf_id >= pf->vf_num || !pf->vfs)
590 return -EINVAL;
591
592 vf = &pf->vfs[vf_id];
593 vsi = vf->vsi;
594 if (!vsi) {
595 PMD_DRV_LOG(ERR, "Invalid VSI.");
596 return -EINVAL;
597 }
598
599 ether_addr_copy(mac_addr, &vf->mac_addr);
600
601 /* Remove all existing MAC addresses */
602 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
603 i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
604
605 return 0;
606 }
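/*
 * Usage sketch (illustrative values): assign a locally administered MAC to
 * VF 0. The call stores the address in vf->mac_addr and removes the VSI's
 * existing MAC filters; the stored address is assumed to be handed to the
 * VF on its next reset (that step is not shown in this file).
 *
 *   struct ether_addr addr = {
 *       .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01} };
 *   int rc = rte_pmd_i40e_set_vf_mac_addr(port_id, 0, &addr);
 */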
607
608 /* Set vlan strip on/off for specific VF from host */
609 int
610 rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
611 {
612 struct rte_eth_dev *dev;
613 struct i40e_pf *pf;
614 struct i40e_vsi *vsi;
615 int ret;
616
617 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
618
619 dev = &rte_eth_devices[port];
620
621 if (!is_i40e_supported(dev))
622 return -ENOTSUP;
623
624 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
625
626 if (vf_id >= pf->vf_num || !pf->vfs) {
627 PMD_DRV_LOG(ERR, "Invalid argument.");
628 return -EINVAL;
629 }
630
631 vsi = pf->vfs[vf_id].vsi;
632
633 if (!vsi)
634 return -EINVAL;
635
636 ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
637 if (ret != I40E_SUCCESS) {
638 ret = -ENOTSUP;
639 PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
640 }
641
642 return ret;
643 }
644
645 int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
646 uint16_t vlan_id)
647 {
648 struct rte_eth_dev *dev;
649 struct i40e_pf *pf;
650 struct i40e_hw *hw;
651 struct i40e_vsi *vsi;
652 struct i40e_vsi_context ctxt;
653 int ret;
654
655 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
656
657 if (vlan_id > ETHER_MAX_VLAN_ID) {
658 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
659 return -EINVAL;
660 }
661
662 dev = &rte_eth_devices[port];
663
664 if (!is_i40e_supported(dev))
665 return -ENOTSUP;
666
667 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
668 hw = I40E_PF_TO_HW(pf);
669
670 /**
671 * Return -ENODEV if SRIOV is not enabled, the VF number is not
672 * configured, or no queue is assigned.
673 */
674 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
675 pf->vf_nb_qps == 0)
676 return -ENODEV;
677
678 if (vf_id >= pf->vf_num || !pf->vfs) {
679 PMD_DRV_LOG(ERR, "Invalid VF ID.");
680 return -EINVAL;
681 }
682
683 vsi = pf->vfs[vf_id].vsi;
684 if (!vsi) {
685 PMD_DRV_LOG(ERR, "Invalid VSI.");
686 return -EINVAL;
687 }
688
689 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
690 vsi->info.pvid = vlan_id;
691 if (vlan_id > 0)
692 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
693 else
694 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
695
696 memset(&ctxt, 0, sizeof(ctxt));
697 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
698 ctxt.seid = vsi->seid;
699
700 hw = I40E_VSI_TO_HW(vsi);
701 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
702 if (ret != I40E_SUCCESS) {
703 ret = -ENOTSUP;
704 PMD_DRV_LOG(ERR, "Failed to update VSI params");
705 }
706
707 return ret;
708 }
709
710 int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
711 uint8_t on)
712 {
713 struct rte_eth_dev *dev;
714 struct i40e_pf *pf;
715 struct i40e_vsi *vsi;
716 struct i40e_hw *hw;
717 struct i40e_mac_filter_info filter;
718 struct ether_addr broadcast = {
719 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
720 int ret;
721
722 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
723
724 if (on > 1) {
725 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
726 return -EINVAL;
727 }
728
729 dev = &rte_eth_devices[port];
730
731 if (!is_i40e_supported(dev))
732 return -ENOTSUP;
733
734 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
735 hw = I40E_PF_TO_HW(pf);
736
737 if (vf_id >= pf->vf_num || !pf->vfs) {
738 PMD_DRV_LOG(ERR, "Invalid VF ID.");
739 return -EINVAL;
740 }
741
742 /**
743 * Return -ENODEV if SRIOV is not enabled, the VF number is not
744 * configured, or no queue is assigned.
745 */
746 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
747 pf->vf_nb_qps == 0) {
748 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
749 return -ENODEV;
750 }
751
752 vsi = pf->vfs[vf_id].vsi;
753 if (!vsi) {
754 PMD_DRV_LOG(ERR, "Invalid VSI.");
755 return -EINVAL;
756 }
757
758 if (on) {
759 (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
760 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
761 ret = i40e_vsi_add_mac(vsi, &filter);
762 } else {
763 ret = i40e_vsi_delete_mac(vsi, &broadcast);
764 }
765
766 if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
767 ret = -ENOTSUP;
768 PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
769 } else {
770 ret = 0;
771 }
772
773 return ret;
774 }
775
776 int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
777 {
778 struct rte_eth_dev *dev;
779 struct i40e_pf *pf;
780 struct i40e_hw *hw;
781 struct i40e_vsi *vsi;
782 struct i40e_vsi_context ctxt;
783 int ret;
784
785 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
786
787 if (on > 1) {
788 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
789 return -EINVAL;
790 }
791
792 dev = &rte_eth_devices[port];
793
794 if (!is_i40e_supported(dev))
795 return -ENOTSUP;
796
797 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
798 hw = I40E_PF_TO_HW(pf);
799
800 /**
801 * Return -ENODEV if SRIOV is not enabled, the VF number is not
802 * configured, or no queue is assigned.
803 */
804 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
805 pf->vf_nb_qps == 0) {
806 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
807 return -ENODEV;
808 }
809
810 if (vf_id >= pf->vf_num || !pf->vfs) {
811 PMD_DRV_LOG(ERR, "Invalid VF ID.");
812 return -EINVAL;
813 }
814
815 vsi = pf->vfs[vf_id].vsi;
816 if (!vsi) {
817 PMD_DRV_LOG(ERR, "Invalid VSI.");
818 return -EINVAL;
819 }
820
821 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
822 if (on) {
823 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
824 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
825 } else {
826 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
827 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
828 }
829
830 memset(&ctxt, 0, sizeof(ctxt));
831 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
832 ctxt.seid = vsi->seid;
833
834 hw = I40E_VSI_TO_HW(vsi);
835 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
836 if (ret != I40E_SUCCESS) {
837 ret = -ENOTSUP;
838 PMD_DRV_LOG(ERR, "Failed to update VSI params");
839 }
840
841 return ret;
842 }
843
844 static int
845 i40e_vlan_filter_count(struct i40e_vsi *vsi)
846 {
847 uint32_t j, k;
848 uint16_t vlan_id;
849 int count = 0;
850
851 for (j = 0; j < I40E_VFTA_SIZE; j++) {
852 if (!vsi->vfta[j])
853 continue;
854
855 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
856 if (!(vsi->vfta[j] & (1 << k)))
857 continue;
858
859 vlan_id = j * I40E_UINT32_BIT_SIZE + k;
860 if (!vlan_id)
861 continue;
862
863 count++;
864 }
865 }
866
867 return count;
868 }
869
870 int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
871 uint64_t vf_mask, uint8_t on)
872 {
873 struct rte_eth_dev *dev;
874 struct i40e_pf *pf;
875 struct i40e_hw *hw;
876 struct i40e_vsi *vsi;
877 uint16_t vf_idx;
878 int ret = I40E_SUCCESS;
879
880 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
881
882 dev = &rte_eth_devices[port];
883
884 if (!is_i40e_supported(dev))
885 return -ENOTSUP;
886
887 if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
888 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
889 return -EINVAL;
890 }
891
892 if (vf_mask == 0) {
893 PMD_DRV_LOG(ERR, "No VF.");
894 return -EINVAL;
895 }
896
897 if (on > 1) {
898 PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
899 return -EINVAL;
900 }
901
902 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
903 hw = I40E_PF_TO_HW(pf);
904
905 /**
906 * Return -ENODEV if SRIOV is not enabled, the VF number is not
907 * configured, or no queue is assigned.
908 */
909 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
910 pf->vf_nb_qps == 0) {
911 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
912 return -ENODEV;
913 }
914
915 for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
916 if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
917 vsi = pf->vfs[vf_idx].vsi;
918 if (on) {
919 if (!vsi->vlan_filter_on) {
920 vsi->vlan_filter_on = true;
921 i40e_aq_set_vsi_vlan_promisc(hw,
922 vsi->seid,
923 false,
924 NULL);
925 if (!vsi->vlan_anti_spoof_on)
926 i40e_add_rm_all_vlan_filter(
927 vsi, true);
928 }
929 ret = i40e_vsi_add_vlan(vsi, vlan_id);
930 } else {
931 ret = i40e_vsi_delete_vlan(vsi, vlan_id);
932
933 if (!i40e_vlan_filter_count(vsi)) {
934 vsi->vlan_filter_on = false;
935 i40e_aq_set_vsi_vlan_promisc(hw,
936 vsi->seid,
937 true,
938 NULL);
939 }
940 }
941 }
942 }
943
944 if (ret != I40E_SUCCESS) {
945 ret = -ENOTSUP;
946 PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
947 }
948
949 return ret;
950 }
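/*
 * Usage sketch (illustrative): vf_mask selects VFs by bit position. To
 * allow VLAN 100 on VFs 0 and 3 of a hypothetical port_id:
 *
 *   uint64_t mask = (1ULL << 0) | (1ULL << 3);
 *   int rc = rte_pmd_i40e_set_vf_vlan_filter(port_id, 100, mask, 1);
 */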
951
952 int
953 rte_pmd_i40e_get_vf_stats(uint8_t port,
954 uint16_t vf_id,
955 struct rte_eth_stats *stats)
956 {
957 struct rte_eth_dev *dev;
958 struct i40e_pf *pf;
959 struct i40e_vsi *vsi;
960
961 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
962
963 dev = &rte_eth_devices[port];
964
965 if (!is_i40e_supported(dev))
966 return -ENOTSUP;
967
968 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
969
970 if (vf_id >= pf->vf_num || !pf->vfs) {
971 PMD_DRV_LOG(ERR, "Invalid VF ID.");
972 return -EINVAL;
973 }
974
975 vsi = pf->vfs[vf_id].vsi;
976 if (!vsi) {
977 PMD_DRV_LOG(ERR, "Invalid VSI.");
978 return -EINVAL;
979 }
980
981 i40e_update_vsi_stats(vsi);
982
983 stats->ipackets = vsi->eth_stats.rx_unicast +
984 vsi->eth_stats.rx_multicast +
985 vsi->eth_stats.rx_broadcast;
986 stats->opackets = vsi->eth_stats.tx_unicast +
987 vsi->eth_stats.tx_multicast +
988 vsi->eth_stats.tx_broadcast;
989 stats->ibytes = vsi->eth_stats.rx_bytes;
990 stats->obytes = vsi->eth_stats.tx_bytes;
991 stats->ierrors = vsi->eth_stats.rx_discards;
992 stats->oerrors = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
993
994 return 0;
995 }
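/*
 * Usage sketch (illustrative): read one VF's counters. Per the aggregation
 * above, ipackets/opackets already include unicast, multicast and
 * broadcast frames.
 *
 *   struct rte_eth_stats st;
 *   if (rte_pmd_i40e_get_vf_stats(port_id, vf_id, &st) == 0)
 *       printf("VF rx %" PRIu64 " pkts, tx %" PRIu64 " pkts\n",
 *              st.ipackets, st.opackets);
 */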
996
997 int
998 rte_pmd_i40e_reset_vf_stats(uint8_t port,
999 uint16_t vf_id)
1000 {
1001 struct rte_eth_dev *dev;
1002 struct i40e_pf *pf;
1003 struct i40e_vsi *vsi;
1004
1005 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1006
1007 dev = &rte_eth_devices[port];
1008
1009 if (!is_i40e_supported(dev))
1010 return -ENOTSUP;
1011
1012 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1013
1014 if (vf_id >= pf->vf_num || !pf->vfs) {
1015 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1016 return -EINVAL;
1017 }
1018
1019 vsi = pf->vfs[vf_id].vsi;
1020 if (!vsi) {
1021 PMD_DRV_LOG(ERR, "Invalid VSI.");
1022 return -EINVAL;
1023 }
1024
1025 vsi->offset_loaded = false;
1026 i40e_update_vsi_stats(vsi);
1027
1028 return 0;
1029 }
1030
1031 int
1032 rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw)
1033 {
1034 struct rte_eth_dev *dev;
1035 struct i40e_pf *pf;
1036 struct i40e_vsi *vsi;
1037 struct i40e_hw *hw;
1038 int ret = 0;
1039 int i;
1040
1041 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1042
1043 dev = &rte_eth_devices[port];
1044
1045 if (!is_i40e_supported(dev))
1046 return -ENOTSUP;
1047
1048 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1049
1050 if (vf_id >= pf->vf_num || !pf->vfs) {
1051 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1052 return -EINVAL;
1053 }
1054
1055 vsi = pf->vfs[vf_id].vsi;
1056 if (!vsi) {
1057 PMD_DRV_LOG(ERR, "Invalid VSI.");
1058 return -EINVAL;
1059 }
1060
1061 if (bw > I40E_QOS_BW_MAX) {
1062 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1063 I40E_QOS_BW_MAX);
1064 return -EINVAL;
1065 }
1066
1067 if (bw % I40E_QOS_BW_GRANULARITY) {
1068 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1069 I40E_QOS_BW_GRANULARITY);
1070 return -EINVAL;
1071 }
1072
1073 bw /= I40E_QOS_BW_GRANULARITY;
1074
1075 hw = I40E_VSI_TO_HW(vsi);
1076
1077 /* No change. */
1078 if (bw == vsi->bw_info.bw_limit) {
1079 PMD_DRV_LOG(INFO,
1080 "No change for VF max bandwidth. Nothing to do.");
1081 return 0;
1082 }
1083
1084 /**
1085 * VF bandwidth limitation and TC bandwidth limitation cannot be
1086 * enabled in parallel, quit if TC bandwidth limitation is enabled.
1087 *
1088 * If bw is 0, it means the bandwidth limitation is being disabled,
1089 * so there is no need to check the TC bandwidth limitation.
1090 */
1091 if (bw) {
1092 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1093 if ((vsi->enabled_tc & BIT_ULL(i)) &&
1094 vsi->bw_info.bw_ets_credits[i])
1095 break;
1096 }
1097 if (i != I40E_MAX_TRAFFIC_CLASS) {
1098 PMD_DRV_LOG(ERR,
1099 "TC max bandwidth has been set on this VF,"
1100 " please disable it first.");
1101 return -EINVAL;
1102 }
1103 }
1104
1105 ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
1106 if (ret) {
1107 PMD_DRV_LOG(ERR,
1108 "Failed to set VF %d bandwidth, err(%d).",
1109 vf_id, ret);
1110 return -EINVAL;
1111 }
1112
1113 /* Store the configuration. */
1114 vsi->bw_info.bw_limit = (uint16_t)bw;
1115 vsi->bw_info.bw_max = 0;
1116
1117 return 0;
1118 }
1119
1120 int
1121 rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id,
1122 uint8_t tc_num, uint8_t *bw_weight)
1123 {
1124 struct rte_eth_dev *dev;
1125 struct i40e_pf *pf;
1126 struct i40e_vsi *vsi;
1127 struct i40e_hw *hw;
1128 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
1129 int ret = 0;
1130 int i, j;
1131 uint16_t sum;
1132 bool b_change = false;
1133
1134 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1135
1136 dev = &rte_eth_devices[port];
1137
1138 if (!is_i40e_supported(dev))
1139 return -ENOTSUP;
1140
1141 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1142
1143 if (vf_id >= pf->vf_num || !pf->vfs) {
1144 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1145 return -EINVAL;
1146 }
1147
1148 vsi = pf->vfs[vf_id].vsi;
1149 if (!vsi) {
1150 PMD_DRV_LOG(ERR, "Invalid VSI.");
1151 return -EINVAL;
1152 }
1153
1154 if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
1155 PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
1156 I40E_MAX_TRAFFIC_CLASS);
1157 return -EINVAL;
1158 }
1159
1160 sum = 0;
1161 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1162 if (vsi->enabled_tc & BIT_ULL(i))
1163 sum++;
1164 }
1165 if (sum != tc_num) {
1166 PMD_DRV_LOG(ERR,
1167 "Weight should be set for all %d enabled TCs.",
1168 sum);
1169 return -EINVAL;
1170 }
1171
1172 sum = 0;
1173 for (i = 0; i < tc_num; i++) {
1174 if (!bw_weight[i]) {
1175 PMD_DRV_LOG(ERR,
1176 "The weight should be 1 at least.");
1177 return -EINVAL;
1178 }
1179 sum += bw_weight[i];
1180 }
1181 if (sum != 100) {
1182 PMD_DRV_LOG(ERR,
1183 "The summary of the TC weight should be 100.");
1184 return -EINVAL;
1185 }
1186
1187 /**
1188 * Create the configuration for all the TCs.
1189 */
1190 memset(&tc_bw, 0, sizeof(tc_bw));
1191 tc_bw.tc_valid_bits = vsi->enabled_tc;
1192 j = 0;
1193 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1194 if (vsi->enabled_tc & BIT_ULL(i)) {
1195 if (bw_weight[j] !=
1196 vsi->bw_info.bw_ets_share_credits[i])
1197 b_change = true;
1198
1199 tc_bw.tc_bw_credits[i] = bw_weight[j];
1200 j++;
1201 }
1202 }
1203
1204 /* No change. */
1205 if (!b_change) {
1206 PMD_DRV_LOG(INFO,
1207 "No change for TC allocated bandwidth."
1208 " Nothing to do.");
1209 return 0;
1210 }
1211
1212 hw = I40E_VSI_TO_HW(vsi);
1213
1214 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
1215 if (ret) {
1216 PMD_DRV_LOG(ERR,
1217 "Failed to set VF %d TC bandwidth weight, err(%d).",
1218 vf_id, ret);
1219 return -EINVAL;
1220 }
1221
1222 /* Store the configuration. */
1223 j = 0;
1224 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1225 if (vsi->enabled_tc & BIT_ULL(i)) {
1226 vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
1227 j++;
1228 }
1229 }
1230
1231 return 0;
1232 }
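/*
 * Usage sketch (illustrative): bw_weight holds one entry per enabled TC
 * and the entries must sum to 100. With two TCs enabled, a 30/70 split:
 *
 *   uint8_t weights[2] = {30, 70};
 *   int rc = rte_pmd_i40e_set_vf_tc_bw_alloc(port_id, vf_id, 2, weights);
 */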
1233
1234 int
1235 rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id,
1236 uint8_t tc_no, uint32_t bw)
1237 {
1238 struct rte_eth_dev *dev;
1239 struct i40e_pf *pf;
1240 struct i40e_vsi *vsi;
1241 struct i40e_hw *hw;
1242 struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
1243 int ret = 0;
1244 int i;
1245
1246 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1247
1248 dev = &rte_eth_devices[port];
1249
1250 if (!is_i40e_supported(dev))
1251 return -ENOTSUP;
1252
1253 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1254
1255 if (vf_id >= pf->vf_num || !pf->vfs) {
1256 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1257 return -EINVAL;
1258 }
1259
1260 vsi = pf->vfs[vf_id].vsi;
1261 if (!vsi) {
1262 PMD_DRV_LOG(ERR, "Invalid VSI.");
1263 return -EINVAL;
1264 }
1265
1266 if (bw > I40E_QOS_BW_MAX) {
1267 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1268 I40E_QOS_BW_MAX);
1269 return -EINVAL;
1270 }
1271
1272 if (bw % I40E_QOS_BW_GRANULARITY) {
1273 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1274 I40E_QOS_BW_GRANULARITY);
1275 return -EINVAL;
1276 }
1277
1278 bw /= I40E_QOS_BW_GRANULARITY;
1279
1280 if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
1281 PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
1282 I40E_MAX_TRAFFIC_CLASS);
1283 return -EINVAL;
1284 }
1285
1286 hw = I40E_VSI_TO_HW(vsi);
1287
1288 if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
1289 PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
1290 vf_id, tc_no);
1291 return -EINVAL;
1292 }
1293
1294 /* No change. */
1295 if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
1296 PMD_DRV_LOG(INFO,
1297 "No change for TC max bandwidth. Nothing to do.");
1298 return 0;
1299 }
1300
1301 /**
1302 * VF bandwidth limitation and TC bandwidth limitation cannot be
1303 * enabled in parallel, disable VF bandwidth limitation if it's
1304 * enabled.
1305 * If bw is 0, it means the bandwidth limitation is being disabled,
1306 * so the VF bandwidth limitation configuration can be ignored.
1307 */
1308 if (bw && vsi->bw_info.bw_limit) {
1309 ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
1310 if (ret) {
1311 PMD_DRV_LOG(ERR,
1312 "Failed to disable VF(%d)"
1313 " bandwidth limitation, err(%d).",
1314 vf_id, ret);
1315 return -EINVAL;
1316 }
1317
1318 PMD_DRV_LOG(INFO,
1319 "VF max bandwidth is disabled according"
1320 " to TC max bandwidth setting.");
1321 }
1322
1323 /**
1324 * Get all the TCs' info to create a whole picture,
1325 * because incremental changes aren't permitted.
1326 */
1327 memset(&tc_bw, 0, sizeof(tc_bw));
1328 tc_bw.tc_valid_bits = vsi->enabled_tc;
1329 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1330 if (vsi->enabled_tc & BIT_ULL(i)) {
1331 tc_bw.tc_bw_credits[i] =
1332 rte_cpu_to_le_16(
1333 vsi->bw_info.bw_ets_credits[i]);
1334 }
1335 }
1336 tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
1337
1338 ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
1339 if (ret) {
1340 PMD_DRV_LOG(ERR,
1341 "Failed to set VF %d TC %d max bandwidth, err(%d).",
1342 vf_id, tc_no, ret);
1343 return -EINVAL;
1344 }
1345
1346 /* Store the configuration. */
1347 vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
1348
1349 return 0;
1350 }
1351
1352 int
1353 rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
1354 {
1355 struct rte_eth_dev *dev;
1356 struct i40e_pf *pf;
1357 struct i40e_vsi *vsi;
1358 struct i40e_veb *veb;
1359 struct i40e_hw *hw;
1360 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
1361 int i;
1362 int ret;
1363
1364 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1365
1366 dev = &rte_eth_devices[port];
1367
1368 if (!is_i40e_supported(dev))
1369 return -ENOTSUP;
1370
1371 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1372
1373 vsi = pf->main_vsi;
1374 if (!vsi) {
1375 PMD_DRV_LOG(ERR, "Invalid VSI.");
1376 return -EINVAL;
1377 }
1378
1379 veb = vsi->veb;
1380 if (!veb) {
1381 PMD_DRV_LOG(ERR, "Invalid VEB.");
1382 return -EINVAL;
1383 }
1384
1385 if ((tc_map & veb->enabled_tc) != tc_map) {
1386 PMD_DRV_LOG(ERR,
1387 "TC bitmap isn't the subset of enabled TCs 0x%x.",
1388 veb->enabled_tc);
1389 return -EINVAL;
1390 }
1391
1392 if (tc_map == veb->strict_prio_tc) {
1393 PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
1394 return 0;
1395 }
1396
1397 hw = I40E_VSI_TO_HW(vsi);
1398
1399 /* Disable DCBx the first time strict priority is set. */
1400 if (!veb->strict_prio_tc) {
1401 ret = i40e_aq_stop_lldp(hw, true, NULL);
1402 if (ret)
1403 PMD_DRV_LOG(INFO,
1404 "Failed to disable DCBx as it's already"
1405 " disabled.");
1406 else
1407 PMD_DRV_LOG(INFO,
1408 "DCBx is disabled according to strict"
1409 " priority setting.");
1410 }
1411
1412 memset(&ets_data, 0, sizeof(ets_data));
1413 ets_data.tc_valid_bits = veb->enabled_tc;
1414 ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
1415 ets_data.tc_strict_priority_flags = tc_map;
1416 /* Get all TCs' bandwidth. */
1417 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1418 if (veb->enabled_tc & BIT_ULL(i)) {
1419 /* For robustness, if bandwidth is 0, use 1 instead. */
1420 if (veb->bw_info.bw_ets_share_credits[i])
1421 ets_data.tc_bw_share_credits[i] =
1422 veb->bw_info.bw_ets_share_credits[i];
1423 else
1424 ets_data.tc_bw_share_credits[i] =
1425 I40E_QOS_BW_WEIGHT_MIN;
1426 }
1427 }
1428
1429 if (!veb->strict_prio_tc)
1430 ret = i40e_aq_config_switch_comp_ets(
1431 hw, veb->uplink_seid,
1432 &ets_data, i40e_aqc_opc_enable_switching_comp_ets,
1433 NULL);
1434 else if (tc_map)
1435 ret = i40e_aq_config_switch_comp_ets(
1436 hw, veb->uplink_seid,
1437 &ets_data, i40e_aqc_opc_modify_switching_comp_ets,
1438 NULL);
1439 else
1440 ret = i40e_aq_config_switch_comp_ets(
1441 hw, veb->uplink_seid,
1442 &ets_data, i40e_aqc_opc_disable_switching_comp_ets,
1443 NULL);
1444
1445 if (ret) {
1446 PMD_DRV_LOG(ERR,
1447 "Failed to set TCs' strict priority mode."
1448 " err (%d)", ret);
1449 return -EINVAL;
1450 }
1451
1452 veb->strict_prio_tc = tc_map;
1453
1454 /* Enable DCBx again if strict priority is disabled for all TCs. */
1455 if (!tc_map) {
1456 ret = i40e_aq_start_lldp(hw, NULL);
1457 if (ret) {
1458 PMD_DRV_LOG(ERR,
1459 "Failed to enable DCBx, err(%d).", ret);
1460 return -EINVAL;
1461 }
1462
1463 PMD_DRV_LOG(INFO,
1464 "DCBx is enabled again according to strict"
1465 " priority setting.");
1466 }
1467
1468 return ret;
1469 }
1470
1471 #define I40E_PROFILE_INFO_SIZE 48
1472 #define I40E_MAX_PROFILE_NUM 16
1473
1474 static void
1475 i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
1476 uint32_t track_id, uint8_t *profile_info_sec,
1477 bool add)
1478 {
1479 struct i40e_profile_section_header *sec = NULL;
1480 struct i40e_profile_info *pinfo;
1481
1482 sec = (struct i40e_profile_section_header *)profile_info_sec;
1483 sec->tbl_size = 1;
1484 sec->data_end = sizeof(struct i40e_profile_section_header) +
1485 sizeof(struct i40e_profile_info);
1486 sec->section.type = SECTION_TYPE_INFO;
1487 sec->section.offset = sizeof(struct i40e_profile_section_header);
1488 sec->section.size = sizeof(struct i40e_profile_info);
1489 pinfo = (struct i40e_profile_info *)(profile_info_sec +
1490 sec->section.offset);
1491 pinfo->track_id = track_id;
1492 memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
1493 memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
1494 if (add)
1495 pinfo->op = I40E_DDP_ADD_TRACKID;
1496 else
1497 pinfo->op = I40E_DDP_REMOVE_TRACKID;
1498 }
1499
1500 static enum i40e_status_code
1501 i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
1502 {
1503 enum i40e_status_code status = I40E_SUCCESS;
1504 struct i40e_profile_section_header *sec;
1505 uint32_t track_id;
1506 uint32_t offset = 0;
1507 uint32_t info = 0;
1508
1509 sec = (struct i40e_profile_section_header *)profile_info_sec;
1510 track_id = ((struct i40e_profile_info *)(profile_info_sec +
1511 sec->section.offset))->track_id;
1512
1513 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
1514 track_id, &offset, &info, NULL);
1515 if (status)
1516 PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
1517 "offset %d, info %d",
1518 offset, info);
1519
1520 return status;
1521 }
1522
1526 /* Check if the profile info exists */
1527 static int
1528 i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
1529 {
1530 struct rte_eth_dev *dev = &rte_eth_devices[port];
1531 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1532 uint8_t *buff;
1533 struct rte_pmd_i40e_profile_list *p_list;
1534 struct rte_pmd_i40e_profile_info *pinfo, *p;
1535 uint32_t i;
1536 int ret;
1537
1538 buff = rte_zmalloc("pinfo_list",
1539 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1540 0);
1541 if (!buff) {
1542 PMD_DRV_LOG(ERR, "failed to allocate memory");
1543 return -1;
1544 }
1545
1546 ret = i40e_aq_get_ddp_list(
1547 hw, (void *)buff,
1548 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1549 0, NULL);
1550 if (ret) {
1551 PMD_DRV_LOG(ERR, "Failed to get profile info list.");
1552 rte_free(buff);
1553 return -1;
1554 }
1555 p_list = (struct rte_pmd_i40e_profile_list *)buff;
1556 pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
1557 sizeof(struct i40e_profile_section_header));
1558 for (i = 0; i < p_list->p_count; i++) {
1559 p = &p_list->p_info[i];
1560 if ((pinfo->track_id == p->track_id) &&
1561 !memcmp(&pinfo->version, &p->version,
1562 sizeof(struct i40e_ddp_version)) &&
1563 !memcmp(&pinfo->name, &p->name,
1564 I40E_DDP_NAME_SIZE)) {
1565 PMD_DRV_LOG(INFO, "Profile exists.");
1566 rte_free(buff);
1567 return 1;
1568 }
1569 }
1570
1571 rte_free(buff);
1572 return 0;
1573 }
1574
1575 int
1576 rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
1577 uint32_t size,
1578 enum rte_pmd_i40e_package_op op)
1579 {
1580 struct rte_eth_dev *dev;
1581 struct i40e_hw *hw;
1582 struct i40e_package_header *pkg_hdr;
1583 struct i40e_generic_seg_header *profile_seg_hdr;
1584 struct i40e_generic_seg_header *metadata_seg_hdr;
1585 uint32_t track_id;
1586 uint8_t *profile_info_sec;
1587 int is_exist;
1588 enum i40e_status_code status = I40E_SUCCESS;
1589
1590 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1591
1592 dev = &rte_eth_devices[port];
1593
1594 if (!is_i40e_supported(dev))
1595 return -ENOTSUP;
1596
1597 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1598
1599 if (size < (sizeof(struct i40e_package_header) +
1600 sizeof(struct i40e_metadata_segment) +
1601 sizeof(uint32_t) * 2)) {
1602 PMD_DRV_LOG(ERR, "Buff is invalid.");
1603 return -EINVAL;
1604 }
1605
1606 pkg_hdr = (struct i40e_package_header *)buff;
1607
1608 if (!pkg_hdr) {
1609 PMD_DRV_LOG(ERR, "Failed to fill the package structure");
1610 return -EINVAL;
1611 }
1612
1613 if (pkg_hdr->segment_count < 2) {
1614 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1615 return -EINVAL;
1616 }
1617
1618 /* Find metadata segment */
1619 metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1620 pkg_hdr);
1621 if (!metadata_seg_hdr) {
1622 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1623 return -EINVAL;
1624 }
1625 track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1626
1627 /* Find profile segment */
1628 profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
1629 pkg_hdr);
1630 if (!profile_seg_hdr) {
1631 PMD_DRV_LOG(ERR, "Failed to find profile segment header");
1632 return -EINVAL;
1633 }
1634
1635 profile_info_sec = rte_zmalloc(
1636 "i40e_profile_info",
1637 sizeof(struct i40e_profile_section_header) +
1638 sizeof(struct i40e_profile_info),
1639 0);
1640 if (!profile_info_sec) {
1641 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1642 return -EINVAL;
1643 }
1644
1645 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
1646 /* Check if the profile exists */
1647 i40e_generate_profile_info_sec(
1648 ((struct i40e_profile_segment *)profile_seg_hdr)->name,
1649 &((struct i40e_profile_segment *)profile_seg_hdr)->version,
1650 track_id, profile_info_sec, 1);
1651 is_exist = i40e_check_profile_info(port, profile_info_sec);
1652 if (is_exist > 0) {
1653 PMD_DRV_LOG(ERR, "Profile already exists.");
1654 rte_free(profile_info_sec);
1655 return 1;
1656 } else if (is_exist < 0) {
1657 PMD_DRV_LOG(ERR, "Failed to check profile.");
1658 rte_free(profile_info_sec);
1659 return -EINVAL;
1660 }
1661
1662 /* Write profile to HW */
1663 status = i40e_write_profile(
1664 hw,
1665 (struct i40e_profile_segment *)profile_seg_hdr,
1666 track_id);
1667 if (status) {
1668 PMD_DRV_LOG(ERR, "Failed to write profile.");
1669 rte_free(profile_info_sec);
1670 return status;
1671 }
1672
1673 /* Add profile info to info list */
1674 status = i40e_add_rm_profile_info(hw, profile_info_sec);
1675 if (status)
1676 PMD_DRV_LOG(ERR, "Failed to add profile info.");
1677 } else {
1678 PMD_DRV_LOG(ERR, "Operation not supported.");
1679 }
1680
1681 rte_free(profile_info_sec);
1682 return status;
1683 }
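/*
 * Usage sketch (assumptions: the file name and the read step are
 * hypothetical, only the API call comes from this file): load a DDP
 * profile from disk and write it to the NIC.
 *
 *   uint8_t *buf = ...;   // whole profile file read into memory
 *   uint32_t len = ...;   // file size in bytes
 *   int rc = rte_pmd_i40e_process_ddp_package(port_id, buf, len,
 *                                             RTE_PMD_I40E_PKG_OP_WR_ADD);
 *   // rc == 1: profile already loaded; rc < 0: error; rc == 0: success
 */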
1684
1685 int
1686 rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size)
1687 {
1688 struct rte_eth_dev *dev;
1689 struct i40e_hw *hw;
1690 enum i40e_status_code status = I40E_SUCCESS;
1691
1692 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1693
1694 dev = &rte_eth_devices[port];
1695
1696 if (!is_i40e_supported(dev))
1697 return -ENOTSUP;
1698
1699 if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
1700 return -EINVAL;
1701
1702 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1703
1704 status = i40e_aq_get_ddp_list(hw, (void *)buff,
1705 size, 0, NULL);
1706
1707 return status;
1708 }
1709
1710 static int check_invalid_pkt_type(uint32_t pkt_type)
1711 {
1712 uint32_t l2, l3, l4, tnl, il2, il3, il4;
1713
1714 l2 = pkt_type & RTE_PTYPE_L2_MASK;
1715 l3 = pkt_type & RTE_PTYPE_L3_MASK;
1716 l4 = pkt_type & RTE_PTYPE_L4_MASK;
1717 tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
1718 il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
1719 il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
1720 il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
1721
1722 if (l2 &&
1723 l2 != RTE_PTYPE_L2_ETHER &&
1724 l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
1725 l2 != RTE_PTYPE_L2_ETHER_ARP &&
1726 l2 != RTE_PTYPE_L2_ETHER_LLDP &&
1727 l2 != RTE_PTYPE_L2_ETHER_NSH &&
1728 l2 != RTE_PTYPE_L2_ETHER_VLAN &&
1729 l2 != RTE_PTYPE_L2_ETHER_QINQ)
1730 return -1;
1731
1732 if (l3 &&
1733 l3 != RTE_PTYPE_L3_IPV4 &&
1734 l3 != RTE_PTYPE_L3_IPV4_EXT &&
1735 l3 != RTE_PTYPE_L3_IPV6 &&
1736 l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
1737 l3 != RTE_PTYPE_L3_IPV6_EXT &&
1738 l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
1739 return -1;
1740
1741 if (l4 &&
1742 l4 != RTE_PTYPE_L4_TCP &&
1743 l4 != RTE_PTYPE_L4_UDP &&
1744 l4 != RTE_PTYPE_L4_FRAG &&
1745 l4 != RTE_PTYPE_L4_SCTP &&
1746 l4 != RTE_PTYPE_L4_ICMP &&
1747 l4 != RTE_PTYPE_L4_NONFRAG)
1748 return -1;
1749
1750 if (tnl &&
1751 tnl != RTE_PTYPE_TUNNEL_IP &&
1752 tnl != RTE_PTYPE_TUNNEL_GRENAT &&
1753 tnl != RTE_PTYPE_TUNNEL_VXLAN &&
1754 tnl != RTE_PTYPE_TUNNEL_NVGRE &&
1755 tnl != RTE_PTYPE_TUNNEL_GENEVE)
1757 return -1;
1758
1759 if (il2 &&
1760 il2 != RTE_PTYPE_INNER_L2_ETHER &&
1761 il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
1762 il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
1763 return -1;
1764
1765 if (il3 &&
1766 il3 != RTE_PTYPE_INNER_L3_IPV4 &&
1767 il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
1768 il3 != RTE_PTYPE_INNER_L3_IPV6 &&
1769 il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
1770 il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
1771 il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
1772 return -1;
1773
1774 if (il4 &&
1775 il4 != RTE_PTYPE_INNER_L4_TCP &&
1776 il4 != RTE_PTYPE_INNER_L4_UDP &&
1777 il4 != RTE_PTYPE_INNER_L4_FRAG &&
1778 il4 != RTE_PTYPE_INNER_L4_SCTP &&
1779 il4 != RTE_PTYPE_INNER_L4_ICMP &&
1780 il4 != RTE_PTYPE_INNER_L4_NONFRAG)
1781 return -1;
1782
1783 return 0;
1784 }
1785
1786 static int check_invalid_ptype_mapping(
1787 struct rte_pmd_i40e_ptype_mapping *mapping_table,
1788 uint16_t count)
1789 {
1790 int i;
1791
1792 for (i = 0; i < count; i++) {
1793 uint16_t ptype = mapping_table[i].hw_ptype;
1794 uint32_t pkt_type = mapping_table[i].sw_ptype;
1795
1796 if (ptype >= I40E_MAX_PKT_TYPE)
1797 return -1;
1798
1799 if (pkt_type == RTE_PTYPE_UNKNOWN)
1800 continue;
1801
1802 if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
1803 continue;
1804
1805 if (check_invalid_pkt_type(pkt_type))
1806 return -1;
1807 }
1808
1809 return 0;
1810 }
1811
1812 int
1813 rte_pmd_i40e_ptype_mapping_update(
1814 uint8_t port,
1815 struct rte_pmd_i40e_ptype_mapping *mapping_items,
1816 uint16_t count,
1817 uint8_t exclusive)
1818 {
1819 struct rte_eth_dev *dev;
1820 struct i40e_adapter *ad;
1821 int i;
1822
1823 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1824
1825 dev = &rte_eth_devices[port];
1826
1827 if (!is_i40e_supported(dev))
1828 return -ENOTSUP;
1829
1830 if (count > I40E_MAX_PKT_TYPE)
1831 return -EINVAL;
1832
1833 if (check_invalid_ptype_mapping(mapping_items, count))
1834 return -EINVAL;
1835
1836 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1837
1838 if (exclusive) {
1839 for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
1840 ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
1841 }
1842
1843 for (i = 0; i < count; i++)
1844 ad->ptype_tbl[mapping_items[i].hw_ptype]
1845 = mapping_items[i].sw_ptype;
1846
1847 return 0;
1848 }
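/*
 * Usage sketch (illustrative values): map one hardware ptype to a software
 * ptype. With exclusive = 1 the rest of the table is first reset to
 * RTE_PTYPE_UNKNOWN; with 0 only the listed entries change.
 *
 *   struct rte_pmd_i40e_ptype_mapping m = {
 *       .hw_ptype = 38,                               // hypothetical id
 *       .sw_ptype = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
 *   };
 *   int rc = rte_pmd_i40e_ptype_mapping_update(port_id, &m, 1, 0);
 */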
1849
1850 int rte_pmd_i40e_ptype_mapping_reset(uint8_t port)
1851 {
1852 struct rte_eth_dev *dev;
1853
1854 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1855
1856 dev = &rte_eth_devices[port];
1857
1858 if (!is_i40e_supported(dev))
1859 return -ENOTSUP;
1860
1861 i40e_set_default_ptype_table(dev);
1862
1863 return 0;
1864 }
1865
1866 int rte_pmd_i40e_ptype_mapping_get(
1867 uint8_t port,
1868 struct rte_pmd_i40e_ptype_mapping *mapping_items,
1869 uint16_t size,
1870 uint16_t *count,
1871 uint8_t valid_only)
1872 {
1873 struct rte_eth_dev *dev;
1874 struct i40e_adapter *ad;
1875 int n = 0;
1876 uint16_t i;
1877
1878 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1879
1880 dev = &rte_eth_devices[port];
1881
1882 if (!is_i40e_supported(dev))
1883 return -ENOTSUP;
1884
1885 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1886
1887 for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
1888 if (n >= size)
1889 break;
1890 if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
1891 continue;
1892 mapping_items[n].hw_ptype = i;
1893 mapping_items[n].sw_ptype = ad->ptype_tbl[i];
1894 n++;
1895 }
1896
1897 *count = n;
1898 return 0;
1899 }
1900
1901 int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
1902 uint32_t target,
1903 uint8_t mask,
1904 uint32_t pkt_type)
1905 {
1906 struct rte_eth_dev *dev;
1907 struct i40e_adapter *ad;
1908 uint16_t i;
1909
1910 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1911
1912 dev = &rte_eth_devices[port];
1913
1914 if (!is_i40e_supported(dev))
1915 return -ENOTSUP;
1916
1917 if (!mask && check_invalid_pkt_type(target))
1918 return -EINVAL;
1919
1920 if (check_invalid_pkt_type(pkt_type))
1921 return -EINVAL;
1922
1923 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1924
1925 for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
1926 if (mask) {
1927 if ((target | ad->ptype_tbl[i]) == target &&
1928 (target & ad->ptype_tbl[i]))
1929 ad->ptype_tbl[i] = pkt_type;
1930 } else {
1931 if (ad->ptype_tbl[i] == target)
1932 ad->ptype_tbl[i] = pkt_type;
1933 }
1934 }
1935
1936 return 0;
1937 }