/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
34 #include <rte_malloc.h>
35 #include <rte_tailq.h>
37 #include "base/i40e_prototype.h"
38 #include "i40e_ethdev.h"
40 #include "i40e_rxtx.h"
41 #include "rte_pmd_i40e.h"
43 /* The max bandwidth of i40e is 40Gbps. */
44 #define I40E_QOS_BW_MAX 40000
45 /* The bandwidth should be the multiple of 50Mbps. */
46 #define I40E_QOS_BW_GRANULARITY 50
47 /* The min bandwidth weight is 1. */
48 #define I40E_QOS_BW_WEIGHT_MIN 1
49 /* The max bandwidth weight is 127. */
50 #define I40E_QOS_BW_WEIGHT_MAX 127
53 rte_pmd_i40e_ping_vfs(uint8_t port
, uint16_t vf
)
55 struct rte_eth_dev
*dev
;
58 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
60 dev
= &rte_eth_devices
[port
];
62 if (!is_i40e_supported(dev
))
65 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
67 if (vf
>= pf
->vf_num
|| !pf
->vfs
) {
68 PMD_DRV_LOG(ERR
, "Invalid argument.");
72 i40e_notify_vf_link_status(dev
, &pf
->vfs
[vf
]);
78 rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port
, uint16_t vf_id
, uint8_t on
)
80 struct rte_eth_dev
*dev
;
84 struct i40e_vsi_context ctxt
;
87 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
89 dev
= &rte_eth_devices
[port
];
91 if (!is_i40e_supported(dev
))
94 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
96 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
97 PMD_DRV_LOG(ERR
, "Invalid argument.");
101 vsi
= pf
->vfs
[vf_id
].vsi
;
103 PMD_DRV_LOG(ERR
, "Invalid VSI.");
107 /* Check if it has been already on or off */
108 if (vsi
->info
.valid_sections
&
109 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID
)) {
111 if ((vsi
->info
.sec_flags
&
112 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK
) ==
113 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK
)
114 return 0; /* already on */
116 if ((vsi
->info
.sec_flags
&
117 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK
) == 0)
118 return 0; /* already off */
122 vsi
->info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID
);
124 vsi
->info
.sec_flags
|= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK
;
126 vsi
->info
.sec_flags
&= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK
;
128 memset(&ctxt
, 0, sizeof(ctxt
));
129 (void)rte_memcpy(&ctxt
.info
, &vsi
->info
, sizeof(vsi
->info
));
130 ctxt
.seid
= vsi
->seid
;
132 hw
= I40E_VSI_TO_HW(vsi
);
133 ret
= i40e_aq_update_vsi_params(hw
, &ctxt
, NULL
);
134 if (ret
!= I40E_SUCCESS
) {
136 PMD_DRV_LOG(ERR
, "Failed to update VSI params");
143 i40e_add_rm_all_vlan_filter(struct i40e_vsi
*vsi
, uint8_t add
)
147 struct i40e_hw
*hw
= I40E_VSI_TO_HW(vsi
);
148 struct i40e_aqc_add_remove_vlan_element_data vlan_data
= {0};
151 for (j
= 0; j
< I40E_VFTA_SIZE
; j
++) {
155 for (k
= 0; k
< I40E_UINT32_BIT_SIZE
; k
++) {
156 if (!(vsi
->vfta
[j
] & (1 << k
)))
159 vlan_id
= j
* I40E_UINT32_BIT_SIZE
+ k
;
163 vlan_data
.vlan_tag
= rte_cpu_to_le_16(vlan_id
);
165 ret
= i40e_aq_add_vlan(hw
, vsi
->seid
,
166 &vlan_data
, 1, NULL
);
168 ret
= i40e_aq_remove_vlan(hw
, vsi
->seid
,
169 &vlan_data
, 1, NULL
);
170 if (ret
!= I40E_SUCCESS
) {
172 "Failed to add/rm vlan filter");
182 rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port
, uint16_t vf_id
, uint8_t on
)
184 struct rte_eth_dev
*dev
;
186 struct i40e_vsi
*vsi
;
188 struct i40e_vsi_context ctxt
;
191 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
193 dev
= &rte_eth_devices
[port
];
195 if (!is_i40e_supported(dev
))
198 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
200 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
201 PMD_DRV_LOG(ERR
, "Invalid argument.");
205 vsi
= pf
->vfs
[vf_id
].vsi
;
207 PMD_DRV_LOG(ERR
, "Invalid VSI.");
211 /* Check if it has been already on or off */
212 if (vsi
->vlan_anti_spoof_on
== on
)
213 return 0; /* already on or off */
215 vsi
->vlan_anti_spoof_on
= on
;
216 if (!vsi
->vlan_filter_on
) {
217 ret
= i40e_add_rm_all_vlan_filter(vsi
, on
);
219 PMD_DRV_LOG(ERR
, "Failed to add/remove VLAN filters.");
224 vsi
->info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID
);
226 vsi
->info
.sec_flags
|= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK
;
228 vsi
->info
.sec_flags
&= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK
;
230 memset(&ctxt
, 0, sizeof(ctxt
));
231 (void)rte_memcpy(&ctxt
.info
, &vsi
->info
, sizeof(vsi
->info
));
232 ctxt
.seid
= vsi
->seid
;
234 hw
= I40E_VSI_TO_HW(vsi
);
235 ret
= i40e_aq_update_vsi_params(hw
, &ctxt
, NULL
);
236 if (ret
!= I40E_SUCCESS
) {
238 PMD_DRV_LOG(ERR
, "Failed to update VSI params");
245 i40e_vsi_rm_mac_filter(struct i40e_vsi
*vsi
)
247 struct i40e_mac_filter
*f
;
248 struct i40e_macvlan_filter
*mv_f
;
250 enum rte_mac_filter_type filter_type
;
251 int ret
= I40E_SUCCESS
;
254 /* remove all the MACs */
255 TAILQ_FOREACH_SAFE(f
, &vsi
->mac_list
, next
, temp
) {
256 vlan_num
= vsi
->vlan_num
;
257 filter_type
= f
->mac_info
.filter_type
;
258 if (filter_type
== RTE_MACVLAN_PERFECT_MATCH
||
259 filter_type
== RTE_MACVLAN_HASH_MATCH
) {
261 PMD_DRV_LOG(ERR
, "VLAN number shouldn't be 0");
262 return I40E_ERR_PARAM
;
264 } else if (filter_type
== RTE_MAC_PERFECT_MATCH
||
265 filter_type
== RTE_MAC_HASH_MATCH
)
268 mv_f
= rte_zmalloc("macvlan_data", vlan_num
* sizeof(*mv_f
), 0);
270 PMD_DRV_LOG(ERR
, "failed to allocate memory");
271 return I40E_ERR_NO_MEMORY
;
274 for (i
= 0; i
< vlan_num
; i
++) {
275 mv_f
[i
].filter_type
= filter_type
;
276 (void)rte_memcpy(&mv_f
[i
].macaddr
,
277 &f
->mac_info
.mac_addr
,
280 if (filter_type
== RTE_MACVLAN_PERFECT_MATCH
||
281 filter_type
== RTE_MACVLAN_HASH_MATCH
) {
282 ret
= i40e_find_all_vlan_for_mac(vsi
, mv_f
, vlan_num
,
283 &f
->mac_info
.mac_addr
);
284 if (ret
!= I40E_SUCCESS
) {
290 ret
= i40e_remove_macvlan_filters(vsi
, mv_f
, vlan_num
);
291 if (ret
!= I40E_SUCCESS
) {
304 i40e_vsi_restore_mac_filter(struct i40e_vsi
*vsi
)
306 struct i40e_mac_filter
*f
;
307 struct i40e_macvlan_filter
*mv_f
;
309 int ret
= I40E_SUCCESS
;
312 /* restore all the MACs */
313 TAILQ_FOREACH_SAFE(f
, &vsi
->mac_list
, next
, temp
) {
314 if ((f
->mac_info
.filter_type
== RTE_MACVLAN_PERFECT_MATCH
) ||
315 (f
->mac_info
.filter_type
== RTE_MACVLAN_HASH_MATCH
)) {
317 * If vlan_num is 0, that's the first time to add mac,
318 * set mask for vlan_id 0.
320 if (vsi
->vlan_num
== 0) {
321 i40e_set_vlan_filter(vsi
, 0, 1);
324 vlan_num
= vsi
->vlan_num
;
325 } else if ((f
->mac_info
.filter_type
== RTE_MAC_PERFECT_MATCH
) ||
326 (f
->mac_info
.filter_type
== RTE_MAC_HASH_MATCH
))
329 mv_f
= rte_zmalloc("macvlan_data", vlan_num
* sizeof(*mv_f
), 0);
331 PMD_DRV_LOG(ERR
, "failed to allocate memory");
332 return I40E_ERR_NO_MEMORY
;
335 for (i
= 0; i
< vlan_num
; i
++) {
336 mv_f
[i
].filter_type
= f
->mac_info
.filter_type
;
337 (void)rte_memcpy(&mv_f
[i
].macaddr
,
338 &f
->mac_info
.mac_addr
,
342 if (f
->mac_info
.filter_type
== RTE_MACVLAN_PERFECT_MATCH
||
343 f
->mac_info
.filter_type
== RTE_MACVLAN_HASH_MATCH
) {
344 ret
= i40e_find_all_vlan_for_mac(vsi
, mv_f
, vlan_num
,
345 &f
->mac_info
.mac_addr
);
346 if (ret
!= I40E_SUCCESS
) {
352 ret
= i40e_add_macvlan_filters(vsi
, mv_f
, vlan_num
);
353 if (ret
!= I40E_SUCCESS
) {
366 i40e_vsi_set_tx_loopback(struct i40e_vsi
*vsi
, uint8_t on
)
368 struct i40e_vsi_context ctxt
;
375 hw
= I40E_VSI_TO_HW(vsi
);
377 /* Use the FW API if FW >= v5.0 */
378 if (hw
->aq
.fw_maj_ver
< 5) {
379 PMD_INIT_LOG(ERR
, "FW < v5.0, cannot enable loopback");
383 /* Check if it has been already on or off */
384 if (vsi
->info
.valid_sections
&
385 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID
)) {
387 if ((vsi
->info
.switch_id
&
388 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
) ==
389 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
)
390 return 0; /* already on */
392 if ((vsi
->info
.switch_id
&
393 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
) == 0)
394 return 0; /* already off */
398 /* remove all the MAC and VLAN first */
399 ret
= i40e_vsi_rm_mac_filter(vsi
);
401 PMD_INIT_LOG(ERR
, "Failed to remove MAC filters.");
404 if (vsi
->vlan_anti_spoof_on
|| vsi
->vlan_filter_on
) {
405 ret
= i40e_add_rm_all_vlan_filter(vsi
, 0);
407 PMD_INIT_LOG(ERR
, "Failed to remove VLAN filters.");
412 vsi
->info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID
);
414 vsi
->info
.switch_id
|= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
;
416 vsi
->info
.switch_id
&= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
;
418 memset(&ctxt
, 0, sizeof(ctxt
));
419 (void)rte_memcpy(&ctxt
.info
, &vsi
->info
, sizeof(vsi
->info
));
420 ctxt
.seid
= vsi
->seid
;
422 ret
= i40e_aq_update_vsi_params(hw
, &ctxt
, NULL
);
423 if (ret
!= I40E_SUCCESS
) {
424 PMD_DRV_LOG(ERR
, "Failed to update VSI params");
428 /* add all the MAC and VLAN back */
429 ret
= i40e_vsi_restore_mac_filter(vsi
);
432 if (vsi
->vlan_anti_spoof_on
|| vsi
->vlan_filter_on
) {
433 ret
= i40e_add_rm_all_vlan_filter(vsi
, 1);
442 rte_pmd_i40e_set_tx_loopback(uint8_t port
, uint8_t on
)
444 struct rte_eth_dev
*dev
;
446 struct i40e_pf_vf
*vf
;
447 struct i40e_vsi
*vsi
;
451 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
453 dev
= &rte_eth_devices
[port
];
455 if (!is_i40e_supported(dev
))
458 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
460 /* setup PF TX loopback */
462 ret
= i40e_vsi_set_tx_loopback(vsi
, on
);
466 /* setup TX loopback for all the VFs */
468 /* if no VF, do nothing. */
472 for (vf_id
= 0; vf_id
< pf
->vf_num
; vf_id
++) {
473 vf
= &pf
->vfs
[vf_id
];
476 ret
= i40e_vsi_set_tx_loopback(vsi
, on
);
485 rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port
, uint16_t vf_id
, uint8_t on
)
487 struct rte_eth_dev
*dev
;
489 struct i40e_vsi
*vsi
;
493 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
495 dev
= &rte_eth_devices
[port
];
497 if (!is_i40e_supported(dev
))
500 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
502 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
503 PMD_DRV_LOG(ERR
, "Invalid argument.");
507 vsi
= pf
->vfs
[vf_id
].vsi
;
509 PMD_DRV_LOG(ERR
, "Invalid VSI.");
513 hw
= I40E_VSI_TO_HW(vsi
);
515 ret
= i40e_aq_set_vsi_unicast_promiscuous(hw
, vsi
->seid
,
517 if (ret
!= I40E_SUCCESS
) {
519 PMD_DRV_LOG(ERR
, "Failed to set unicast promiscuous mode");
526 rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port
, uint16_t vf_id
, uint8_t on
)
528 struct rte_eth_dev
*dev
;
530 struct i40e_vsi
*vsi
;
534 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
536 dev
= &rte_eth_devices
[port
];
538 if (!is_i40e_supported(dev
))
541 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
543 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
544 PMD_DRV_LOG(ERR
, "Invalid argument.");
548 vsi
= pf
->vfs
[vf_id
].vsi
;
550 PMD_DRV_LOG(ERR
, "Invalid VSI.");
554 hw
= I40E_VSI_TO_HW(vsi
);
556 ret
= i40e_aq_set_vsi_multicast_promiscuous(hw
, vsi
->seid
,
558 if (ret
!= I40E_SUCCESS
) {
560 PMD_DRV_LOG(ERR
, "Failed to set multicast promiscuous mode");
567 rte_pmd_i40e_set_vf_mac_addr(uint8_t port
, uint16_t vf_id
,
568 struct ether_addr
*mac_addr
)
570 struct i40e_mac_filter
*f
;
571 struct rte_eth_dev
*dev
;
572 struct i40e_pf_vf
*vf
;
573 struct i40e_vsi
*vsi
;
577 if (i40e_validate_mac_addr((u8
*)mac_addr
) != I40E_SUCCESS
)
580 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
582 dev
= &rte_eth_devices
[port
];
584 if (!is_i40e_supported(dev
))
587 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
589 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
)
592 vf
= &pf
->vfs
[vf_id
];
595 PMD_DRV_LOG(ERR
, "Invalid VSI.");
599 ether_addr_copy(mac_addr
, &vf
->mac_addr
);
601 /* Remove all existing mac */
602 TAILQ_FOREACH_SAFE(f
, &vsi
->mac_list
, next
, temp
)
603 i40e_vsi_delete_mac(vsi
, &f
->mac_info
.mac_addr
);
608 /* Set vlan strip on/off for specific VF from host */
610 rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port
, uint16_t vf_id
, uint8_t on
)
612 struct rte_eth_dev
*dev
;
614 struct i40e_vsi
*vsi
;
617 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
619 dev
= &rte_eth_devices
[port
];
621 if (!is_i40e_supported(dev
))
624 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
626 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
627 PMD_DRV_LOG(ERR
, "Invalid argument.");
631 vsi
= pf
->vfs
[vf_id
].vsi
;
636 ret
= i40e_vsi_config_vlan_stripping(vsi
, !!on
);
637 if (ret
!= I40E_SUCCESS
) {
639 PMD_DRV_LOG(ERR
, "Failed to set VLAN stripping!");
645 int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port
, uint16_t vf_id
,
648 struct rte_eth_dev
*dev
;
651 struct i40e_vsi
*vsi
;
652 struct i40e_vsi_context ctxt
;
655 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
657 if (vlan_id
> ETHER_MAX_VLAN_ID
) {
658 PMD_DRV_LOG(ERR
, "Invalid VLAN ID.");
662 dev
= &rte_eth_devices
[port
];
664 if (!is_i40e_supported(dev
))
667 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
668 hw
= I40E_PF_TO_HW(pf
);
671 * return -ENODEV if SRIOV not enabled, VF number not configured
672 * or no queue assigned.
674 if (!hw
->func_caps
.sr_iov_1_1
|| pf
->vf_num
== 0 ||
678 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
679 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
683 vsi
= pf
->vfs
[vf_id
].vsi
;
685 PMD_DRV_LOG(ERR
, "Invalid VSI.");
689 vsi
->info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID
);
690 vsi
->info
.pvid
= vlan_id
;
692 vsi
->info
.port_vlan_flags
|= I40E_AQ_VSI_PVLAN_INSERT_PVID
;
694 vsi
->info
.port_vlan_flags
&= ~I40E_AQ_VSI_PVLAN_INSERT_PVID
;
696 memset(&ctxt
, 0, sizeof(ctxt
));
697 (void)rte_memcpy(&ctxt
.info
, &vsi
->info
, sizeof(vsi
->info
));
698 ctxt
.seid
= vsi
->seid
;
700 hw
= I40E_VSI_TO_HW(vsi
);
701 ret
= i40e_aq_update_vsi_params(hw
, &ctxt
, NULL
);
702 if (ret
!= I40E_SUCCESS
) {
704 PMD_DRV_LOG(ERR
, "Failed to update VSI params");
710 int rte_pmd_i40e_set_vf_broadcast(uint8_t port
, uint16_t vf_id
,
713 struct rte_eth_dev
*dev
;
715 struct i40e_vsi
*vsi
;
717 struct i40e_mac_filter_info filter
;
718 struct ether_addr broadcast
= {
719 .addr_bytes
= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
722 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
725 PMD_DRV_LOG(ERR
, "on should be 0 or 1.");
729 dev
= &rte_eth_devices
[port
];
731 if (!is_i40e_supported(dev
))
734 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
735 hw
= I40E_PF_TO_HW(pf
);
737 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
738 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
743 * return -ENODEV if SRIOV not enabled, VF number not configured
744 * or no queue assigned.
746 if (!hw
->func_caps
.sr_iov_1_1
|| pf
->vf_num
== 0 ||
747 pf
->vf_nb_qps
== 0) {
748 PMD_DRV_LOG(ERR
, "SRIOV is not enabled or no queue.");
752 vsi
= pf
->vfs
[vf_id
].vsi
;
754 PMD_DRV_LOG(ERR
, "Invalid VSI.");
759 (void)rte_memcpy(&filter
.mac_addr
, &broadcast
, ETHER_ADDR_LEN
);
760 filter
.filter_type
= RTE_MACVLAN_PERFECT_MATCH
;
761 ret
= i40e_vsi_add_mac(vsi
, &filter
);
763 ret
= i40e_vsi_delete_mac(vsi
, &broadcast
);
766 if (ret
!= I40E_SUCCESS
&& ret
!= I40E_ERR_PARAM
) {
768 PMD_DRV_LOG(ERR
, "Failed to set VSI broadcast");
776 int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port
, uint16_t vf_id
, uint8_t on
)
778 struct rte_eth_dev
*dev
;
781 struct i40e_vsi
*vsi
;
782 struct i40e_vsi_context ctxt
;
785 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
788 PMD_DRV_LOG(ERR
, "on should be 0 or 1.");
792 dev
= &rte_eth_devices
[port
];
794 if (!is_i40e_supported(dev
))
797 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
798 hw
= I40E_PF_TO_HW(pf
);
801 * return -ENODEV if SRIOV not enabled, VF number not configured
802 * or no queue assigned.
804 if (!hw
->func_caps
.sr_iov_1_1
|| pf
->vf_num
== 0 ||
805 pf
->vf_nb_qps
== 0) {
806 PMD_DRV_LOG(ERR
, "SRIOV is not enabled or no queue.");
810 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
811 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
815 vsi
= pf
->vfs
[vf_id
].vsi
;
817 PMD_DRV_LOG(ERR
, "Invalid VSI.");
821 vsi
->info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID
);
823 vsi
->info
.port_vlan_flags
|= I40E_AQ_VSI_PVLAN_MODE_TAGGED
;
824 vsi
->info
.port_vlan_flags
&= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED
;
826 vsi
->info
.port_vlan_flags
|= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED
;
827 vsi
->info
.port_vlan_flags
&= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED
;
830 memset(&ctxt
, 0, sizeof(ctxt
));
831 (void)rte_memcpy(&ctxt
.info
, &vsi
->info
, sizeof(vsi
->info
));
832 ctxt
.seid
= vsi
->seid
;
834 hw
= I40E_VSI_TO_HW(vsi
);
835 ret
= i40e_aq_update_vsi_params(hw
, &ctxt
, NULL
);
836 if (ret
!= I40E_SUCCESS
) {
838 PMD_DRV_LOG(ERR
, "Failed to update VSI params");
845 i40e_vlan_filter_count(struct i40e_vsi
*vsi
)
851 for (j
= 0; j
< I40E_VFTA_SIZE
; j
++) {
855 for (k
= 0; k
< I40E_UINT32_BIT_SIZE
; k
++) {
856 if (!(vsi
->vfta
[j
] & (1 << k
)))
859 vlan_id
= j
* I40E_UINT32_BIT_SIZE
+ k
;
870 int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port
, uint16_t vlan_id
,
871 uint64_t vf_mask
, uint8_t on
)
873 struct rte_eth_dev
*dev
;
876 struct i40e_vsi
*vsi
;
878 int ret
= I40E_SUCCESS
;
880 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
882 dev
= &rte_eth_devices
[port
];
884 if (!is_i40e_supported(dev
))
887 if (vlan_id
> ETHER_MAX_VLAN_ID
|| !vlan_id
) {
888 PMD_DRV_LOG(ERR
, "Invalid VLAN ID.");
893 PMD_DRV_LOG(ERR
, "No VF.");
898 PMD_DRV_LOG(ERR
, "on is should be 0 or 1.");
902 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
903 hw
= I40E_PF_TO_HW(pf
);
906 * return -ENODEV if SRIOV not enabled, VF number not configured
907 * or no queue assigned.
909 if (!hw
->func_caps
.sr_iov_1_1
|| pf
->vf_num
== 0 ||
910 pf
->vf_nb_qps
== 0) {
911 PMD_DRV_LOG(ERR
, "SRIOV is not enabled or no queue.");
915 for (vf_idx
= 0; vf_idx
< pf
->vf_num
&& ret
== I40E_SUCCESS
; vf_idx
++) {
916 if (vf_mask
& ((uint64_t)(1ULL << vf_idx
))) {
917 vsi
= pf
->vfs
[vf_idx
].vsi
;
919 if (!vsi
->vlan_filter_on
) {
920 vsi
->vlan_filter_on
= true;
921 i40e_aq_set_vsi_vlan_promisc(hw
,
925 if (!vsi
->vlan_anti_spoof_on
)
926 i40e_add_rm_all_vlan_filter(
929 ret
= i40e_vsi_add_vlan(vsi
, vlan_id
);
931 ret
= i40e_vsi_delete_vlan(vsi
, vlan_id
);
933 if (!i40e_vlan_filter_count(vsi
)) {
934 vsi
->vlan_filter_on
= false;
935 i40e_aq_set_vsi_vlan_promisc(hw
,
944 if (ret
!= I40E_SUCCESS
) {
946 PMD_DRV_LOG(ERR
, "Failed to set VF VLAN filter, on = %d", on
);
953 rte_pmd_i40e_get_vf_stats(uint8_t port
,
955 struct rte_eth_stats
*stats
)
957 struct rte_eth_dev
*dev
;
959 struct i40e_vsi
*vsi
;
961 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
963 dev
= &rte_eth_devices
[port
];
965 if (!is_i40e_supported(dev
))
968 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
970 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
971 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
975 vsi
= pf
->vfs
[vf_id
].vsi
;
977 PMD_DRV_LOG(ERR
, "Invalid VSI.");
981 i40e_update_vsi_stats(vsi
);
983 stats
->ipackets
= vsi
->eth_stats
.rx_unicast
+
984 vsi
->eth_stats
.rx_multicast
+
985 vsi
->eth_stats
.rx_broadcast
;
986 stats
->opackets
= vsi
->eth_stats
.tx_unicast
+
987 vsi
->eth_stats
.tx_multicast
+
988 vsi
->eth_stats
.tx_broadcast
;
989 stats
->ibytes
= vsi
->eth_stats
.rx_bytes
;
990 stats
->obytes
= vsi
->eth_stats
.tx_bytes
;
991 stats
->ierrors
= vsi
->eth_stats
.rx_discards
;
992 stats
->oerrors
= vsi
->eth_stats
.tx_errors
+ vsi
->eth_stats
.tx_discards
;
998 rte_pmd_i40e_reset_vf_stats(uint8_t port
,
1001 struct rte_eth_dev
*dev
;
1003 struct i40e_vsi
*vsi
;
1005 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1007 dev
= &rte_eth_devices
[port
];
1009 if (!is_i40e_supported(dev
))
1012 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
1014 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
1015 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
1019 vsi
= pf
->vfs
[vf_id
].vsi
;
1021 PMD_DRV_LOG(ERR
, "Invalid VSI.");
1025 vsi
->offset_loaded
= false;
1026 i40e_update_vsi_stats(vsi
);
1032 rte_pmd_i40e_set_vf_max_bw(uint8_t port
, uint16_t vf_id
, uint32_t bw
)
1034 struct rte_eth_dev
*dev
;
1036 struct i40e_vsi
*vsi
;
1041 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1043 dev
= &rte_eth_devices
[port
];
1045 if (!is_i40e_supported(dev
))
1048 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
1050 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
1051 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
1055 vsi
= pf
->vfs
[vf_id
].vsi
;
1057 PMD_DRV_LOG(ERR
, "Invalid VSI.");
1061 if (bw
> I40E_QOS_BW_MAX
) {
1062 PMD_DRV_LOG(ERR
, "Bandwidth should not be larger than %dMbps.",
1067 if (bw
% I40E_QOS_BW_GRANULARITY
) {
1068 PMD_DRV_LOG(ERR
, "Bandwidth should be the multiple of %dMbps.",
1069 I40E_QOS_BW_GRANULARITY
);
1073 bw
/= I40E_QOS_BW_GRANULARITY
;
1075 hw
= I40E_VSI_TO_HW(vsi
);
1078 if (bw
== vsi
->bw_info
.bw_limit
) {
1080 "No change for VF max bandwidth. Nothing to do.");
1085 * VF bandwidth limitation and TC bandwidth limitation cannot be
1086 * enabled in parallel, quit if TC bandwidth limitation is enabled.
1088 * If bw is 0, means disable bandwidth limitation. Then no need to
1089 * check TC bandwidth limitation.
1092 for (i
= 0; i
< I40E_MAX_TRAFFIC_CLASS
; i
++) {
1093 if ((vsi
->enabled_tc
& BIT_ULL(i
)) &&
1094 vsi
->bw_info
.bw_ets_credits
[i
])
1097 if (i
!= I40E_MAX_TRAFFIC_CLASS
) {
1099 "TC max bandwidth has been set on this VF,"
1100 " please disable it first.");
1105 ret
= i40e_aq_config_vsi_bw_limit(hw
, vsi
->seid
, (uint16_t)bw
, 0, NULL
);
1108 "Failed to set VF %d bandwidth, err(%d).",
1113 /* Store the configuration. */
1114 vsi
->bw_info
.bw_limit
= (uint16_t)bw
;
1115 vsi
->bw_info
.bw_max
= 0;
1121 rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port
, uint16_t vf_id
,
1122 uint8_t tc_num
, uint8_t *bw_weight
)
1124 struct rte_eth_dev
*dev
;
1126 struct i40e_vsi
*vsi
;
1128 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw
;
1132 bool b_change
= false;
1134 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1136 dev
= &rte_eth_devices
[port
];
1138 if (!is_i40e_supported(dev
))
1141 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
1143 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
1144 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
1148 vsi
= pf
->vfs
[vf_id
].vsi
;
1150 PMD_DRV_LOG(ERR
, "Invalid VSI.");
1154 if (tc_num
> I40E_MAX_TRAFFIC_CLASS
) {
1155 PMD_DRV_LOG(ERR
, "TCs should be no more than %d.",
1156 I40E_MAX_TRAFFIC_CLASS
);
1161 for (i
= 0; i
< I40E_MAX_TRAFFIC_CLASS
; i
++) {
1162 if (vsi
->enabled_tc
& BIT_ULL(i
))
1165 if (sum
!= tc_num
) {
1167 "Weight should be set for all %d enabled TCs.",
1173 for (i
= 0; i
< tc_num
; i
++) {
1174 if (!bw_weight
[i
]) {
1176 "The weight should be 1 at least.");
1179 sum
+= bw_weight
[i
];
1183 "The summary of the TC weight should be 100.");
1188 * Create the configuration for all the TCs.
1190 memset(&tc_bw
, 0, sizeof(tc_bw
));
1191 tc_bw
.tc_valid_bits
= vsi
->enabled_tc
;
1193 for (i
= 0; i
< I40E_MAX_TRAFFIC_CLASS
; i
++) {
1194 if (vsi
->enabled_tc
& BIT_ULL(i
)) {
1196 vsi
->bw_info
.bw_ets_share_credits
[i
])
1199 tc_bw
.tc_bw_credits
[i
] = bw_weight
[j
];
1207 "No change for TC allocated bandwidth."
1212 hw
= I40E_VSI_TO_HW(vsi
);
1214 ret
= i40e_aq_config_vsi_tc_bw(hw
, vsi
->seid
, &tc_bw
, NULL
);
1217 "Failed to set VF %d TC bandwidth weight, err(%d).",
1222 /* Store the configuration. */
1224 for (i
= 0; i
< I40E_MAX_TRAFFIC_CLASS
; i
++) {
1225 if (vsi
->enabled_tc
& BIT_ULL(i
)) {
1226 vsi
->bw_info
.bw_ets_share_credits
[i
] = bw_weight
[j
];
1235 rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port
, uint16_t vf_id
,
1236 uint8_t tc_no
, uint32_t bw
)
1238 struct rte_eth_dev
*dev
;
1240 struct i40e_vsi
*vsi
;
1242 struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw
;
1246 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1248 dev
= &rte_eth_devices
[port
];
1250 if (!is_i40e_supported(dev
))
1253 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
1255 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
1256 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
1260 vsi
= pf
->vfs
[vf_id
].vsi
;
1262 PMD_DRV_LOG(ERR
, "Invalid VSI.");
1266 if (bw
> I40E_QOS_BW_MAX
) {
1267 PMD_DRV_LOG(ERR
, "Bandwidth should not be larger than %dMbps.",
1272 if (bw
% I40E_QOS_BW_GRANULARITY
) {
1273 PMD_DRV_LOG(ERR
, "Bandwidth should be the multiple of %dMbps.",
1274 I40E_QOS_BW_GRANULARITY
);
1278 bw
/= I40E_QOS_BW_GRANULARITY
;
1280 if (tc_no
>= I40E_MAX_TRAFFIC_CLASS
) {
1281 PMD_DRV_LOG(ERR
, "TC No. should be less than %d.",
1282 I40E_MAX_TRAFFIC_CLASS
);
1286 hw
= I40E_VSI_TO_HW(vsi
);
1288 if (!(vsi
->enabled_tc
& BIT_ULL(tc_no
))) {
1289 PMD_DRV_LOG(ERR
, "VF %d TC %d isn't enabled.",
1295 if (bw
== vsi
->bw_info
.bw_ets_credits
[tc_no
]) {
1297 "No change for TC max bandwidth. Nothing to do.");
1302 * VF bandwidth limitation and TC bandwidth limitation cannot be
1303 * enabled in parallel, disable VF bandwidth limitation if it's
1305 * If bw is 0, means disable bandwidth limitation. Then no need to
1306 * care about VF bandwidth limitation configuration.
1308 if (bw
&& vsi
->bw_info
.bw_limit
) {
1309 ret
= i40e_aq_config_vsi_bw_limit(hw
, vsi
->seid
, 0, 0, NULL
);
1312 "Failed to disable VF(%d)"
1313 " bandwidth limitation, err(%d).",
1319 "VF max bandwidth is disabled according"
1320 " to TC max bandwidth setting.");
1324 * Get all the TCs' info to create a whole picture.
1325 * Because the incremental change isn't permitted.
1327 memset(&tc_bw
, 0, sizeof(tc_bw
));
1328 tc_bw
.tc_valid_bits
= vsi
->enabled_tc
;
1329 for (i
= 0; i
< I40E_MAX_TRAFFIC_CLASS
; i
++) {
1330 if (vsi
->enabled_tc
& BIT_ULL(i
)) {
1331 tc_bw
.tc_bw_credits
[i
] =
1333 vsi
->bw_info
.bw_ets_credits
[i
]);
1336 tc_bw
.tc_bw_credits
[tc_no
] = rte_cpu_to_le_16((uint16_t)bw
);
1338 ret
= i40e_aq_config_vsi_ets_sla_bw_limit(hw
, vsi
->seid
, &tc_bw
, NULL
);
1341 "Failed to set VF %d TC %d max bandwidth, err(%d).",
1346 /* Store the configuration. */
1347 vsi
->bw_info
.bw_ets_credits
[tc_no
] = (uint16_t)bw
;
1353 rte_pmd_i40e_set_tc_strict_prio(uint8_t port
, uint8_t tc_map
)
1355 struct rte_eth_dev
*dev
;
1357 struct i40e_vsi
*vsi
;
1358 struct i40e_veb
*veb
;
1360 struct i40e_aqc_configure_switching_comp_ets_data ets_data
;
1364 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1366 dev
= &rte_eth_devices
[port
];
1368 if (!is_i40e_supported(dev
))
1371 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
1375 PMD_DRV_LOG(ERR
, "Invalid VSI.");
1381 PMD_DRV_LOG(ERR
, "Invalid VEB.");
1385 if ((tc_map
& veb
->enabled_tc
) != tc_map
) {
1387 "TC bitmap isn't the subset of enabled TCs 0x%x.",
1392 if (tc_map
== veb
->strict_prio_tc
) {
1393 PMD_DRV_LOG(INFO
, "No change for TC bitmap. Nothing to do.");
1397 hw
= I40E_VSI_TO_HW(vsi
);
1399 /* Disable DCBx if it's the first time to set strict priority. */
1400 if (!veb
->strict_prio_tc
) {
1401 ret
= i40e_aq_stop_lldp(hw
, true, NULL
);
1404 "Failed to disable DCBx as it's already"
1408 "DCBx is disabled according to strict"
1409 " priority setting.");
1412 memset(&ets_data
, 0, sizeof(ets_data
));
1413 ets_data
.tc_valid_bits
= veb
->enabled_tc
;
1414 ets_data
.seepage
= I40E_AQ_ETS_SEEPAGE_EN_MASK
;
1415 ets_data
.tc_strict_priority_flags
= tc_map
;
1416 /* Get all TCs' bandwidth. */
1417 for (i
= 0; i
< I40E_MAX_TRAFFIC_CLASS
; i
++) {
1418 if (veb
->enabled_tc
& BIT_ULL(i
)) {
1419 /* For rubust, if bandwidth is 0, use 1 instead. */
1420 if (veb
->bw_info
.bw_ets_share_credits
[i
])
1421 ets_data
.tc_bw_share_credits
[i
] =
1422 veb
->bw_info
.bw_ets_share_credits
[i
];
1424 ets_data
.tc_bw_share_credits
[i
] =
1425 I40E_QOS_BW_WEIGHT_MIN
;
1429 if (!veb
->strict_prio_tc
)
1430 ret
= i40e_aq_config_switch_comp_ets(
1431 hw
, veb
->uplink_seid
,
1432 &ets_data
, i40e_aqc_opc_enable_switching_comp_ets
,
1435 ret
= i40e_aq_config_switch_comp_ets(
1436 hw
, veb
->uplink_seid
,
1437 &ets_data
, i40e_aqc_opc_modify_switching_comp_ets
,
1440 ret
= i40e_aq_config_switch_comp_ets(
1441 hw
, veb
->uplink_seid
,
1442 &ets_data
, i40e_aqc_opc_disable_switching_comp_ets
,
1447 "Failed to set TCs' strict priority mode."
1452 veb
->strict_prio_tc
= tc_map
;
1454 /* Enable DCBx again, if all the TCs' strict priority disabled. */
1456 ret
= i40e_aq_start_lldp(hw
, NULL
);
1459 "Failed to enable DCBx, err(%d).", ret
);
1464 "DCBx is enabled again according to strict"
1465 " priority setting.");
1471 #define I40E_PROFILE_INFO_SIZE 48
1472 #define I40E_MAX_PROFILE_NUM 16
1475 i40e_generate_profile_info_sec(char *name
, struct i40e_ddp_version
*version
,
1476 uint32_t track_id
, uint8_t *profile_info_sec
,
1479 struct i40e_profile_section_header
*sec
= NULL
;
1480 struct i40e_profile_info
*pinfo
;
1482 sec
= (struct i40e_profile_section_header
*)profile_info_sec
;
1484 sec
->data_end
= sizeof(struct i40e_profile_section_header
) +
1485 sizeof(struct i40e_profile_info
);
1486 sec
->section
.type
= SECTION_TYPE_INFO
;
1487 sec
->section
.offset
= sizeof(struct i40e_profile_section_header
);
1488 sec
->section
.size
= sizeof(struct i40e_profile_info
);
1489 pinfo
= (struct i40e_profile_info
*)(profile_info_sec
+
1490 sec
->section
.offset
);
1491 pinfo
->track_id
= track_id
;
1492 memcpy(pinfo
->name
, name
, I40E_DDP_NAME_SIZE
);
1493 memcpy(&pinfo
->version
, version
, sizeof(struct i40e_ddp_version
));
1495 pinfo
->op
= I40E_DDP_ADD_TRACKID
;
1497 pinfo
->op
= I40E_DDP_REMOVE_TRACKID
;
1500 static enum i40e_status_code
1501 i40e_add_rm_profile_info(struct i40e_hw
*hw
, uint8_t *profile_info_sec
)
1503 enum i40e_status_code status
= I40E_SUCCESS
;
1504 struct i40e_profile_section_header
*sec
;
1506 uint32_t offset
= 0;
1509 sec
= (struct i40e_profile_section_header
*)profile_info_sec
;
1510 track_id
= ((struct i40e_profile_info
*)(profile_info_sec
+
1511 sec
->section
.offset
))->track_id
;
1513 status
= i40e_aq_write_ddp(hw
, (void *)sec
, sec
->data_end
,
1514 track_id
, &offset
, &info
, NULL
);
1516 PMD_DRV_LOG(ERR
, "Failed to add/remove profile info: "
1517 "offset %d, info %d",
1523 #define I40E_PROFILE_INFO_SIZE 48
1524 #define I40E_MAX_PROFILE_NUM 16
1526 /* Check if the profile info exists */
1528 i40e_check_profile_info(uint8_t port
, uint8_t *profile_info_sec
)
1530 struct rte_eth_dev
*dev
= &rte_eth_devices
[port
];
1531 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1533 struct rte_pmd_i40e_profile_list
*p_list
;
1534 struct rte_pmd_i40e_profile_info
*pinfo
, *p
;
1538 buff
= rte_zmalloc("pinfo_list",
1539 (I40E_PROFILE_INFO_SIZE
* I40E_MAX_PROFILE_NUM
+ 4),
1542 PMD_DRV_LOG(ERR
, "failed to allocate memory");
1546 ret
= i40e_aq_get_ddp_list(
1548 (I40E_PROFILE_INFO_SIZE
* I40E_MAX_PROFILE_NUM
+ 4),
1551 PMD_DRV_LOG(ERR
, "Failed to get profile info list.");
1555 p_list
= (struct rte_pmd_i40e_profile_list
*)buff
;
1556 pinfo
= (struct rte_pmd_i40e_profile_info
*)(profile_info_sec
+
1557 sizeof(struct i40e_profile_section_header
));
1558 for (i
= 0; i
< p_list
->p_count
; i
++) {
1559 p
= &p_list
->p_info
[i
];
1560 if ((pinfo
->track_id
== p
->track_id
) &&
1561 !memcmp(&pinfo
->version
, &p
->version
,
1562 sizeof(struct i40e_ddp_version
)) &&
1563 !memcmp(&pinfo
->name
, &p
->name
,
1564 I40E_DDP_NAME_SIZE
)) {
1565 PMD_DRV_LOG(INFO
, "Profile exists.");
1576 rte_pmd_i40e_process_ddp_package(uint8_t port
, uint8_t *buff
,
1578 enum rte_pmd_i40e_package_op op
)
1580 struct rte_eth_dev
*dev
;
1582 struct i40e_package_header
*pkg_hdr
;
1583 struct i40e_generic_seg_header
*profile_seg_hdr
;
1584 struct i40e_generic_seg_header
*metadata_seg_hdr
;
1586 uint8_t *profile_info_sec
;
1588 enum i40e_status_code status
= I40E_SUCCESS
;
1590 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1592 dev
= &rte_eth_devices
[port
];
1594 if (!is_i40e_supported(dev
))
1597 hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1599 if (size
< (sizeof(struct i40e_package_header
) +
1600 sizeof(struct i40e_metadata_segment
) +
1601 sizeof(uint32_t) * 2)) {
1602 PMD_DRV_LOG(ERR
, "Buff is invalid.");
1606 pkg_hdr
= (struct i40e_package_header
*)buff
;
1609 PMD_DRV_LOG(ERR
, "Failed to fill the package structure");
1613 if (pkg_hdr
->segment_count
< 2) {
1614 PMD_DRV_LOG(ERR
, "Segment_count should be 2 at least.");
1618 /* Find metadata segment */
1619 metadata_seg_hdr
= i40e_find_segment_in_package(SEGMENT_TYPE_METADATA
,
1621 if (!metadata_seg_hdr
) {
1622 PMD_DRV_LOG(ERR
, "Failed to find metadata segment header");
1625 track_id
= ((struct i40e_metadata_segment
*)metadata_seg_hdr
)->track_id
;
1627 /* Find profile segment */
1628 profile_seg_hdr
= i40e_find_segment_in_package(SEGMENT_TYPE_I40E
,
1630 if (!profile_seg_hdr
) {
1631 PMD_DRV_LOG(ERR
, "Failed to find profile segment header");
1635 profile_info_sec
= rte_zmalloc(
1636 "i40e_profile_info",
1637 sizeof(struct i40e_profile_section_header
) +
1638 sizeof(struct i40e_profile_info
),
1640 if (!profile_info_sec
) {
1641 PMD_DRV_LOG(ERR
, "Failed to allocate memory");
1645 if (op
== RTE_PMD_I40E_PKG_OP_WR_ADD
) {
1646 /* Check if the profile exists */
1647 i40e_generate_profile_info_sec(
1648 ((struct i40e_profile_segment
*)profile_seg_hdr
)->name
,
1649 &((struct i40e_profile_segment
*)profile_seg_hdr
)->version
,
1650 track_id
, profile_info_sec
, 1);
1651 is_exist
= i40e_check_profile_info(port
, profile_info_sec
);
1653 PMD_DRV_LOG(ERR
, "Profile already exists.");
1654 rte_free(profile_info_sec
);
1656 } else if (is_exist
< 0) {
1657 PMD_DRV_LOG(ERR
, "Failed to check profile.");
1658 rte_free(profile_info_sec
);
1662 /* Write profile to HW */
1663 status
= i40e_write_profile(
1665 (struct i40e_profile_segment
*)profile_seg_hdr
,
1668 PMD_DRV_LOG(ERR
, "Failed to write profile.");
1669 rte_free(profile_info_sec
);
1673 /* Add profile info to info list */
1674 status
= i40e_add_rm_profile_info(hw
, profile_info_sec
);
1676 PMD_DRV_LOG(ERR
, "Failed to add profile info.");
1678 PMD_DRV_LOG(ERR
, "Operation not supported.");
1681 rte_free(profile_info_sec
);
1686 rte_pmd_i40e_get_ddp_list(uint8_t port
, uint8_t *buff
, uint32_t size
)
1688 struct rte_eth_dev
*dev
;
1690 enum i40e_status_code status
= I40E_SUCCESS
;
1692 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1694 dev
= &rte_eth_devices
[port
];
1696 if (!is_i40e_supported(dev
))
1699 if (size
< (I40E_PROFILE_INFO_SIZE
* I40E_MAX_PROFILE_NUM
+ 4))
1702 hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1704 status
= i40e_aq_get_ddp_list(hw
, (void *)buff
,
1710 static int check_invalid_pkt_type(uint32_t pkt_type
)
1712 uint32_t l2
, l3
, l4
, tnl
, il2
, il3
, il4
;
1714 l2
= pkt_type
& RTE_PTYPE_L2_MASK
;
1715 l3
= pkt_type
& RTE_PTYPE_L3_MASK
;
1716 l4
= pkt_type
& RTE_PTYPE_L4_MASK
;
1717 tnl
= pkt_type
& RTE_PTYPE_TUNNEL_MASK
;
1718 il2
= pkt_type
& RTE_PTYPE_INNER_L2_MASK
;
1719 il3
= pkt_type
& RTE_PTYPE_INNER_L3_MASK
;
1720 il4
= pkt_type
& RTE_PTYPE_INNER_L4_MASK
;
1723 l2
!= RTE_PTYPE_L2_ETHER
&&
1724 l2
!= RTE_PTYPE_L2_ETHER_TIMESYNC
&&
1725 l2
!= RTE_PTYPE_L2_ETHER_ARP
&&
1726 l2
!= RTE_PTYPE_L2_ETHER_LLDP
&&
1727 l2
!= RTE_PTYPE_L2_ETHER_NSH
&&
1728 l2
!= RTE_PTYPE_L2_ETHER_VLAN
&&
1729 l2
!= RTE_PTYPE_L2_ETHER_QINQ
)
1733 l3
!= RTE_PTYPE_L3_IPV4
&&
1734 l3
!= RTE_PTYPE_L3_IPV4_EXT
&&
1735 l3
!= RTE_PTYPE_L3_IPV6
&&
1736 l3
!= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
&&
1737 l3
!= RTE_PTYPE_L3_IPV6_EXT
&&
1738 l3
!= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
)
1742 l4
!= RTE_PTYPE_L4_TCP
&&
1743 l4
!= RTE_PTYPE_L4_UDP
&&
1744 l4
!= RTE_PTYPE_L4_FRAG
&&
1745 l4
!= RTE_PTYPE_L4_SCTP
&&
1746 l4
!= RTE_PTYPE_L4_ICMP
&&
1747 l4
!= RTE_PTYPE_L4_NONFRAG
)
1751 tnl
!= RTE_PTYPE_TUNNEL_IP
&&
1752 tnl
!= RTE_PTYPE_TUNNEL_GRENAT
&&
1753 tnl
!= RTE_PTYPE_TUNNEL_VXLAN
&&
1754 tnl
!= RTE_PTYPE_TUNNEL_NVGRE
&&
1755 tnl
!= RTE_PTYPE_TUNNEL_GENEVE
&&
1756 tnl
!= RTE_PTYPE_TUNNEL_GRENAT
)
1760 il2
!= RTE_PTYPE_INNER_L2_ETHER
&&
1761 il2
!= RTE_PTYPE_INNER_L2_ETHER_VLAN
&&
1762 il2
!= RTE_PTYPE_INNER_L2_ETHER_QINQ
)
1766 il3
!= RTE_PTYPE_INNER_L3_IPV4
&&
1767 il3
!= RTE_PTYPE_INNER_L3_IPV4_EXT
&&
1768 il3
!= RTE_PTYPE_INNER_L3_IPV6
&&
1769 il3
!= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN
&&
1770 il3
!= RTE_PTYPE_INNER_L3_IPV6_EXT
&&
1771 il3
!= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN
)
1775 il4
!= RTE_PTYPE_INNER_L4_TCP
&&
1776 il4
!= RTE_PTYPE_INNER_L4_UDP
&&
1777 il4
!= RTE_PTYPE_INNER_L4_FRAG
&&
1778 il4
!= RTE_PTYPE_INNER_L4_SCTP
&&
1779 il4
!= RTE_PTYPE_INNER_L4_ICMP
&&
1780 il4
!= RTE_PTYPE_INNER_L4_NONFRAG
)
1786 static int check_invalid_ptype_mapping(
1787 struct rte_pmd_i40e_ptype_mapping
*mapping_table
,
1792 for (i
= 0; i
< count
; i
++) {
1793 uint16_t ptype
= mapping_table
[i
].hw_ptype
;
1794 uint32_t pkt_type
= mapping_table
[i
].sw_ptype
;
1796 if (ptype
>= I40E_MAX_PKT_TYPE
)
1799 if (pkt_type
== RTE_PTYPE_UNKNOWN
)
1802 if (pkt_type
& RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK
)
1805 if (check_invalid_pkt_type(pkt_type
))
1813 rte_pmd_i40e_ptype_mapping_update(
1815 struct rte_pmd_i40e_ptype_mapping
*mapping_items
,
1819 struct rte_eth_dev
*dev
;
1820 struct i40e_adapter
*ad
;
1823 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1825 dev
= &rte_eth_devices
[port
];
1827 if (!is_i40e_supported(dev
))
1830 if (count
> I40E_MAX_PKT_TYPE
)
1833 if (check_invalid_ptype_mapping(mapping_items
, count
))
1836 ad
= I40E_DEV_PRIVATE_TO_ADAPTER(dev
->data
->dev_private
);
1839 for (i
= 0; i
< I40E_MAX_PKT_TYPE
; i
++)
1840 ad
->ptype_tbl
[i
] = RTE_PTYPE_UNKNOWN
;
1843 for (i
= 0; i
< count
; i
++)
1844 ad
->ptype_tbl
[mapping_items
[i
].hw_ptype
]
1845 = mapping_items
[i
].sw_ptype
;
1850 int rte_pmd_i40e_ptype_mapping_reset(uint8_t port
)
1852 struct rte_eth_dev
*dev
;
1854 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1856 dev
= &rte_eth_devices
[port
];
1858 if (!is_i40e_supported(dev
))
1861 i40e_set_default_ptype_table(dev
);
1866 int rte_pmd_i40e_ptype_mapping_get(
1868 struct rte_pmd_i40e_ptype_mapping
*mapping_items
,
1873 struct rte_eth_dev
*dev
;
1874 struct i40e_adapter
*ad
;
1878 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1880 dev
= &rte_eth_devices
[port
];
1882 if (!is_i40e_supported(dev
))
1885 ad
= I40E_DEV_PRIVATE_TO_ADAPTER(dev
->data
->dev_private
);
1887 for (i
= 0; i
< I40E_MAX_PKT_TYPE
; i
++) {
1890 if (valid_only
&& ad
->ptype_tbl
[i
] == RTE_PTYPE_UNKNOWN
)
1892 mapping_items
[n
].hw_ptype
= i
;
1893 mapping_items
[n
].sw_ptype
= ad
->ptype_tbl
[i
];
1901 int rte_pmd_i40e_ptype_mapping_replace(uint8_t port
,
1906 struct rte_eth_dev
*dev
;
1907 struct i40e_adapter
*ad
;
1910 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1912 dev
= &rte_eth_devices
[port
];
1914 if (!is_i40e_supported(dev
))
1917 if (!mask
&& check_invalid_pkt_type(target
))
1920 if (check_invalid_pkt_type(pkt_type
))
1923 ad
= I40E_DEV_PRIVATE_TO_ADAPTER(dev
->data
->dev_private
);
1925 for (i
= 0; i
< I40E_MAX_PKT_TYPE
; i
++) {
1927 if ((target
| ad
->ptype_tbl
[i
]) == target
&&
1928 (target
& ad
->ptype_tbl
[i
]))
1929 ad
->ptype_tbl
[i
] = pkt_type
;
1931 if (ad
->ptype_tbl
[i
] == target
)
1932 ad
->ptype_tbl
[i
] = pkt_type
;