1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
5 #include <rte_string_fns.h>
6 #include <rte_malloc.h>
9 #include "base/i40e_prototype.h"
10 #include "base/i40e_dcb.h"
11 #include "i40e_ethdev.h"
13 #include "i40e_rxtx.h"
14 #include "rte_pmd_i40e.h"
17 rte_pmd_i40e_ping_vfs(uint16_t port
, uint16_t vf
)
19 struct rte_eth_dev
*dev
;
22 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
24 dev
= &rte_eth_devices
[port
];
26 if (!is_i40e_supported(dev
))
29 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
31 if (vf
>= pf
->vf_num
|| !pf
->vfs
) {
32 PMD_DRV_LOG(ERR
, "Invalid argument.");
36 i40e_notify_vf_link_status(dev
, &pf
->vfs
[vf
]);
42 rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port
, uint16_t vf_id
, uint8_t on
)
44 struct rte_eth_dev
*dev
;
48 struct i40e_vsi_context ctxt
;
51 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
53 dev
= &rte_eth_devices
[port
];
55 if (!is_i40e_supported(dev
))
58 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
60 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
61 PMD_DRV_LOG(ERR
, "Invalid argument.");
65 vsi
= pf
->vfs
[vf_id
].vsi
;
67 PMD_DRV_LOG(ERR
, "Invalid VSI.");
71 /* Check if it has been already on or off */
72 if (vsi
->info
.valid_sections
&
73 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID
)) {
75 if ((vsi
->info
.sec_flags
&
76 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK
) ==
77 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK
)
78 return 0; /* already on */
80 if ((vsi
->info
.sec_flags
&
81 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK
) == 0)
82 return 0; /* already off */
86 vsi
->info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID
);
88 vsi
->info
.sec_flags
|= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK
;
90 vsi
->info
.sec_flags
&= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK
;
92 memset(&ctxt
, 0, sizeof(ctxt
));
93 rte_memcpy(&ctxt
.info
, &vsi
->info
, sizeof(vsi
->info
));
94 ctxt
.seid
= vsi
->seid
;
96 hw
= I40E_VSI_TO_HW(vsi
);
97 ret
= i40e_aq_update_vsi_params(hw
, &ctxt
, NULL
);
98 if (ret
!= I40E_SUCCESS
) {
100 PMD_DRV_LOG(ERR
, "Failed to update VSI params");
107 i40e_add_rm_all_vlan_filter(struct i40e_vsi
*vsi
, uint8_t add
)
111 struct i40e_hw
*hw
= I40E_VSI_TO_HW(vsi
);
112 struct i40e_aqc_add_remove_vlan_element_data vlan_data
= {0};
115 for (j
= 0; j
< I40E_VFTA_SIZE
; j
++) {
119 for (k
= 0; k
< I40E_UINT32_BIT_SIZE
; k
++) {
120 if (!(vsi
->vfta
[j
] & (1 << k
)))
123 vlan_id
= j
* I40E_UINT32_BIT_SIZE
+ k
;
127 vlan_data
.vlan_tag
= rte_cpu_to_le_16(vlan_id
);
129 ret
= i40e_aq_add_vlan(hw
, vsi
->seid
,
130 &vlan_data
, 1, NULL
);
132 ret
= i40e_aq_remove_vlan(hw
, vsi
->seid
,
133 &vlan_data
, 1, NULL
);
134 if (ret
!= I40E_SUCCESS
) {
136 "Failed to add/rm vlan filter");
146 rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port
, uint16_t vf_id
, uint8_t on
)
148 struct rte_eth_dev
*dev
;
150 struct i40e_vsi
*vsi
;
152 struct i40e_vsi_context ctxt
;
155 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
157 dev
= &rte_eth_devices
[port
];
159 if (!is_i40e_supported(dev
))
162 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
164 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
165 PMD_DRV_LOG(ERR
, "Invalid argument.");
169 vsi
= pf
->vfs
[vf_id
].vsi
;
171 PMD_DRV_LOG(ERR
, "Invalid VSI.");
175 /* Check if it has been already on or off */
176 if (vsi
->vlan_anti_spoof_on
== on
)
177 return 0; /* already on or off */
179 vsi
->vlan_anti_spoof_on
= on
;
180 if (!vsi
->vlan_filter_on
) {
181 ret
= i40e_add_rm_all_vlan_filter(vsi
, on
);
183 PMD_DRV_LOG(ERR
, "Failed to add/remove VLAN filters.");
188 vsi
->info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID
);
190 vsi
->info
.sec_flags
|= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK
;
192 vsi
->info
.sec_flags
&= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK
;
194 memset(&ctxt
, 0, sizeof(ctxt
));
195 rte_memcpy(&ctxt
.info
, &vsi
->info
, sizeof(vsi
->info
));
196 ctxt
.seid
= vsi
->seid
;
198 hw
= I40E_VSI_TO_HW(vsi
);
199 ret
= i40e_aq_update_vsi_params(hw
, &ctxt
, NULL
);
200 if (ret
!= I40E_SUCCESS
) {
202 PMD_DRV_LOG(ERR
, "Failed to update VSI params");
209 i40e_vsi_rm_mac_filter(struct i40e_vsi
*vsi
)
211 struct i40e_mac_filter
*f
;
212 struct i40e_macvlan_filter
*mv_f
;
214 enum rte_mac_filter_type filter_type
;
215 int ret
= I40E_SUCCESS
;
218 /* remove all the MACs */
219 TAILQ_FOREACH_SAFE(f
, &vsi
->mac_list
, next
, temp
) {
220 vlan_num
= vsi
->vlan_num
;
221 filter_type
= f
->mac_info
.filter_type
;
222 if (filter_type
== RTE_MACVLAN_PERFECT_MATCH
||
223 filter_type
== RTE_MACVLAN_HASH_MATCH
) {
225 PMD_DRV_LOG(ERR
, "VLAN number shouldn't be 0");
226 return I40E_ERR_PARAM
;
228 } else if (filter_type
== RTE_MAC_PERFECT_MATCH
||
229 filter_type
== RTE_MAC_HASH_MATCH
)
232 mv_f
= rte_zmalloc("macvlan_data", vlan_num
* sizeof(*mv_f
), 0);
234 PMD_DRV_LOG(ERR
, "failed to allocate memory");
235 return I40E_ERR_NO_MEMORY
;
238 for (i
= 0; i
< vlan_num
; i
++) {
239 mv_f
[i
].filter_type
= filter_type
;
240 rte_memcpy(&mv_f
[i
].macaddr
,
241 &f
->mac_info
.mac_addr
,
244 if (filter_type
== RTE_MACVLAN_PERFECT_MATCH
||
245 filter_type
== RTE_MACVLAN_HASH_MATCH
) {
246 ret
= i40e_find_all_vlan_for_mac(vsi
, mv_f
, vlan_num
,
247 &f
->mac_info
.mac_addr
);
248 if (ret
!= I40E_SUCCESS
) {
254 ret
= i40e_remove_macvlan_filters(vsi
, mv_f
, vlan_num
);
255 if (ret
!= I40E_SUCCESS
) {
268 i40e_vsi_restore_mac_filter(struct i40e_vsi
*vsi
)
270 struct i40e_mac_filter
*f
;
271 struct i40e_macvlan_filter
*mv_f
;
273 int ret
= I40E_SUCCESS
;
276 /* restore all the MACs */
277 TAILQ_FOREACH_SAFE(f
, &vsi
->mac_list
, next
, temp
) {
278 if ((f
->mac_info
.filter_type
== RTE_MACVLAN_PERFECT_MATCH
) ||
279 (f
->mac_info
.filter_type
== RTE_MACVLAN_HASH_MATCH
)) {
281 * If vlan_num is 0, that's the first time to add mac,
282 * set mask for vlan_id 0.
284 if (vsi
->vlan_num
== 0) {
285 i40e_set_vlan_filter(vsi
, 0, 1);
288 vlan_num
= vsi
->vlan_num
;
289 } else if ((f
->mac_info
.filter_type
== RTE_MAC_PERFECT_MATCH
) ||
290 (f
->mac_info
.filter_type
== RTE_MAC_HASH_MATCH
))
293 mv_f
= rte_zmalloc("macvlan_data", vlan_num
* sizeof(*mv_f
), 0);
295 PMD_DRV_LOG(ERR
, "failed to allocate memory");
296 return I40E_ERR_NO_MEMORY
;
299 for (i
= 0; i
< vlan_num
; i
++) {
300 mv_f
[i
].filter_type
= f
->mac_info
.filter_type
;
301 rte_memcpy(&mv_f
[i
].macaddr
,
302 &f
->mac_info
.mac_addr
,
306 if (f
->mac_info
.filter_type
== RTE_MACVLAN_PERFECT_MATCH
||
307 f
->mac_info
.filter_type
== RTE_MACVLAN_HASH_MATCH
) {
308 ret
= i40e_find_all_vlan_for_mac(vsi
, mv_f
, vlan_num
,
309 &f
->mac_info
.mac_addr
);
310 if (ret
!= I40E_SUCCESS
) {
316 ret
= i40e_add_macvlan_filters(vsi
, mv_f
, vlan_num
);
317 if (ret
!= I40E_SUCCESS
) {
330 i40e_vsi_set_tx_loopback(struct i40e_vsi
*vsi
, uint8_t on
)
332 struct i40e_vsi_context ctxt
;
339 hw
= I40E_VSI_TO_HW(vsi
);
341 /* Use the FW API if FW >= v5.0 */
342 if (hw
->aq
.fw_maj_ver
< 5 && hw
->mac
.type
!= I40E_MAC_X722
) {
343 PMD_INIT_LOG(ERR
, "FW < v5.0, cannot enable loopback");
347 /* Check if it has been already on or off */
348 if (vsi
->info
.valid_sections
&
349 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID
)) {
351 if ((vsi
->info
.switch_id
&
352 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
) ==
353 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
)
354 return 0; /* already on */
356 if ((vsi
->info
.switch_id
&
357 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
) == 0)
358 return 0; /* already off */
362 /* remove all the MAC and VLAN first */
363 ret
= i40e_vsi_rm_mac_filter(vsi
);
365 PMD_INIT_LOG(ERR
, "Failed to remove MAC filters.");
368 if (vsi
->vlan_anti_spoof_on
|| vsi
->vlan_filter_on
) {
369 ret
= i40e_add_rm_all_vlan_filter(vsi
, 0);
371 PMD_INIT_LOG(ERR
, "Failed to remove VLAN filters.");
376 vsi
->info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID
);
378 vsi
->info
.switch_id
|= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
;
380 vsi
->info
.switch_id
&= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
;
382 memset(&ctxt
, 0, sizeof(ctxt
));
383 rte_memcpy(&ctxt
.info
, &vsi
->info
, sizeof(vsi
->info
));
384 ctxt
.seid
= vsi
->seid
;
386 ret
= i40e_aq_update_vsi_params(hw
, &ctxt
, NULL
);
387 if (ret
!= I40E_SUCCESS
) {
388 PMD_DRV_LOG(ERR
, "Failed to update VSI params");
392 /* add all the MAC and VLAN back */
393 ret
= i40e_vsi_restore_mac_filter(vsi
);
396 if (vsi
->vlan_anti_spoof_on
|| vsi
->vlan_filter_on
) {
397 ret
= i40e_add_rm_all_vlan_filter(vsi
, 1);
406 rte_pmd_i40e_set_tx_loopback(uint16_t port
, uint8_t on
)
408 struct rte_eth_dev
*dev
;
410 struct i40e_pf_vf
*vf
;
411 struct i40e_vsi
*vsi
;
415 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
417 dev
= &rte_eth_devices
[port
];
419 if (!is_i40e_supported(dev
))
422 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
424 /* setup PF TX loopback */
426 ret
= i40e_vsi_set_tx_loopback(vsi
, on
);
430 /* setup TX loopback for all the VFs */
432 /* if no VF, do nothing. */
436 for (vf_id
= 0; vf_id
< pf
->vf_num
; vf_id
++) {
437 vf
= &pf
->vfs
[vf_id
];
440 ret
= i40e_vsi_set_tx_loopback(vsi
, on
);
449 rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port
, uint16_t vf_id
, uint8_t on
)
451 struct rte_eth_dev
*dev
;
453 struct i40e_vsi
*vsi
;
457 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
459 dev
= &rte_eth_devices
[port
];
461 if (!is_i40e_supported(dev
))
464 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
466 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
467 PMD_DRV_LOG(ERR
, "Invalid argument.");
471 vsi
= pf
->vfs
[vf_id
].vsi
;
473 PMD_DRV_LOG(ERR
, "Invalid VSI.");
477 hw
= I40E_VSI_TO_HW(vsi
);
479 ret
= i40e_aq_set_vsi_unicast_promiscuous(hw
, vsi
->seid
,
481 if (ret
!= I40E_SUCCESS
) {
483 PMD_DRV_LOG(ERR
, "Failed to set unicast promiscuous mode");
490 rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port
, uint16_t vf_id
, uint8_t on
)
492 struct rte_eth_dev
*dev
;
494 struct i40e_vsi
*vsi
;
498 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
500 dev
= &rte_eth_devices
[port
];
502 if (!is_i40e_supported(dev
))
505 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
507 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
508 PMD_DRV_LOG(ERR
, "Invalid argument.");
512 vsi
= pf
->vfs
[vf_id
].vsi
;
514 PMD_DRV_LOG(ERR
, "Invalid VSI.");
518 hw
= I40E_VSI_TO_HW(vsi
);
520 ret
= i40e_aq_set_vsi_multicast_promiscuous(hw
, vsi
->seid
,
522 if (ret
!= I40E_SUCCESS
) {
524 PMD_DRV_LOG(ERR
, "Failed to set multicast promiscuous mode");
531 rte_pmd_i40e_set_vf_mac_addr(uint16_t port
, uint16_t vf_id
,
532 struct ether_addr
*mac_addr
)
534 struct i40e_mac_filter
*f
;
535 struct rte_eth_dev
*dev
;
536 struct i40e_pf_vf
*vf
;
537 struct i40e_vsi
*vsi
;
541 if (i40e_validate_mac_addr((u8
*)mac_addr
) != I40E_SUCCESS
)
544 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
546 dev
= &rte_eth_devices
[port
];
548 if (!is_i40e_supported(dev
))
551 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
553 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
)
556 vf
= &pf
->vfs
[vf_id
];
559 PMD_DRV_LOG(ERR
, "Invalid VSI.");
563 ether_addr_copy(mac_addr
, &vf
->mac_addr
);
565 /* Remove all existing mac */
566 TAILQ_FOREACH_SAFE(f
, &vsi
->mac_list
, next
, temp
)
567 if (i40e_vsi_delete_mac(vsi
, &f
->mac_info
.mac_addr
)
569 PMD_DRV_LOG(WARNING
, "Delete MAC failed");
574 static const struct ether_addr null_mac_addr
;
577 rte_pmd_i40e_remove_vf_mac_addr(uint16_t port
, uint16_t vf_id
,
578 struct ether_addr
*mac_addr
)
580 struct rte_eth_dev
*dev
;
581 struct i40e_pf_vf
*vf
;
582 struct i40e_vsi
*vsi
;
585 if (i40e_validate_mac_addr((u8
*)mac_addr
) != I40E_SUCCESS
)
588 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
590 dev
= &rte_eth_devices
[port
];
592 if (!is_i40e_supported(dev
))
595 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
597 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
)
600 vf
= &pf
->vfs
[vf_id
];
603 PMD_DRV_LOG(ERR
, "Invalid VSI.");
607 if (is_same_ether_addr(mac_addr
, &vf
->mac_addr
))
608 /* Reset the mac with NULL address */
609 ether_addr_copy(&null_mac_addr
, &vf
->mac_addr
);
612 i40e_vsi_delete_mac(vsi
, mac_addr
);
617 /* Set vlan strip on/off for specific VF from host */
619 rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port
, uint16_t vf_id
, uint8_t on
)
621 struct rte_eth_dev
*dev
;
623 struct i40e_vsi
*vsi
;
626 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
628 dev
= &rte_eth_devices
[port
];
630 if (!is_i40e_supported(dev
))
633 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
635 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
636 PMD_DRV_LOG(ERR
, "Invalid argument.");
640 vsi
= pf
->vfs
[vf_id
].vsi
;
645 ret
= i40e_vsi_config_vlan_stripping(vsi
, !!on
);
646 if (ret
!= I40E_SUCCESS
) {
648 PMD_DRV_LOG(ERR
, "Failed to set VLAN stripping!");
654 int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port
, uint16_t vf_id
,
657 struct rte_eth_dev
*dev
;
660 struct i40e_vsi
*vsi
;
661 struct i40e_vsi_context ctxt
;
664 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
666 if (vlan_id
> ETHER_MAX_VLAN_ID
) {
667 PMD_DRV_LOG(ERR
, "Invalid VLAN ID.");
671 dev
= &rte_eth_devices
[port
];
673 if (!is_i40e_supported(dev
))
676 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
677 hw
= I40E_PF_TO_HW(pf
);
680 * return -ENODEV if SRIOV not enabled, VF number not configured
681 * or no queue assigned.
683 if (!hw
->func_caps
.sr_iov_1_1
|| pf
->vf_num
== 0 ||
687 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
688 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
692 vsi
= pf
->vfs
[vf_id
].vsi
;
694 PMD_DRV_LOG(ERR
, "Invalid VSI.");
698 vsi
->info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID
);
699 vsi
->info
.pvid
= vlan_id
;
701 vsi
->info
.port_vlan_flags
|= I40E_AQ_VSI_PVLAN_INSERT_PVID
;
703 vsi
->info
.port_vlan_flags
&= ~I40E_AQ_VSI_PVLAN_INSERT_PVID
;
705 memset(&ctxt
, 0, sizeof(ctxt
));
706 rte_memcpy(&ctxt
.info
, &vsi
->info
, sizeof(vsi
->info
));
707 ctxt
.seid
= vsi
->seid
;
709 hw
= I40E_VSI_TO_HW(vsi
);
710 ret
= i40e_aq_update_vsi_params(hw
, &ctxt
, NULL
);
711 if (ret
!= I40E_SUCCESS
) {
713 PMD_DRV_LOG(ERR
, "Failed to update VSI params");
719 int rte_pmd_i40e_set_vf_broadcast(uint16_t port
, uint16_t vf_id
,
722 struct rte_eth_dev
*dev
;
724 struct i40e_vsi
*vsi
;
726 struct i40e_mac_filter_info filter
;
727 struct ether_addr broadcast
= {
728 .addr_bytes
= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
731 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
734 PMD_DRV_LOG(ERR
, "on should be 0 or 1.");
738 dev
= &rte_eth_devices
[port
];
740 if (!is_i40e_supported(dev
))
743 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
744 hw
= I40E_PF_TO_HW(pf
);
746 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
747 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
752 * return -ENODEV if SRIOV not enabled, VF number not configured
753 * or no queue assigned.
755 if (!hw
->func_caps
.sr_iov_1_1
|| pf
->vf_num
== 0 ||
756 pf
->vf_nb_qps
== 0) {
757 PMD_DRV_LOG(ERR
, "SRIOV is not enabled or no queue.");
761 vsi
= pf
->vfs
[vf_id
].vsi
;
763 PMD_DRV_LOG(ERR
, "Invalid VSI.");
768 rte_memcpy(&filter
.mac_addr
, &broadcast
, ETHER_ADDR_LEN
);
769 filter
.filter_type
= RTE_MACVLAN_PERFECT_MATCH
;
770 ret
= i40e_vsi_add_mac(vsi
, &filter
);
772 ret
= i40e_vsi_delete_mac(vsi
, &broadcast
);
775 if (ret
!= I40E_SUCCESS
&& ret
!= I40E_ERR_PARAM
) {
777 PMD_DRV_LOG(ERR
, "Failed to set VSI broadcast");
785 int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port
, uint16_t vf_id
, uint8_t on
)
787 struct rte_eth_dev
*dev
;
790 struct i40e_vsi
*vsi
;
791 struct i40e_vsi_context ctxt
;
794 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
797 PMD_DRV_LOG(ERR
, "on should be 0 or 1.");
801 dev
= &rte_eth_devices
[port
];
803 if (!is_i40e_supported(dev
))
806 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
807 hw
= I40E_PF_TO_HW(pf
);
810 * return -ENODEV if SRIOV not enabled, VF number not configured
811 * or no queue assigned.
813 if (!hw
->func_caps
.sr_iov_1_1
|| pf
->vf_num
== 0 ||
814 pf
->vf_nb_qps
== 0) {
815 PMD_DRV_LOG(ERR
, "SRIOV is not enabled or no queue.");
819 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
820 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
824 vsi
= pf
->vfs
[vf_id
].vsi
;
826 PMD_DRV_LOG(ERR
, "Invalid VSI.");
830 vsi
->info
.valid_sections
= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID
);
832 vsi
->info
.port_vlan_flags
|= I40E_AQ_VSI_PVLAN_MODE_TAGGED
;
833 vsi
->info
.port_vlan_flags
&= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED
;
835 vsi
->info
.port_vlan_flags
|= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED
;
836 vsi
->info
.port_vlan_flags
&= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED
;
839 memset(&ctxt
, 0, sizeof(ctxt
));
840 rte_memcpy(&ctxt
.info
, &vsi
->info
, sizeof(vsi
->info
));
841 ctxt
.seid
= vsi
->seid
;
843 hw
= I40E_VSI_TO_HW(vsi
);
844 ret
= i40e_aq_update_vsi_params(hw
, &ctxt
, NULL
);
845 if (ret
!= I40E_SUCCESS
) {
847 PMD_DRV_LOG(ERR
, "Failed to update VSI params");
854 i40e_vlan_filter_count(struct i40e_vsi
*vsi
)
860 for (j
= 0; j
< I40E_VFTA_SIZE
; j
++) {
864 for (k
= 0; k
< I40E_UINT32_BIT_SIZE
; k
++) {
865 if (!(vsi
->vfta
[j
] & (1 << k
)))
868 vlan_id
= j
* I40E_UINT32_BIT_SIZE
+ k
;
879 int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port
, uint16_t vlan_id
,
880 uint64_t vf_mask
, uint8_t on
)
882 struct rte_eth_dev
*dev
;
885 struct i40e_vsi
*vsi
;
887 int ret
= I40E_SUCCESS
;
889 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
891 dev
= &rte_eth_devices
[port
];
893 if (!is_i40e_supported(dev
))
896 if (vlan_id
> ETHER_MAX_VLAN_ID
|| !vlan_id
) {
897 PMD_DRV_LOG(ERR
, "Invalid VLAN ID.");
902 PMD_DRV_LOG(ERR
, "No VF.");
907 PMD_DRV_LOG(ERR
, "on is should be 0 or 1.");
911 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
912 hw
= I40E_PF_TO_HW(pf
);
915 * return -ENODEV if SRIOV not enabled, VF number not configured
916 * or no queue assigned.
918 if (!hw
->func_caps
.sr_iov_1_1
|| pf
->vf_num
== 0 ||
919 pf
->vf_nb_qps
== 0) {
920 PMD_DRV_LOG(ERR
, "SRIOV is not enabled or no queue.");
924 for (vf_idx
= 0; vf_idx
< pf
->vf_num
&& ret
== I40E_SUCCESS
; vf_idx
++) {
925 if (vf_mask
& ((uint64_t)(1ULL << vf_idx
))) {
926 vsi
= pf
->vfs
[vf_idx
].vsi
;
928 if (!vsi
->vlan_filter_on
) {
929 vsi
->vlan_filter_on
= true;
930 i40e_aq_set_vsi_vlan_promisc(hw
,
934 if (!vsi
->vlan_anti_spoof_on
)
935 i40e_add_rm_all_vlan_filter(
938 ret
= i40e_vsi_add_vlan(vsi
, vlan_id
);
940 ret
= i40e_vsi_delete_vlan(vsi
, vlan_id
);
942 if (!i40e_vlan_filter_count(vsi
)) {
943 vsi
->vlan_filter_on
= false;
944 i40e_aq_set_vsi_vlan_promisc(hw
,
953 if (ret
!= I40E_SUCCESS
) {
955 PMD_DRV_LOG(ERR
, "Failed to set VF VLAN filter, on = %d", on
);
962 rte_pmd_i40e_get_vf_stats(uint16_t port
,
964 struct rte_eth_stats
*stats
)
966 struct rte_eth_dev
*dev
;
968 struct i40e_vsi
*vsi
;
970 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
972 dev
= &rte_eth_devices
[port
];
974 if (!is_i40e_supported(dev
))
977 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
979 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
980 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
984 vsi
= pf
->vfs
[vf_id
].vsi
;
986 PMD_DRV_LOG(ERR
, "Invalid VSI.");
990 i40e_update_vsi_stats(vsi
);
992 stats
->ipackets
= vsi
->eth_stats
.rx_unicast
+
993 vsi
->eth_stats
.rx_multicast
+
994 vsi
->eth_stats
.rx_broadcast
;
995 stats
->opackets
= vsi
->eth_stats
.tx_unicast
+
996 vsi
->eth_stats
.tx_multicast
+
997 vsi
->eth_stats
.tx_broadcast
;
998 stats
->ibytes
= vsi
->eth_stats
.rx_bytes
;
999 stats
->obytes
= vsi
->eth_stats
.tx_bytes
;
1000 stats
->ierrors
= vsi
->eth_stats
.rx_discards
;
1001 stats
->oerrors
= vsi
->eth_stats
.tx_errors
+ vsi
->eth_stats
.tx_discards
;
1007 rte_pmd_i40e_reset_vf_stats(uint16_t port
,
1010 struct rte_eth_dev
*dev
;
1012 struct i40e_vsi
*vsi
;
1014 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1016 dev
= &rte_eth_devices
[port
];
1018 if (!is_i40e_supported(dev
))
1021 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
1023 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
1024 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
1028 vsi
= pf
->vfs
[vf_id
].vsi
;
1030 PMD_DRV_LOG(ERR
, "Invalid VSI.");
1034 vsi
->offset_loaded
= false;
1035 i40e_update_vsi_stats(vsi
);
1041 rte_pmd_i40e_set_vf_max_bw(uint16_t port
, uint16_t vf_id
, uint32_t bw
)
1043 struct rte_eth_dev
*dev
;
1045 struct i40e_vsi
*vsi
;
1050 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1052 dev
= &rte_eth_devices
[port
];
1054 if (!is_i40e_supported(dev
))
1057 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
1059 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
1060 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
1064 vsi
= pf
->vfs
[vf_id
].vsi
;
1066 PMD_DRV_LOG(ERR
, "Invalid VSI.");
1070 if (bw
> I40E_QOS_BW_MAX
) {
1071 PMD_DRV_LOG(ERR
, "Bandwidth should not be larger than %dMbps.",
1076 if (bw
% I40E_QOS_BW_GRANULARITY
) {
1077 PMD_DRV_LOG(ERR
, "Bandwidth should be the multiple of %dMbps.",
1078 I40E_QOS_BW_GRANULARITY
);
1082 bw
/= I40E_QOS_BW_GRANULARITY
;
1084 hw
= I40E_VSI_TO_HW(vsi
);
1087 if (bw
== vsi
->bw_info
.bw_limit
) {
1089 "No change for VF max bandwidth. Nothing to do.");
1094 * VF bandwidth limitation and TC bandwidth limitation cannot be
1095 * enabled in parallel, quit if TC bandwidth limitation is enabled.
1097 * If bw is 0, means disable bandwidth limitation. Then no need to
1098 * check TC bandwidth limitation.
1101 for (i
= 0; i
< I40E_MAX_TRAFFIC_CLASS
; i
++) {
1102 if ((vsi
->enabled_tc
& BIT_ULL(i
)) &&
1103 vsi
->bw_info
.bw_ets_credits
[i
])
1106 if (i
!= I40E_MAX_TRAFFIC_CLASS
) {
1108 "TC max bandwidth has been set on this VF,"
1109 " please disable it first.");
1114 ret
= i40e_aq_config_vsi_bw_limit(hw
, vsi
->seid
, (uint16_t)bw
, 0, NULL
);
1117 "Failed to set VF %d bandwidth, err(%d).",
1122 /* Store the configuration. */
1123 vsi
->bw_info
.bw_limit
= (uint16_t)bw
;
1124 vsi
->bw_info
.bw_max
= 0;
1130 rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port
, uint16_t vf_id
,
1131 uint8_t tc_num
, uint8_t *bw_weight
)
1133 struct rte_eth_dev
*dev
;
1135 struct i40e_vsi
*vsi
;
1137 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw
;
1141 bool b_change
= false;
1143 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1145 dev
= &rte_eth_devices
[port
];
1147 if (!is_i40e_supported(dev
))
1150 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
1152 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
1153 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
1157 vsi
= pf
->vfs
[vf_id
].vsi
;
1159 PMD_DRV_LOG(ERR
, "Invalid VSI.");
1163 if (tc_num
> I40E_MAX_TRAFFIC_CLASS
) {
1164 PMD_DRV_LOG(ERR
, "TCs should be no more than %d.",
1165 I40E_MAX_TRAFFIC_CLASS
);
1170 for (i
= 0; i
< I40E_MAX_TRAFFIC_CLASS
; i
++) {
1171 if (vsi
->enabled_tc
& BIT_ULL(i
))
1174 if (sum
!= tc_num
) {
1176 "Weight should be set for all %d enabled TCs.",
1182 for (i
= 0; i
< tc_num
; i
++) {
1183 if (!bw_weight
[i
]) {
1185 "The weight should be 1 at least.");
1188 sum
+= bw_weight
[i
];
1192 "The summary of the TC weight should be 100.");
1197 * Create the configuration for all the TCs.
1199 memset(&tc_bw
, 0, sizeof(tc_bw
));
1200 tc_bw
.tc_valid_bits
= vsi
->enabled_tc
;
1202 for (i
= 0; i
< I40E_MAX_TRAFFIC_CLASS
; i
++) {
1203 if (vsi
->enabled_tc
& BIT_ULL(i
)) {
1205 vsi
->bw_info
.bw_ets_share_credits
[i
])
1208 tc_bw
.tc_bw_credits
[i
] = bw_weight
[j
];
1216 "No change for TC allocated bandwidth."
1221 hw
= I40E_VSI_TO_HW(vsi
);
1223 ret
= i40e_aq_config_vsi_tc_bw(hw
, vsi
->seid
, &tc_bw
, NULL
);
1226 "Failed to set VF %d TC bandwidth weight, err(%d).",
1231 /* Store the configuration. */
1233 for (i
= 0; i
< I40E_MAX_TRAFFIC_CLASS
; i
++) {
1234 if (vsi
->enabled_tc
& BIT_ULL(i
)) {
1235 vsi
->bw_info
.bw_ets_share_credits
[i
] = bw_weight
[j
];
1244 rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port
, uint16_t vf_id
,
1245 uint8_t tc_no
, uint32_t bw
)
1247 struct rte_eth_dev
*dev
;
1249 struct i40e_vsi
*vsi
;
1251 struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw
;
1255 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1257 dev
= &rte_eth_devices
[port
];
1259 if (!is_i40e_supported(dev
))
1262 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
1264 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
) {
1265 PMD_DRV_LOG(ERR
, "Invalid VF ID.");
1269 vsi
= pf
->vfs
[vf_id
].vsi
;
1271 PMD_DRV_LOG(ERR
, "Invalid VSI.");
1275 if (bw
> I40E_QOS_BW_MAX
) {
1276 PMD_DRV_LOG(ERR
, "Bandwidth should not be larger than %dMbps.",
1281 if (bw
% I40E_QOS_BW_GRANULARITY
) {
1282 PMD_DRV_LOG(ERR
, "Bandwidth should be the multiple of %dMbps.",
1283 I40E_QOS_BW_GRANULARITY
);
1287 bw
/= I40E_QOS_BW_GRANULARITY
;
1289 if (tc_no
>= I40E_MAX_TRAFFIC_CLASS
) {
1290 PMD_DRV_LOG(ERR
, "TC No. should be less than %d.",
1291 I40E_MAX_TRAFFIC_CLASS
);
1295 hw
= I40E_VSI_TO_HW(vsi
);
1297 if (!(vsi
->enabled_tc
& BIT_ULL(tc_no
))) {
1298 PMD_DRV_LOG(ERR
, "VF %d TC %d isn't enabled.",
1304 if (bw
== vsi
->bw_info
.bw_ets_credits
[tc_no
]) {
1306 "No change for TC max bandwidth. Nothing to do.");
1311 * VF bandwidth limitation and TC bandwidth limitation cannot be
1312 * enabled in parallel, disable VF bandwidth limitation if it's
1314 * If bw is 0, means disable bandwidth limitation. Then no need to
1315 * care about VF bandwidth limitation configuration.
1317 if (bw
&& vsi
->bw_info
.bw_limit
) {
1318 ret
= i40e_aq_config_vsi_bw_limit(hw
, vsi
->seid
, 0, 0, NULL
);
1321 "Failed to disable VF(%d)"
1322 " bandwidth limitation, err(%d).",
1328 "VF max bandwidth is disabled according"
1329 " to TC max bandwidth setting.");
1333 * Get all the TCs' info to create a whole picture.
1334 * Because the incremental change isn't permitted.
1336 memset(&tc_bw
, 0, sizeof(tc_bw
));
1337 tc_bw
.tc_valid_bits
= vsi
->enabled_tc
;
1338 for (i
= 0; i
< I40E_MAX_TRAFFIC_CLASS
; i
++) {
1339 if (vsi
->enabled_tc
& BIT_ULL(i
)) {
1340 tc_bw
.tc_bw_credits
[i
] =
1342 vsi
->bw_info
.bw_ets_credits
[i
]);
1345 tc_bw
.tc_bw_credits
[tc_no
] = rte_cpu_to_le_16((uint16_t)bw
);
1347 ret
= i40e_aq_config_vsi_ets_sla_bw_limit(hw
, vsi
->seid
, &tc_bw
, NULL
);
1350 "Failed to set VF %d TC %d max bandwidth, err(%d).",
1355 /* Store the configuration. */
1356 vsi
->bw_info
.bw_ets_credits
[tc_no
] = (uint16_t)bw
;
1362 rte_pmd_i40e_set_tc_strict_prio(uint16_t port
, uint8_t tc_map
)
1364 struct rte_eth_dev
*dev
;
1366 struct i40e_vsi
*vsi
;
1367 struct i40e_veb
*veb
;
1369 struct i40e_aqc_configure_switching_comp_ets_data ets_data
;
1373 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1375 dev
= &rte_eth_devices
[port
];
1377 if (!is_i40e_supported(dev
))
1380 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
1384 PMD_DRV_LOG(ERR
, "Invalid VSI.");
1390 PMD_DRV_LOG(ERR
, "Invalid VEB.");
1394 if ((tc_map
& veb
->enabled_tc
) != tc_map
) {
1396 "TC bitmap isn't the subset of enabled TCs 0x%x.",
1401 if (tc_map
== veb
->strict_prio_tc
) {
1402 PMD_DRV_LOG(INFO
, "No change for TC bitmap. Nothing to do.");
1406 hw
= I40E_VSI_TO_HW(vsi
);
1408 /* Disable DCBx if it's the first time to set strict priority. */
1409 if (!veb
->strict_prio_tc
) {
1410 ret
= i40e_aq_stop_lldp(hw
, true, NULL
);
1413 "Failed to disable DCBx as it's already"
1417 "DCBx is disabled according to strict"
1418 " priority setting.");
1421 memset(&ets_data
, 0, sizeof(ets_data
));
1422 ets_data
.tc_valid_bits
= veb
->enabled_tc
;
1423 ets_data
.seepage
= I40E_AQ_ETS_SEEPAGE_EN_MASK
;
1424 ets_data
.tc_strict_priority_flags
= tc_map
;
1425 /* Get all TCs' bandwidth. */
1426 for (i
= 0; i
< I40E_MAX_TRAFFIC_CLASS
; i
++) {
1427 if (veb
->enabled_tc
& BIT_ULL(i
)) {
1428 /* For rubust, if bandwidth is 0, use 1 instead. */
1429 if (veb
->bw_info
.bw_ets_share_credits
[i
])
1430 ets_data
.tc_bw_share_credits
[i
] =
1431 veb
->bw_info
.bw_ets_share_credits
[i
];
1433 ets_data
.tc_bw_share_credits
[i
] =
1434 I40E_QOS_BW_WEIGHT_MIN
;
1438 if (!veb
->strict_prio_tc
)
1439 ret
= i40e_aq_config_switch_comp_ets(
1440 hw
, veb
->uplink_seid
,
1441 &ets_data
, i40e_aqc_opc_enable_switching_comp_ets
,
1444 ret
= i40e_aq_config_switch_comp_ets(
1445 hw
, veb
->uplink_seid
,
1446 &ets_data
, i40e_aqc_opc_modify_switching_comp_ets
,
1449 ret
= i40e_aq_config_switch_comp_ets(
1450 hw
, veb
->uplink_seid
,
1451 &ets_data
, i40e_aqc_opc_disable_switching_comp_ets
,
1456 "Failed to set TCs' strict priority mode."
1461 veb
->strict_prio_tc
= tc_map
;
1463 /* Enable DCBx again, if all the TCs' strict priority disabled. */
1465 ret
= i40e_aq_start_lldp(hw
, NULL
);
1468 "Failed to enable DCBx, err(%d).", ret
);
1473 "DCBx is enabled again according to strict"
1474 " priority setting.");
1480 #define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
1481 #define I40E_MAX_PROFILE_NUM 16
1484 i40e_generate_profile_info_sec(char *name
, struct i40e_ddp_version
*version
,
1485 uint32_t track_id
, uint8_t *profile_info_sec
,
1488 struct i40e_profile_section_header
*sec
= NULL
;
1489 struct i40e_profile_info
*pinfo
;
1491 sec
= (struct i40e_profile_section_header
*)profile_info_sec
;
1493 sec
->data_end
= sizeof(struct i40e_profile_section_header
) +
1494 sizeof(struct i40e_profile_info
);
1495 sec
->section
.type
= SECTION_TYPE_INFO
;
1496 sec
->section
.offset
= sizeof(struct i40e_profile_section_header
);
1497 sec
->section
.size
= sizeof(struct i40e_profile_info
);
1498 pinfo
= (struct i40e_profile_info
*)(profile_info_sec
+
1499 sec
->section
.offset
);
1500 pinfo
->track_id
= track_id
;
1501 memcpy(pinfo
->name
, name
, I40E_DDP_NAME_SIZE
);
1502 memcpy(&pinfo
->version
, version
, sizeof(struct i40e_ddp_version
));
1504 pinfo
->op
= I40E_DDP_ADD_TRACKID
;
1506 pinfo
->op
= I40E_DDP_REMOVE_TRACKID
;
1509 static enum i40e_status_code
1510 i40e_add_rm_profile_info(struct i40e_hw
*hw
, uint8_t *profile_info_sec
)
1512 enum i40e_status_code status
= I40E_SUCCESS
;
1513 struct i40e_profile_section_header
*sec
;
1515 uint32_t offset
= 0;
1518 sec
= (struct i40e_profile_section_header
*)profile_info_sec
;
1519 track_id
= ((struct i40e_profile_info
*)(profile_info_sec
+
1520 sec
->section
.offset
))->track_id
;
1522 status
= i40e_aq_write_ddp(hw
, (void *)sec
, sec
->data_end
,
1523 track_id
, &offset
, &info
, NULL
);
1525 PMD_DRV_LOG(ERR
, "Failed to add/remove profile info: "
1526 "offset %d, info %d",
1532 /* Check if the profile info exists */
1534 i40e_check_profile_info(uint16_t port
, uint8_t *profile_info_sec
)
1536 struct rte_eth_dev
*dev
= &rte_eth_devices
[port
];
1537 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1539 struct rte_pmd_i40e_profile_list
*p_list
;
1540 struct rte_pmd_i40e_profile_info
*pinfo
, *p
;
1543 static const uint32_t group_mask
= 0x00ff0000;
1545 pinfo
= (struct rte_pmd_i40e_profile_info
*)(profile_info_sec
+
1546 sizeof(struct i40e_profile_section_header
));
1547 if (pinfo
->track_id
== 0) {
1548 PMD_DRV_LOG(INFO
, "Read-only profile.");
1551 buff
= rte_zmalloc("pinfo_list",
1552 (I40E_PROFILE_INFO_SIZE
* I40E_MAX_PROFILE_NUM
+ 4),
1555 PMD_DRV_LOG(ERR
, "failed to allocate memory");
1559 ret
= i40e_aq_get_ddp_list(
1561 (I40E_PROFILE_INFO_SIZE
* I40E_MAX_PROFILE_NUM
+ 4),
1564 PMD_DRV_LOG(ERR
, "Failed to get profile info list.");
1568 p_list
= (struct rte_pmd_i40e_profile_list
*)buff
;
1569 for (i
= 0; i
< p_list
->p_count
; i
++) {
1570 p
= &p_list
->p_info
[i
];
1571 if (pinfo
->track_id
== p
->track_id
) {
1572 PMD_DRV_LOG(INFO
, "Profile exists.");
1577 /* profile with group id 0xff is compatible with any other profile */
1578 if ((pinfo
->track_id
& group_mask
) == group_mask
) {
1582 for (i
= 0; i
< p_list
->p_count
; i
++) {
1583 p
= &p_list
->p_info
[i
];
1584 if ((p
->track_id
& group_mask
) == 0) {
1585 PMD_DRV_LOG(INFO
, "Profile of the group 0 exists.");
1590 for (i
= 0; i
< p_list
->p_count
; i
++) {
1591 p
= &p_list
->p_info
[i
];
1592 if ((p
->track_id
& group_mask
) == group_mask
)
1594 if ((pinfo
->track_id
& group_mask
) !=
1595 (p
->track_id
& group_mask
)) {
1596 PMD_DRV_LOG(INFO
, "Profile of different group exists.");
1607 rte_pmd_i40e_process_ddp_package(uint16_t port
, uint8_t *buff
,
1609 enum rte_pmd_i40e_package_op op
)
1611 struct rte_eth_dev
*dev
;
1613 struct i40e_package_header
*pkg_hdr
;
1614 struct i40e_generic_seg_header
*profile_seg_hdr
;
1615 struct i40e_generic_seg_header
*metadata_seg_hdr
;
1617 uint8_t *profile_info_sec
;
1619 enum i40e_status_code status
= I40E_SUCCESS
;
1620 static const uint32_t type_mask
= 0xff000000;
1622 if (op
!= RTE_PMD_I40E_PKG_OP_WR_ADD
&&
1623 op
!= RTE_PMD_I40E_PKG_OP_WR_ONLY
&&
1624 op
!= RTE_PMD_I40E_PKG_OP_WR_DEL
) {
1625 PMD_DRV_LOG(ERR
, "Operation not supported.");
1629 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
1631 dev
= &rte_eth_devices
[port
];
1633 if (!is_i40e_supported(dev
))
1636 hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1638 if (size
< (sizeof(struct i40e_package_header
) +
1639 sizeof(struct i40e_metadata_segment
) +
1640 sizeof(uint32_t) * 2)) {
1641 PMD_DRV_LOG(ERR
, "Buff is invalid.");
1645 pkg_hdr
= (struct i40e_package_header
*)buff
;
1648 PMD_DRV_LOG(ERR
, "Failed to fill the package structure");
1652 if (pkg_hdr
->segment_count
< 2) {
1653 PMD_DRV_LOG(ERR
, "Segment_count should be 2 at least.");
1657 /* Find metadata segment */
1658 metadata_seg_hdr
= i40e_find_segment_in_package(SEGMENT_TYPE_METADATA
,
1660 if (!metadata_seg_hdr
) {
1661 PMD_DRV_LOG(ERR
, "Failed to find metadata segment header");
1664 track_id
= ((struct i40e_metadata_segment
*)metadata_seg_hdr
)->track_id
;
1665 if (track_id
== I40E_DDP_TRACKID_INVALID
) {
1666 PMD_DRV_LOG(ERR
, "Invalid track_id");
1670 /* force read-only track_id for type 0 */
1671 if ((track_id
& type_mask
) == 0)
1674 /* Find profile segment */
1675 profile_seg_hdr
= i40e_find_segment_in_package(SEGMENT_TYPE_I40E
,
1677 if (!profile_seg_hdr
) {
1678 PMD_DRV_LOG(ERR
, "Failed to find profile segment header");
1682 profile_info_sec
= rte_zmalloc(
1683 "i40e_profile_info",
1684 sizeof(struct i40e_profile_section_header
) +
1685 sizeof(struct i40e_profile_info
),
1687 if (!profile_info_sec
) {
1688 PMD_DRV_LOG(ERR
, "Failed to allocate memory");
1692 /* Check if the profile already loaded */
1693 i40e_generate_profile_info_sec(
1694 ((struct i40e_profile_segment
*)profile_seg_hdr
)->name
,
1695 &((struct i40e_profile_segment
*)profile_seg_hdr
)->version
,
1696 track_id
, profile_info_sec
,
1697 op
== RTE_PMD_I40E_PKG_OP_WR_ADD
);
1698 is_exist
= i40e_check_profile_info(port
, profile_info_sec
);
1700 PMD_DRV_LOG(ERR
, "Failed to check profile.");
1701 rte_free(profile_info_sec
);
1705 if (op
== RTE_PMD_I40E_PKG_OP_WR_ADD
) {
1708 PMD_DRV_LOG(ERR
, "Profile already exists.");
1709 else if (is_exist
== 2)
1710 PMD_DRV_LOG(ERR
, "Profile of group 0 already exists.");
1711 else if (is_exist
== 3)
1712 PMD_DRV_LOG(ERR
, "Profile of different group already exists");
1713 i40e_update_customized_info(dev
, buff
, size
, op
);
1714 rte_free(profile_info_sec
);
1717 } else if (op
== RTE_PMD_I40E_PKG_OP_WR_DEL
) {
1718 if (is_exist
!= 1) {
1719 PMD_DRV_LOG(ERR
, "Profile does not exist.");
1720 rte_free(profile_info_sec
);
1725 if (op
== RTE_PMD_I40E_PKG_OP_WR_DEL
) {
1726 status
= i40e_rollback_profile(
1728 (struct i40e_profile_segment
*)profile_seg_hdr
,
1731 PMD_DRV_LOG(ERR
, "Failed to write profile for delete.");
1732 rte_free(profile_info_sec
);
1736 status
= i40e_write_profile(
1738 (struct i40e_profile_segment
*)profile_seg_hdr
,
1741 if (op
== RTE_PMD_I40E_PKG_OP_WR_ADD
)
1742 PMD_DRV_LOG(ERR
, "Failed to write profile for add.");
1744 PMD_DRV_LOG(ERR
, "Failed to write profile.");
1745 rte_free(profile_info_sec
);
1750 if (track_id
&& (op
!= RTE_PMD_I40E_PKG_OP_WR_ONLY
)) {
1751 /* Modify loaded profiles info list */
1752 status
= i40e_add_rm_profile_info(hw
, profile_info_sec
);
1754 if (op
== RTE_PMD_I40E_PKG_OP_WR_ADD
)
1755 PMD_DRV_LOG(ERR
, "Failed to add profile to info list.");
1757 PMD_DRV_LOG(ERR
, "Failed to delete profile from info list.");
1761 if (op
== RTE_PMD_I40E_PKG_OP_WR_ADD
||
1762 op
== RTE_PMD_I40E_PKG_OP_WR_DEL
)
1763 i40e_update_customized_info(dev
, buff
, size
, op
);
1765 rte_free(profile_info_sec
);
1769 /* Get number of tvl records in the section */
1771 i40e_get_tlv_section_size(struct i40e_profile_section_header
*sec
)
1773 unsigned int i
, nb_rec
, nb_tlv
= 0;
1774 struct i40e_profile_tlv_section_record
*tlv
;
1779 /* get number of records in the section */
1780 nb_rec
= sec
->section
.size
/
1781 sizeof(struct i40e_profile_tlv_section_record
);
1782 for (i
= 0; i
< nb_rec
; ) {
1783 tlv
= (struct i40e_profile_tlv_section_record
*)&sec
[1 + i
];
1790 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff
, uint32_t pkg_size
,
1791 uint8_t *info_buff
, uint32_t info_size
,
1792 enum rte_pmd_i40e_package_info type
)
1795 struct i40e_package_header
*pkg_hdr
;
1796 struct i40e_generic_seg_header
*i40e_seg_hdr
;
1797 struct i40e_generic_seg_header
*note_seg_hdr
;
1798 struct i40e_generic_seg_header
*metadata_seg_hdr
;
1801 PMD_DRV_LOG(ERR
, "Output info buff is invalid.");
1805 if (!pkg_buff
|| pkg_size
< (sizeof(struct i40e_package_header
) +
1806 sizeof(struct i40e_metadata_segment
) +
1807 sizeof(uint32_t) * 2)) {
1808 PMD_DRV_LOG(ERR
, "Package buff is invalid.");
1812 pkg_hdr
= (struct i40e_package_header
*)pkg_buff
;
1813 if (pkg_hdr
->segment_count
< 2) {
1814 PMD_DRV_LOG(ERR
, "Segment_count should be 2 at least.");
1818 /* Find metadata segment */
1819 metadata_seg_hdr
= i40e_find_segment_in_package(SEGMENT_TYPE_METADATA
,
1822 /* Find global notes segment */
1823 note_seg_hdr
= i40e_find_segment_in_package(SEGMENT_TYPE_NOTES
,
1826 /* Find i40e profile segment */
1827 i40e_seg_hdr
= i40e_find_segment_in_package(SEGMENT_TYPE_I40E
, pkg_hdr
);
1829 /* get global header info */
1830 if (type
== RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER
) {
1831 struct rte_pmd_i40e_profile_info
*info
=
1832 (struct rte_pmd_i40e_profile_info
*)info_buff
;
1834 if (info_size
< sizeof(struct rte_pmd_i40e_profile_info
)) {
1835 PMD_DRV_LOG(ERR
, "Output info buff size is invalid.");
1839 if (!metadata_seg_hdr
) {
1840 PMD_DRV_LOG(ERR
, "Failed to find metadata segment header");
1844 memset(info
, 0, sizeof(struct rte_pmd_i40e_profile_info
));
1845 info
->owner
= RTE_PMD_I40E_DDP_OWNER_UNKNOWN
;
1847 ((struct i40e_metadata_segment
*)metadata_seg_hdr
)->track_id
;
1850 ((struct i40e_metadata_segment
*)metadata_seg_hdr
)->name
,
1851 I40E_DDP_NAME_SIZE
);
1852 memcpy(&info
->version
,
1853 &((struct i40e_metadata_segment
*)metadata_seg_hdr
)->version
,
1854 sizeof(struct i40e_ddp_version
));
1855 return I40E_SUCCESS
;
1858 /* get global note size */
1859 if (type
== RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE
) {
1860 if (info_size
< sizeof(uint32_t)) {
1861 PMD_DRV_LOG(ERR
, "Invalid information buffer size");
1864 if (note_seg_hdr
== NULL
)
1867 ret_size
= note_seg_hdr
->size
;
1868 *(uint32_t *)info_buff
= ret_size
;
1869 return I40E_SUCCESS
;
1872 /* get global note */
1873 if (type
== RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES
) {
1874 if (note_seg_hdr
== NULL
)
1876 if (info_size
< note_seg_hdr
->size
) {
1877 PMD_DRV_LOG(ERR
, "Information buffer size is too small");
1880 memcpy(info_buff
, ¬e_seg_hdr
[1], note_seg_hdr
->size
);
1881 return I40E_SUCCESS
;
1884 /* get i40e segment header info */
1885 if (type
== RTE_PMD_I40E_PKG_INFO_HEADER
) {
1886 struct rte_pmd_i40e_profile_info
*info
=
1887 (struct rte_pmd_i40e_profile_info
*)info_buff
;
1889 if (info_size
< sizeof(struct rte_pmd_i40e_profile_info
)) {
1890 PMD_DRV_LOG(ERR
, "Output info buff size is invalid.");
1894 if (!metadata_seg_hdr
) {
1895 PMD_DRV_LOG(ERR
, "Failed to find metadata segment header");
1899 if (!i40e_seg_hdr
) {
1900 PMD_DRV_LOG(ERR
, "Failed to find i40e segment header");
1904 memset(info
, 0, sizeof(struct rte_pmd_i40e_profile_info
));
1905 info
->owner
= RTE_PMD_I40E_DDP_OWNER_UNKNOWN
;
1907 ((struct i40e_metadata_segment
*)metadata_seg_hdr
)->track_id
;
1910 ((struct i40e_profile_segment
*)i40e_seg_hdr
)->name
,
1911 I40E_DDP_NAME_SIZE
);
1912 memcpy(&info
->version
,
1913 &((struct i40e_profile_segment
*)i40e_seg_hdr
)->version
,
1914 sizeof(struct i40e_ddp_version
));
1915 return I40E_SUCCESS
;
1918 /* get number of devices */
1919 if (type
== RTE_PMD_I40E_PKG_INFO_DEVID_NUM
) {
1920 if (info_size
< sizeof(uint32_t)) {
1921 PMD_DRV_LOG(ERR
, "Invalid information buffer size");
1924 *(uint32_t *)info_buff
=
1925 ((struct i40e_profile_segment
*)i40e_seg_hdr
)->device_table_count
;
1926 return I40E_SUCCESS
;
1929 /* get list of devices */
1930 if (type
== RTE_PMD_I40E_PKG_INFO_DEVID_LIST
) {
1933 ((struct i40e_profile_segment
*)i40e_seg_hdr
)->device_table_count
;
1934 if (info_size
< sizeof(struct rte_pmd_i40e_ddp_device_id
) * dev_num
) {
1935 PMD_DRV_LOG(ERR
, "Invalid information buffer size");
1939 ((struct i40e_profile_segment
*)i40e_seg_hdr
)->device_table
,
1940 sizeof(struct rte_pmd_i40e_ddp_device_id
) * dev_num
);
1941 return I40E_SUCCESS
;
1944 /* get number of protocols */
1945 if (type
== RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM
) {
1946 struct i40e_profile_section_header
*proto
;
1948 if (info_size
< sizeof(uint32_t)) {
1949 PMD_DRV_LOG(ERR
, "Invalid information buffer size");
1952 proto
= i40e_find_section_in_profile(SECTION_TYPE_PROTO
,
1953 (struct i40e_profile_segment
*)i40e_seg_hdr
);
1954 *(uint32_t *)info_buff
= i40e_get_tlv_section_size(proto
);
1955 return I40E_SUCCESS
;
1958 /* get list of protocols */
1959 if (type
== RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST
) {
1960 uint32_t i
, j
, nb_tlv
, nb_rec
, nb_proto_info
;
1961 struct rte_pmd_i40e_proto_info
*pinfo
;
1962 struct i40e_profile_section_header
*proto
;
1963 struct i40e_profile_tlv_section_record
*tlv
;
1965 pinfo
= (struct rte_pmd_i40e_proto_info
*)info_buff
;
1966 nb_proto_info
= info_size
/
1967 sizeof(struct rte_pmd_i40e_proto_info
);
1968 for (i
= 0; i
< nb_proto_info
; i
++) {
1969 pinfo
[i
].proto_id
= RTE_PMD_I40E_PROTO_UNUSED
;
1970 memset(pinfo
[i
].name
, 0, RTE_PMD_I40E_DDP_NAME_SIZE
);
1972 proto
= i40e_find_section_in_profile(SECTION_TYPE_PROTO
,
1973 (struct i40e_profile_segment
*)i40e_seg_hdr
);
1974 nb_tlv
= i40e_get_tlv_section_size(proto
);
1976 return I40E_SUCCESS
;
1977 if (nb_proto_info
< nb_tlv
) {
1978 PMD_DRV_LOG(ERR
, "Invalid information buffer size");
1981 /* get number of records in the section */
1982 nb_rec
= proto
->section
.size
/
1983 sizeof(struct i40e_profile_tlv_section_record
);
1984 tlv
= (struct i40e_profile_tlv_section_record
*)&proto
[1];
1985 for (i
= j
= 0; i
< nb_rec
; j
++) {
1986 pinfo
[j
].proto_id
= tlv
->data
[0];
1987 strlcpy(pinfo
[j
].name
, (const char *)&tlv
->data
[1],
1988 I40E_DDP_NAME_SIZE
);
1990 tlv
= &tlv
[tlv
->len
];
1992 return I40E_SUCCESS
;
1995 /* get number of packet classification types */
1996 if (type
== RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM
) {
1997 struct i40e_profile_section_header
*pctype
;
1999 if (info_size
< sizeof(uint32_t)) {
2000 PMD_DRV_LOG(ERR
, "Invalid information buffer size");
2003 pctype
= i40e_find_section_in_profile(SECTION_TYPE_PCTYPE
,
2004 (struct i40e_profile_segment
*)i40e_seg_hdr
);
2005 *(uint32_t *)info_buff
= i40e_get_tlv_section_size(pctype
);
2006 return I40E_SUCCESS
;
2009 /* get list of packet classification types */
2010 if (type
== RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST
) {
2011 uint32_t i
, j
, nb_tlv
, nb_rec
, nb_proto_info
;
2012 struct rte_pmd_i40e_ptype_info
*pinfo
;
2013 struct i40e_profile_section_header
*pctype
;
2014 struct i40e_profile_tlv_section_record
*tlv
;
2016 pinfo
= (struct rte_pmd_i40e_ptype_info
*)info_buff
;
2017 nb_proto_info
= info_size
/
2018 sizeof(struct rte_pmd_i40e_ptype_info
);
2019 for (i
= 0; i
< nb_proto_info
; i
++)
2020 memset(&pinfo
[i
], RTE_PMD_I40E_PROTO_UNUSED
,
2021 sizeof(struct rte_pmd_i40e_ptype_info
));
2022 pctype
= i40e_find_section_in_profile(SECTION_TYPE_PCTYPE
,
2023 (struct i40e_profile_segment
*)i40e_seg_hdr
);
2024 nb_tlv
= i40e_get_tlv_section_size(pctype
);
2026 return I40E_SUCCESS
;
2027 if (nb_proto_info
< nb_tlv
) {
2028 PMD_DRV_LOG(ERR
, "Invalid information buffer size");
2032 /* get number of records in the section */
2033 nb_rec
= pctype
->section
.size
/
2034 sizeof(struct i40e_profile_tlv_section_record
);
2035 tlv
= (struct i40e_profile_tlv_section_record
*)&pctype
[1];
2036 for (i
= j
= 0; i
< nb_rec
; j
++) {
2037 memcpy(&pinfo
[j
], tlv
->data
,
2038 sizeof(struct rte_pmd_i40e_ptype_info
));
2040 tlv
= &tlv
[tlv
->len
];
2042 return I40E_SUCCESS
;
2045 /* get number of packet types */
2046 if (type
== RTE_PMD_I40E_PKG_INFO_PTYPE_NUM
) {
2047 struct i40e_profile_section_header
*ptype
;
2049 if (info_size
< sizeof(uint32_t)) {
2050 PMD_DRV_LOG(ERR
, "Invalid information buffer size");
2053 ptype
= i40e_find_section_in_profile(SECTION_TYPE_PTYPE
,
2054 (struct i40e_profile_segment
*)i40e_seg_hdr
);
2055 *(uint32_t *)info_buff
= i40e_get_tlv_section_size(ptype
);
2056 return I40E_SUCCESS
;
2059 /* get list of packet types */
2060 if (type
== RTE_PMD_I40E_PKG_INFO_PTYPE_LIST
) {
2061 uint32_t i
, j
, nb_tlv
, nb_rec
, nb_proto_info
;
2062 struct rte_pmd_i40e_ptype_info
*pinfo
;
2063 struct i40e_profile_section_header
*ptype
;
2064 struct i40e_profile_tlv_section_record
*tlv
;
2066 pinfo
= (struct rte_pmd_i40e_ptype_info
*)info_buff
;
2067 nb_proto_info
= info_size
/
2068 sizeof(struct rte_pmd_i40e_ptype_info
);
2069 for (i
= 0; i
< nb_proto_info
; i
++)
2070 memset(&pinfo
[i
], RTE_PMD_I40E_PROTO_UNUSED
,
2071 sizeof(struct rte_pmd_i40e_ptype_info
));
2072 ptype
= i40e_find_section_in_profile(SECTION_TYPE_PTYPE
,
2073 (struct i40e_profile_segment
*)i40e_seg_hdr
);
2074 nb_tlv
= i40e_get_tlv_section_size(ptype
);
2076 return I40E_SUCCESS
;
2077 if (nb_proto_info
< nb_tlv
) {
2078 PMD_DRV_LOG(ERR
, "Invalid information buffer size");
2081 /* get number of records in the section */
2082 nb_rec
= ptype
->section
.size
/
2083 sizeof(struct i40e_profile_tlv_section_record
);
2084 for (i
= j
= 0; i
< nb_rec
; j
++) {
2085 tlv
= (struct i40e_profile_tlv_section_record
*)
2087 memcpy(&pinfo
[j
], tlv
->data
,
2088 sizeof(struct rte_pmd_i40e_ptype_info
));
2091 return I40E_SUCCESS
;
2094 PMD_DRV_LOG(ERR
, "Info type %u is invalid.", type
);
2099 rte_pmd_i40e_get_ddp_list(uint16_t port
, uint8_t *buff
, uint32_t size
)
2101 struct rte_eth_dev
*dev
;
2103 enum i40e_status_code status
= I40E_SUCCESS
;
2105 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
2107 dev
= &rte_eth_devices
[port
];
2109 if (!is_i40e_supported(dev
))
2112 if (size
< (I40E_PROFILE_INFO_SIZE
* I40E_MAX_PROFILE_NUM
+ 4))
2115 hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
2117 status
= i40e_aq_get_ddp_list(hw
, (void *)buff
,
2123 static int check_invalid_pkt_type(uint32_t pkt_type
)
2125 uint32_t l2
, l3
, l4
, tnl
, il2
, il3
, il4
;
2127 l2
= pkt_type
& RTE_PTYPE_L2_MASK
;
2128 l3
= pkt_type
& RTE_PTYPE_L3_MASK
;
2129 l4
= pkt_type
& RTE_PTYPE_L4_MASK
;
2130 tnl
= pkt_type
& RTE_PTYPE_TUNNEL_MASK
;
2131 il2
= pkt_type
& RTE_PTYPE_INNER_L2_MASK
;
2132 il3
= pkt_type
& RTE_PTYPE_INNER_L3_MASK
;
2133 il4
= pkt_type
& RTE_PTYPE_INNER_L4_MASK
;
2136 l2
!= RTE_PTYPE_L2_ETHER
&&
2137 l2
!= RTE_PTYPE_L2_ETHER_TIMESYNC
&&
2138 l2
!= RTE_PTYPE_L2_ETHER_ARP
&&
2139 l2
!= RTE_PTYPE_L2_ETHER_LLDP
&&
2140 l2
!= RTE_PTYPE_L2_ETHER_NSH
&&
2141 l2
!= RTE_PTYPE_L2_ETHER_VLAN
&&
2142 l2
!= RTE_PTYPE_L2_ETHER_QINQ
&&
2143 l2
!= RTE_PTYPE_L2_ETHER_PPPOE
)
2147 l3
!= RTE_PTYPE_L3_IPV4
&&
2148 l3
!= RTE_PTYPE_L3_IPV4_EXT
&&
2149 l3
!= RTE_PTYPE_L3_IPV6
&&
2150 l3
!= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
&&
2151 l3
!= RTE_PTYPE_L3_IPV6_EXT
&&
2152 l3
!= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
)
2156 l4
!= RTE_PTYPE_L4_TCP
&&
2157 l4
!= RTE_PTYPE_L4_UDP
&&
2158 l4
!= RTE_PTYPE_L4_FRAG
&&
2159 l4
!= RTE_PTYPE_L4_SCTP
&&
2160 l4
!= RTE_PTYPE_L4_ICMP
&&
2161 l4
!= RTE_PTYPE_L4_NONFRAG
)
2165 tnl
!= RTE_PTYPE_TUNNEL_IP
&&
2166 tnl
!= RTE_PTYPE_TUNNEL_GRENAT
&&
2167 tnl
!= RTE_PTYPE_TUNNEL_VXLAN
&&
2168 tnl
!= RTE_PTYPE_TUNNEL_NVGRE
&&
2169 tnl
!= RTE_PTYPE_TUNNEL_GENEVE
&&
2170 tnl
!= RTE_PTYPE_TUNNEL_GRENAT
&&
2171 tnl
!= RTE_PTYPE_TUNNEL_GTPC
&&
2172 tnl
!= RTE_PTYPE_TUNNEL_GTPU
&&
2173 tnl
!= RTE_PTYPE_TUNNEL_L2TP
)
2177 il2
!= RTE_PTYPE_INNER_L2_ETHER
&&
2178 il2
!= RTE_PTYPE_INNER_L2_ETHER_VLAN
&&
2179 il2
!= RTE_PTYPE_INNER_L2_ETHER_QINQ
)
2183 il3
!= RTE_PTYPE_INNER_L3_IPV4
&&
2184 il3
!= RTE_PTYPE_INNER_L3_IPV4_EXT
&&
2185 il3
!= RTE_PTYPE_INNER_L3_IPV6
&&
2186 il3
!= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN
&&
2187 il3
!= RTE_PTYPE_INNER_L3_IPV6_EXT
&&
2188 il3
!= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN
)
2192 il4
!= RTE_PTYPE_INNER_L4_TCP
&&
2193 il4
!= RTE_PTYPE_INNER_L4_UDP
&&
2194 il4
!= RTE_PTYPE_INNER_L4_FRAG
&&
2195 il4
!= RTE_PTYPE_INNER_L4_SCTP
&&
2196 il4
!= RTE_PTYPE_INNER_L4_ICMP
&&
2197 il4
!= RTE_PTYPE_INNER_L4_NONFRAG
)
2203 static int check_invalid_ptype_mapping(
2204 struct rte_pmd_i40e_ptype_mapping
*mapping_table
,
2209 for (i
= 0; i
< count
; i
++) {
2210 uint16_t ptype
= mapping_table
[i
].hw_ptype
;
2211 uint32_t pkt_type
= mapping_table
[i
].sw_ptype
;
2213 if (ptype
>= I40E_MAX_PKT_TYPE
)
2216 if (pkt_type
== RTE_PTYPE_UNKNOWN
)
2219 if (pkt_type
& RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK
)
2222 if (check_invalid_pkt_type(pkt_type
))
2230 rte_pmd_i40e_ptype_mapping_update(
2232 struct rte_pmd_i40e_ptype_mapping
*mapping_items
,
2236 struct rte_eth_dev
*dev
;
2237 struct i40e_adapter
*ad
;
2240 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
2242 dev
= &rte_eth_devices
[port
];
2244 if (!is_i40e_supported(dev
))
2247 if (count
> I40E_MAX_PKT_TYPE
)
2250 if (check_invalid_ptype_mapping(mapping_items
, count
))
2253 ad
= I40E_DEV_PRIVATE_TO_ADAPTER(dev
->data
->dev_private
);
2256 for (i
= 0; i
< I40E_MAX_PKT_TYPE
; i
++)
2257 ad
->ptype_tbl
[i
] = RTE_PTYPE_UNKNOWN
;
2260 for (i
= 0; i
< count
; i
++)
2261 ad
->ptype_tbl
[mapping_items
[i
].hw_ptype
]
2262 = mapping_items
[i
].sw_ptype
;
2267 int rte_pmd_i40e_ptype_mapping_reset(uint16_t port
)
2269 struct rte_eth_dev
*dev
;
2271 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
2273 dev
= &rte_eth_devices
[port
];
2275 if (!is_i40e_supported(dev
))
2278 i40e_set_default_ptype_table(dev
);
2283 int rte_pmd_i40e_ptype_mapping_get(
2285 struct rte_pmd_i40e_ptype_mapping
*mapping_items
,
2290 struct rte_eth_dev
*dev
;
2291 struct i40e_adapter
*ad
;
2295 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
2297 dev
= &rte_eth_devices
[port
];
2299 if (!is_i40e_supported(dev
))
2302 ad
= I40E_DEV_PRIVATE_TO_ADAPTER(dev
->data
->dev_private
);
2304 for (i
= 0; i
< I40E_MAX_PKT_TYPE
; i
++) {
2307 if (valid_only
&& ad
->ptype_tbl
[i
] == RTE_PTYPE_UNKNOWN
)
2309 mapping_items
[n
].hw_ptype
= i
;
2310 mapping_items
[n
].sw_ptype
= ad
->ptype_tbl
[i
];
2318 int rte_pmd_i40e_ptype_mapping_replace(uint16_t port
,
2323 struct rte_eth_dev
*dev
;
2324 struct i40e_adapter
*ad
;
2327 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
2329 dev
= &rte_eth_devices
[port
];
2331 if (!is_i40e_supported(dev
))
2334 if (!mask
&& check_invalid_pkt_type(target
))
2337 if (check_invalid_pkt_type(pkt_type
))
2340 ad
= I40E_DEV_PRIVATE_TO_ADAPTER(dev
->data
->dev_private
);
2342 for (i
= 0; i
< I40E_MAX_PKT_TYPE
; i
++) {
2344 if ((target
| ad
->ptype_tbl
[i
]) == target
&&
2345 (target
& ad
->ptype_tbl
[i
]))
2346 ad
->ptype_tbl
[i
] = pkt_type
;
2348 if (ad
->ptype_tbl
[i
] == target
)
2349 ad
->ptype_tbl
[i
] = pkt_type
;
2357 rte_pmd_i40e_add_vf_mac_addr(uint16_t port
, uint16_t vf_id
,
2358 struct ether_addr
*mac_addr
)
2360 struct rte_eth_dev
*dev
;
2361 struct i40e_pf_vf
*vf
;
2362 struct i40e_vsi
*vsi
;
2364 struct i40e_mac_filter_info mac_filter
;
2367 if (i40e_validate_mac_addr((u8
*)mac_addr
) != I40E_SUCCESS
)
2370 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
2372 dev
= &rte_eth_devices
[port
];
2374 if (!is_i40e_supported(dev
))
2377 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
2379 if (vf_id
>= pf
->vf_num
|| !pf
->vfs
)
2382 vf
= &pf
->vfs
[vf_id
];
2385 PMD_DRV_LOG(ERR
, "Invalid VSI.");
2389 mac_filter
.filter_type
= RTE_MACVLAN_PERFECT_MATCH
;
2390 ether_addr_copy(mac_addr
, &mac_filter
.mac_addr
);
2391 ret
= i40e_vsi_add_mac(vsi
, &mac_filter
);
2392 if (ret
!= I40E_SUCCESS
) {
2393 PMD_DRV_LOG(ERR
, "Failed to add MAC filter.");
2400 int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port
)
2402 struct rte_eth_dev
*dev
;
2404 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
2406 dev
= &rte_eth_devices
[port
];
2408 if (!is_i40e_supported(dev
))
2411 i40e_set_default_pctype_table(dev
);
2416 int rte_pmd_i40e_flow_type_mapping_get(
2418 struct rte_pmd_i40e_flow_type_mapping
*mapping_items
)
2420 struct rte_eth_dev
*dev
;
2421 struct i40e_adapter
*ad
;
2424 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
2426 dev
= &rte_eth_devices
[port
];
2428 if (!is_i40e_supported(dev
))
2431 ad
= I40E_DEV_PRIVATE_TO_ADAPTER(dev
->data
->dev_private
);
2433 for (i
= 0; i
< I40E_FLOW_TYPE_MAX
; i
++) {
2434 mapping_items
[i
].flow_type
= i
;
2435 mapping_items
[i
].pctype
= ad
->pctypes_tbl
[i
];
2442 rte_pmd_i40e_flow_type_mapping_update(
2444 struct rte_pmd_i40e_flow_type_mapping
*mapping_items
,
2448 struct rte_eth_dev
*dev
;
2449 struct i40e_adapter
*ad
;
2452 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
2454 dev
= &rte_eth_devices
[port
];
2456 if (!is_i40e_supported(dev
))
2459 if (count
> I40E_FLOW_TYPE_MAX
)
2462 for (i
= 0; i
< count
; i
++)
2463 if (mapping_items
[i
].flow_type
>= I40E_FLOW_TYPE_MAX
||
2464 mapping_items
[i
].flow_type
== RTE_ETH_FLOW_UNKNOWN
||
2465 (mapping_items
[i
].pctype
&
2466 (1ULL << I40E_FILTER_PCTYPE_INVALID
)))
2469 ad
= I40E_DEV_PRIVATE_TO_ADAPTER(dev
->data
->dev_private
);
2472 for (i
= 0; i
< I40E_FLOW_TYPE_MAX
; i
++)
2473 ad
->pctypes_tbl
[i
] = 0ULL;
2474 ad
->flow_types_mask
= 0ULL;
2477 for (i
= 0; i
< count
; i
++) {
2478 ad
->pctypes_tbl
[mapping_items
[i
].flow_type
] =
2479 mapping_items
[i
].pctype
;
2480 if (mapping_items
[i
].pctype
)
2481 ad
->flow_types_mask
|=
2482 (1ULL << mapping_items
[i
].flow_type
);
2484 ad
->flow_types_mask
&=
2485 ~(1ULL << mapping_items
[i
].flow_type
);
2488 for (i
= 0, ad
->pctypes_mask
= 0ULL; i
< I40E_FLOW_TYPE_MAX
; i
++)
2489 ad
->pctypes_mask
|= ad
->pctypes_tbl
[i
];
2495 rte_pmd_i40e_query_vfid_by_mac(uint16_t port
, const struct ether_addr
*vf_mac
)
2497 struct rte_eth_dev
*dev
;
2498 struct ether_addr
*mac
;
2501 struct i40e_pf_vf
*vf
;
2504 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
2505 dev
= &rte_eth_devices
[port
];
2507 if (!is_i40e_supported(dev
))
2510 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
2511 vf_num
= pf
->vf_num
;
2513 for (vf_id
= 0; vf_id
< vf_num
; vf_id
++) {
2514 vf
= &pf
->vfs
[vf_id
];
2515 mac
= &vf
->mac_addr
;
2517 if (is_same_ether_addr(mac
, vf_mac
))
2525 i40e_vsi_update_queue_region_mapping(struct i40e_hw
*hw
,
2529 struct i40e_vsi
*vsi
= pf
->main_vsi
;
2530 uint16_t queue_offset
, bsf
, tc_index
;
2531 struct i40e_vsi_context ctxt
;
2532 struct i40e_aqc_vsi_properties_data
*vsi_info
;
2533 struct i40e_queue_regions
*region_info
=
2535 int32_t ret
= -EINVAL
;
2537 if (!region_info
->queue_region_number
) {
2538 PMD_INIT_LOG(ERR
, "there is no that region id been set before");
2542 memset(&ctxt
, 0, sizeof(struct i40e_vsi_context
));
2544 /* Update Queue Pairs Mapping for currently enabled UPs */
2545 ctxt
.seid
= vsi
->seid
;
2546 ctxt
.pf_num
= hw
->pf_id
;
2548 ctxt
.uplink_seid
= vsi
->uplink_seid
;
2549 ctxt
.info
= vsi
->info
;
2550 vsi_info
= &ctxt
.info
;
2552 memset(vsi_info
->tc_mapping
, 0, sizeof(uint16_t) * 8);
2553 memset(vsi_info
->queue_mapping
, 0, sizeof(uint16_t) * 16);
2555 /* Configure queue region and queue mapping parameters,
2556 * for enabled queue region, allocate queues to this region.
2559 for (i
= 0; i
< region_info
->queue_region_number
; i
++) {
2560 tc_index
= region_info
->region
[i
].region_id
;
2561 bsf
= rte_bsf32(region_info
->region
[i
].queue_num
);
2562 queue_offset
= region_info
->region
[i
].queue_start_index
;
2563 vsi_info
->tc_mapping
[tc_index
] = rte_cpu_to_le_16(
2564 (queue_offset
<< I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT
) |
2565 (bsf
<< I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT
));
2568 /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
2569 vsi_info
->mapping_flags
|=
2570 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG
);
2571 vsi_info
->queue_mapping
[0] = rte_cpu_to_le_16(vsi
->base_queue
);
2572 vsi_info
->valid_sections
|=
2573 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID
);
2575 /* Update the VSI after updating the VSI queue-mapping information */
2576 ret
= i40e_aq_update_vsi_params(hw
, &ctxt
, NULL
);
2578 PMD_DRV_LOG(ERR
, "Failed to configure queue region mapping = %d ",
2579 hw
->aq
.asq_last_status
);
2582 /* update the local VSI info with updated queue map */
2583 rte_memcpy(&vsi
->info
.tc_mapping
, &ctxt
.info
.tc_mapping
,
2584 sizeof(vsi
->info
.tc_mapping
));
2585 rte_memcpy(&vsi
->info
.queue_mapping
,
2586 &ctxt
.info
.queue_mapping
,
2587 sizeof(vsi
->info
.queue_mapping
));
2588 vsi
->info
.mapping_flags
= ctxt
.info
.mapping_flags
;
2589 vsi
->info
.valid_sections
= 0;
2596 i40e_queue_region_set_region(struct i40e_pf
*pf
,
2597 struct rte_pmd_i40e_queue_region_conf
*conf_ptr
)
2600 struct i40e_vsi
*main_vsi
= pf
->main_vsi
;
2601 struct i40e_queue_regions
*info
= &pf
->queue_region
;
2602 int32_t ret
= -EINVAL
;
2604 if (!((rte_is_power_of_2(conf_ptr
->queue_num
)) &&
2605 conf_ptr
->queue_num
<= 64)) {
2606 PMD_DRV_LOG(ERR
, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
2607 "total number of queues do not exceed the VSI allocation");
2611 if (conf_ptr
->region_id
> I40E_REGION_MAX_INDEX
) {
2612 PMD_DRV_LOG(ERR
, "the queue region max index is 7");
2616 if ((conf_ptr
->queue_start_index
+ conf_ptr
->queue_num
)
2617 > main_vsi
->nb_used_qps
) {
2618 PMD_DRV_LOG(ERR
, "the queue index exceeds the VSI range");
2622 for (i
= 0; i
< info
->queue_region_number
; i
++)
2623 if (conf_ptr
->region_id
== info
->region
[i
].region_id
)
2626 if (i
== info
->queue_region_number
&&
2627 i
<= I40E_REGION_MAX_INDEX
) {
2628 info
->region
[i
].region_id
= conf_ptr
->region_id
;
2629 info
->region
[i
].queue_num
= conf_ptr
->queue_num
;
2630 info
->region
[i
].queue_start_index
=
2631 conf_ptr
->queue_start_index
;
2632 info
->queue_region_number
++;
2634 PMD_DRV_LOG(ERR
, "queue region number exceeds maxnum 8 or the queue region id has been set before");
2642 i40e_queue_region_set_flowtype(struct i40e_pf
*pf
,
2643 struct rte_pmd_i40e_queue_region_conf
*rss_region_conf
)
2645 int32_t ret
= -EINVAL
;
2646 struct i40e_queue_regions
*info
= &pf
->queue_region
;
2648 uint16_t region_index
, flowtype_index
;
2650 /* For the pctype or hardware flowtype of packet,
2651 * the specific index for each type has been defined
2652 * in file i40e_type.h as enum i40e_filter_pctype.
2655 if (rss_region_conf
->region_id
> I40E_PFQF_HREGION_MAX_INDEX
) {
2656 PMD_DRV_LOG(ERR
, "the queue region max index is 7");
2660 if (rss_region_conf
->hw_flowtype
>= I40E_FILTER_PCTYPE_MAX
) {
2661 PMD_DRV_LOG(ERR
, "the hw_flowtype or PCTYPE max index is 63");
2666 for (i
= 0; i
< info
->queue_region_number
; i
++)
2667 if (rss_region_conf
->region_id
== info
->region
[i
].region_id
)
2670 if (i
== info
->queue_region_number
) {
2671 PMD_DRV_LOG(ERR
, "that region id has not been set before");
2677 for (i
= 0; i
< info
->queue_region_number
; i
++) {
2678 for (j
= 0; j
< info
->region
[i
].flowtype_num
; j
++) {
2679 if (rss_region_conf
->hw_flowtype
==
2680 info
->region
[i
].hw_flowtype
[j
]) {
2681 PMD_DRV_LOG(ERR
, "that hw_flowtype has been set before");
2687 flowtype_index
= info
->region
[region_index
].flowtype_num
;
2688 info
->region
[region_index
].hw_flowtype
[flowtype_index
] =
2689 rss_region_conf
->hw_flowtype
;
2690 info
->region
[region_index
].flowtype_num
++;
2696 i40e_queue_region_pf_flowtype_conf(struct i40e_hw
*hw
,
2699 uint8_t hw_flowtype
;
2700 uint32_t pfqf_hregion
;
2701 uint16_t i
, j
, index
;
2702 struct i40e_queue_regions
*info
= &pf
->queue_region
;
2704 /* For the pctype or hardware flowtype of packet,
2705 * the specific index for each type has been defined
2706 * in file i40e_type.h as enum i40e_filter_pctype.
2709 for (i
= 0; i
< info
->queue_region_number
; i
++) {
2710 for (j
= 0; j
< info
->region
[i
].flowtype_num
; j
++) {
2711 hw_flowtype
= info
->region
[i
].hw_flowtype
[j
];
2712 index
= hw_flowtype
>> 3;
2714 i40e_read_rx_ctl(hw
, I40E_PFQF_HREGION(index
));
2716 if ((hw_flowtype
& 0x7) == 0) {
2717 pfqf_hregion
|= info
->region
[i
].region_id
<<
2718 I40E_PFQF_HREGION_REGION_0_SHIFT
;
2719 pfqf_hregion
|= 1 <<
2720 I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT
;
2721 } else if ((hw_flowtype
& 0x7) == 1) {
2722 pfqf_hregion
|= info
->region
[i
].region_id
<<
2723 I40E_PFQF_HREGION_REGION_1_SHIFT
;
2724 pfqf_hregion
|= 1 <<
2725 I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT
;
2726 } else if ((hw_flowtype
& 0x7) == 2) {
2727 pfqf_hregion
|= info
->region
[i
].region_id
<<
2728 I40E_PFQF_HREGION_REGION_2_SHIFT
;
2729 pfqf_hregion
|= 1 <<
2730 I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT
;
2731 } else if ((hw_flowtype
& 0x7) == 3) {
2732 pfqf_hregion
|= info
->region
[i
].region_id
<<
2733 I40E_PFQF_HREGION_REGION_3_SHIFT
;
2734 pfqf_hregion
|= 1 <<
2735 I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT
;
2736 } else if ((hw_flowtype
& 0x7) == 4) {
2737 pfqf_hregion
|= info
->region
[i
].region_id
<<
2738 I40E_PFQF_HREGION_REGION_4_SHIFT
;
2739 pfqf_hregion
|= 1 <<
2740 I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT
;
2741 } else if ((hw_flowtype
& 0x7) == 5) {
2742 pfqf_hregion
|= info
->region
[i
].region_id
<<
2743 I40E_PFQF_HREGION_REGION_5_SHIFT
;
2744 pfqf_hregion
|= 1 <<
2745 I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT
;
2746 } else if ((hw_flowtype
& 0x7) == 6) {
2747 pfqf_hregion
|= info
->region
[i
].region_id
<<
2748 I40E_PFQF_HREGION_REGION_6_SHIFT
;
2749 pfqf_hregion
|= 1 <<
2750 I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT
;
2752 pfqf_hregion
|= info
->region
[i
].region_id
<<
2753 I40E_PFQF_HREGION_REGION_7_SHIFT
;
2754 pfqf_hregion
|= 1 <<
2755 I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT
;
2758 i40e_write_rx_ctl(hw
, I40E_PFQF_HREGION(index
),
2765 i40e_queue_region_set_user_priority(struct i40e_pf
*pf
,
2766 struct rte_pmd_i40e_queue_region_conf
*rss_region_conf
)
2768 struct i40e_queue_regions
*info
= &pf
->queue_region
;
2769 int32_t ret
= -EINVAL
;
2770 uint16_t i
, j
, region_index
;
2772 if (rss_region_conf
->user_priority
>= I40E_MAX_USER_PRIORITY
) {
2773 PMD_DRV_LOG(ERR
, "the queue region max index is 7");
2777 if (rss_region_conf
->region_id
> I40E_REGION_MAX_INDEX
) {
2778 PMD_DRV_LOG(ERR
, "the region_id max index is 7");
2782 for (i
= 0; i
< info
->queue_region_number
; i
++)
2783 if (rss_region_conf
->region_id
== info
->region
[i
].region_id
)
2786 if (i
== info
->queue_region_number
) {
2787 PMD_DRV_LOG(ERR
, "that region id has not been set before");
2794 for (i
= 0; i
< info
->queue_region_number
; i
++) {
2795 for (j
= 0; j
< info
->region
[i
].user_priority_num
; j
++) {
2796 if (info
->region
[i
].user_priority
[j
] ==
2797 rss_region_conf
->user_priority
) {
2798 PMD_DRV_LOG(ERR
, "that user priority has been set before");
2804 j
= info
->region
[region_index
].user_priority_num
;
2805 info
->region
[region_index
].user_priority
[j
] =
2806 rss_region_conf
->user_priority
;
2807 info
->region
[region_index
].user_priority_num
++;
2813 i40e_queue_region_dcb_configure(struct i40e_hw
*hw
,
2816 struct i40e_dcbx_config dcb_cfg_local
;
2817 struct i40e_dcbx_config
*dcb_cfg
;
2818 struct i40e_queue_regions
*info
= &pf
->queue_region
;
2819 struct i40e_dcbx_config
*old_cfg
= &hw
->local_dcbx_config
;
2820 int32_t ret
= -EINVAL
;
2821 uint16_t i
, j
, prio_index
, region_index
;
2822 uint8_t tc_map
, tc_bw
, bw_lf
, dcb_flag
= 0;
2824 if (!info
->queue_region_number
) {
2825 PMD_DRV_LOG(ERR
, "No queue region been set before");
2829 for (i
= 0; i
< info
->queue_region_number
; i
++) {
2830 if (info
->region
[i
].user_priority_num
) {
2839 dcb_cfg
= &dcb_cfg_local
;
2840 memset(dcb_cfg
, 0, sizeof(struct i40e_dcbx_config
));
2842 /* assume each tc has the same bw */
2843 tc_bw
= I40E_MAX_PERCENT
/ info
->queue_region_number
;
2844 for (i
= 0; i
< info
->queue_region_number
; i
++)
2845 dcb_cfg
->etscfg
.tcbwtable
[i
] = tc_bw
;
2846 /* to ensure the sum of tcbw is equal to 100 */
2847 bw_lf
= I40E_MAX_PERCENT
% info
->queue_region_number
;
2848 for (i
= 0; i
< bw_lf
; i
++)
2849 dcb_cfg
->etscfg
.tcbwtable
[i
]++;
2851 /* assume each tc has the same Transmission Selection Algorithm */
2852 for (i
= 0; i
< info
->queue_region_number
; i
++)
2853 dcb_cfg
->etscfg
.tsatable
[i
] = I40E_IEEE_TSA_ETS
;
2855 for (i
= 0; i
< info
->queue_region_number
; i
++) {
2856 for (j
= 0; j
< info
->region
[i
].user_priority_num
; j
++) {
2857 prio_index
= info
->region
[i
].user_priority
[j
];
2858 region_index
= info
->region
[i
].region_id
;
2859 dcb_cfg
->etscfg
.prioritytable
[prio_index
] =
2864 /* FW needs one App to configure HW */
2865 dcb_cfg
->numapps
= I40E_DEFAULT_DCB_APP_NUM
;
2866 dcb_cfg
->app
[0].selector
= I40E_APP_SEL_ETHTYPE
;
2867 dcb_cfg
->app
[0].priority
= I40E_DEFAULT_DCB_APP_PRIO
;
2868 dcb_cfg
->app
[0].protocolid
= I40E_APP_PROTOID_FCOE
;
2870 tc_map
= RTE_LEN2MASK(info
->queue_region_number
, uint8_t);
2872 dcb_cfg
->pfc
.willing
= 0;
2873 dcb_cfg
->pfc
.pfccap
= I40E_MAX_TRAFFIC_CLASS
;
2874 dcb_cfg
->pfc
.pfcenable
= tc_map
;
2876 /* Copy the new config to the current config */
2877 *old_cfg
= *dcb_cfg
;
2878 old_cfg
->etsrec
= old_cfg
->etscfg
;
2879 ret
= i40e_set_dcb_config(hw
);
2882 PMD_DRV_LOG(ERR
, "Set queue region DCB Config failed, err %s aq_err %s",
2883 i40e_stat_str(hw
, ret
),
2884 i40e_aq_str(hw
, hw
->aq
.asq_last_status
));
2892 i40e_flush_queue_region_all_conf(struct rte_eth_dev
*dev
,
2893 struct i40e_hw
*hw
, struct i40e_pf
*pf
, uint16_t on
)
2895 int32_t ret
= -EINVAL
;
2896 struct i40e_queue_regions
*info
= &pf
->queue_region
;
2897 struct i40e_vsi
*main_vsi
= pf
->main_vsi
;
2900 i40e_queue_region_pf_flowtype_conf(hw
, pf
);
2902 ret
= i40e_vsi_update_queue_region_mapping(hw
, pf
);
2903 if (ret
!= I40E_SUCCESS
) {
2904 PMD_DRV_LOG(INFO
, "Failed to flush queue region mapping.");
2908 ret
= i40e_queue_region_dcb_configure(hw
, pf
);
2909 if (ret
!= I40E_SUCCESS
) {
2910 PMD_DRV_LOG(INFO
, "Failed to flush dcb.");
2917 if (info
->queue_region_number
) {
2918 info
->queue_region_number
= 1;
2919 info
->region
[0].queue_num
= main_vsi
->nb_used_qps
;
2920 info
->region
[0].queue_start_index
= 0;
2922 ret
= i40e_vsi_update_queue_region_mapping(hw
, pf
);
2923 if (ret
!= I40E_SUCCESS
)
2924 PMD_DRV_LOG(INFO
, "Failed to flush queue region mapping.");
2926 ret
= i40e_dcb_init_configure(dev
, TRUE
);
2927 if (ret
!= I40E_SUCCESS
) {
2928 PMD_DRV_LOG(INFO
, "Failed to flush dcb.");
2929 pf
->flags
&= ~I40E_FLAG_DCB
;
2932 i40e_init_queue_region_conf(dev
);
/**
 * Check that RSS is enabled on the PF by reading the hash-enable (HENA)
 * registers; queue regions require RSS to be active.
 *
 * @return 0 when any HENA bit is set, -ENOTSUP otherwise.
 *
 * NOTE(review): body reconstructed from a garbled extraction — verify
 * against upstream DPDK rte_pmd_i40e.c before merging.
 */
static int
i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint64_t hena;

	/* HENA is a 64-bit mask split across two 32-bit registers. */
	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;

	if (!hena)
		return -ENOTSUP;

	return 0;
}
2953 i40e_queue_region_get_all_info(struct i40e_pf
*pf
,
2954 struct i40e_queue_regions
*regions_ptr
)
2956 struct i40e_queue_regions
*info
= &pf
->queue_region
;
2958 rte_memcpy(regions_ptr
, info
,
2959 sizeof(struct i40e_queue_regions
));
2964 int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id
,
2965 enum rte_pmd_i40e_queue_region_op op_type
, void *arg
)
2967 struct rte_eth_dev
*dev
= &rte_eth_devices
[port_id
];
2968 struct i40e_pf
*pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
2969 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
2972 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id
, -ENODEV
);
2974 if (!is_i40e_supported(dev
))
2977 if (!(!i40e_queue_region_pf_check_rss(pf
)))
2980 /* This queue region feature only support pf by now. It should
2981 * be called after dev_start, and will be clear after dev_stop.
2982 * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON"
2983 * is just an enable function which server for other configuration,
2984 * it is for all configuration about queue region from up layer,
2985 * at first will only keep in DPDK softwarestored in driver,
2986 * only after "FLUSH_ON", it commit all configuration to HW.
2987 * Because PMD had to set hardware configuration at a time, so
2988 * it will record all up layer command at first.
2989 * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is
2990 * just clean all configuration about queue region just now,
2991 * and restore all to DPDK i40e driver default
2992 * config when start up.
2996 case RTE_PMD_I40E_RSS_QUEUE_REGION_SET
:
2997 ret
= i40e_queue_region_set_region(pf
,
2998 (struct rte_pmd_i40e_queue_region_conf
*)arg
);
3000 case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET
:
3001 ret
= i40e_queue_region_set_flowtype(pf
,
3002 (struct rte_pmd_i40e_queue_region_conf
*)arg
);
3004 case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET
:
3005 ret
= i40e_queue_region_set_user_priority(pf
,
3006 (struct rte_pmd_i40e_queue_region_conf
*)arg
);
3008 case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON
:
3009 ret
= i40e_flush_queue_region_all_conf(dev
, hw
, pf
, 1);
3011 case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF
:
3012 ret
= i40e_flush_queue_region_all_conf(dev
, hw
, pf
, 0);
3014 case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET
:
3015 ret
= i40e_queue_region_get_all_info(pf
,
3016 (struct i40e_queue_regions
*)arg
);
3019 PMD_DRV_LOG(WARNING
, "op type (%d) not supported",
3024 I40E_WRITE_FLUSH(hw
);
3029 int rte_pmd_i40e_flow_add_del_packet_template(
3031 const struct rte_pmd_i40e_pkt_template_conf
*conf
,
3034 struct rte_eth_dev
*dev
= &rte_eth_devices
[port
];
3035 struct i40e_fdir_filter_conf filter_conf
;
3037 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
3039 if (!is_i40e_supported(dev
))
3042 memset(&filter_conf
, 0, sizeof(filter_conf
));
3043 filter_conf
.soft_id
= conf
->soft_id
;
3044 filter_conf
.input
.flow
.raw_flow
.pctype
= conf
->input
.pctype
;
3045 filter_conf
.input
.flow
.raw_flow
.packet
= conf
->input
.packet
;
3046 filter_conf
.input
.flow
.raw_flow
.length
= conf
->input
.length
;
3047 filter_conf
.input
.flow_ext
.pkt_template
= true;
3049 filter_conf
.action
.rx_queue
= conf
->action
.rx_queue
;
3050 filter_conf
.action
.behavior
=
3051 (enum i40e_fdir_behavior
)conf
->action
.behavior
;
3052 filter_conf
.action
.report_status
=
3053 (enum i40e_fdir_status
)conf
->action
.report_status
;
3054 filter_conf
.action
.flex_off
= conf
->action
.flex_off
;
3056 return i40e_flow_add_del_fdir_filter(dev
, &filter_conf
, add
);
3060 rte_pmd_i40e_inset_get(uint16_t port
, uint8_t pctype
,
3061 struct rte_pmd_i40e_inset
*inset
,
3062 enum rte_pmd_i40e_inset_type inset_type
)
3064 struct rte_eth_dev
*dev
;
3067 uint32_t mask_reg
[2];
3070 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
3072 dev
= &rte_eth_devices
[port
];
3074 if (!is_i40e_supported(dev
))
3080 hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
3081 memset(inset
, 0, sizeof(struct rte_pmd_i40e_inset
));
3083 switch (inset_type
) {
3087 i40e_read_rx_ctl(hw
, I40E_GLQF_HASH_INSET(1, pctype
));
3088 inset_reg
<<= I40E_32_BIT_WIDTH
;
3090 i40e_read_rx_ctl(hw
, I40E_GLQF_HASH_INSET(0, pctype
));
3091 /* Get field mask */
3093 i40e_read_rx_ctl(hw
, I40E_GLQF_HASH_MSK(0, pctype
));
3095 i40e_read_rx_ctl(hw
, I40E_GLQF_HASH_MSK(1, pctype
));
3099 i40e_read_rx_ctl(hw
, I40E_PRTQF_FD_INSET(pctype
, 1));
3100 inset_reg
<<= I40E_32_BIT_WIDTH
;
3102 i40e_read_rx_ctl(hw
, I40E_PRTQF_FD_INSET(pctype
, 0));
3104 i40e_read_rx_ctl(hw
, I40E_GLQF_FD_MSK(0, pctype
));
3106 i40e_read_rx_ctl(hw
, I40E_GLQF_FD_MSK(1, pctype
));
3108 case INSET_FDIR_FLX
:
3110 i40e_read_rx_ctl(hw
, I40E_PRTQF_FD_FLXINSET(pctype
));
3112 i40e_read_rx_ctl(hw
, I40E_PRTQF_FD_MSK(pctype
, 0));
3114 i40e_read_rx_ctl(hw
, I40E_PRTQF_FD_MSK(pctype
, 1));
3117 PMD_DRV_LOG(ERR
, "Unsupported input set type.");
3121 inset
->inset
= inset_reg
;
3123 for (i
= 0; i
< 2; i
++) {
3124 inset
->mask
[i
].field_idx
= ((mask_reg
[i
] >> 16) & 0x3F);
3125 inset
->mask
[i
].mask
= mask_reg
[i
] & 0xFFFF;
3132 rte_pmd_i40e_inset_set(uint16_t port
, uint8_t pctype
,
3133 struct rte_pmd_i40e_inset
*inset
,
3134 enum rte_pmd_i40e_inset_type inset_type
)
3136 struct rte_eth_dev
*dev
;
3140 uint32_t mask_reg
[2];
3143 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
3145 dev
= &rte_eth_devices
[port
];
3147 if (!is_i40e_supported(dev
))
3153 hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
3154 pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
3156 if (pf
->support_multi_driver
) {
3157 PMD_DRV_LOG(ERR
, "Input set configuration is not supported.");
3161 inset_reg
= inset
->inset
;
3162 for (i
= 0; i
< 2; i
++)
3163 mask_reg
[i
] = (inset
->mask
[i
].field_idx
<< 16) |
3164 inset
->mask
[i
].mask
;
3166 switch (inset_type
) {
3168 i40e_check_write_global_reg(hw
, I40E_GLQF_HASH_INSET(0, pctype
),
3169 (uint32_t)(inset_reg
& UINT32_MAX
));
3170 i40e_check_write_global_reg(hw
, I40E_GLQF_HASH_INSET(1, pctype
),
3171 (uint32_t)((inset_reg
>>
3172 I40E_32_BIT_WIDTH
) & UINT32_MAX
));
3173 for (i
= 0; i
< 2; i
++)
3174 i40e_check_write_global_reg(hw
,
3175 I40E_GLQF_HASH_MSK(i
, pctype
),
3179 i40e_check_write_reg(hw
, I40E_PRTQF_FD_INSET(pctype
, 0),
3180 (uint32_t)(inset_reg
& UINT32_MAX
));
3181 i40e_check_write_reg(hw
, I40E_PRTQF_FD_INSET(pctype
, 1),
3182 (uint32_t)((inset_reg
>>
3183 I40E_32_BIT_WIDTH
) & UINT32_MAX
));
3184 for (i
= 0; i
< 2; i
++)
3185 i40e_check_write_global_reg(hw
,
3186 I40E_GLQF_FD_MSK(i
, pctype
),
3189 case INSET_FDIR_FLX
:
3190 i40e_check_write_reg(hw
, I40E_PRTQF_FD_FLXINSET(pctype
),
3191 (uint32_t)(inset_reg
& UINT32_MAX
));
3192 for (i
= 0; i
< 2; i
++)
3193 i40e_check_write_reg(hw
, I40E_PRTQF_FD_MSK(pctype
, i
),
3197 PMD_DRV_LOG(ERR
, "Unsupported input set type.");
3201 I40E_WRITE_FLUSH(hw
);