/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
34 #include <rte_ethdev.h>
36 #include "base/ixgbe_api.h"
37 #include "ixgbe_ethdev.h"
38 #include "rte_pmd_ixgbe.h"
41 rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port
, uint16_t vf
,
42 struct ether_addr
*mac_addr
)
45 struct ixgbe_vf_info
*vfinfo
;
47 uint8_t *new_mac
= (uint8_t *)(mac_addr
);
48 struct rte_eth_dev
*dev
;
49 struct rte_pci_device
*pci_dev
;
51 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
53 dev
= &rte_eth_devices
[port
];
54 pci_dev
= IXGBE_DEV_TO_PCI(dev
);
56 if (!is_ixgbe_supported(dev
))
59 if (vf
>= pci_dev
->max_vfs
)
62 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
63 vfinfo
= *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev
->data
->dev_private
));
64 rar_entry
= hw
->mac
.num_rar_entries
- (vf
+ 1);
66 if (is_valid_assigned_ether_addr((struct ether_addr
*)new_mac
)) {
67 rte_memcpy(vfinfo
[vf
].vf_mac_addresses
, new_mac
,
69 return hw
->mac
.ops
.set_rar(hw
, rar_entry
, new_mac
, vf
,
76 rte_pmd_ixgbe_ping_vf(uint8_t port
, uint16_t vf
)
79 struct ixgbe_vf_info
*vfinfo
;
80 struct rte_eth_dev
*dev
;
81 struct rte_pci_device
*pci_dev
;
84 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
86 dev
= &rte_eth_devices
[port
];
87 pci_dev
= IXGBE_DEV_TO_PCI(dev
);
89 if (!is_ixgbe_supported(dev
))
92 if (vf
>= pci_dev
->max_vfs
)
95 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
96 vfinfo
= *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev
->data
->dev_private
));
98 ctrl
= IXGBE_PF_CONTROL_MSG
;
99 if (vfinfo
[vf
].clear_to_send
)
100 ctrl
|= IXGBE_VT_MSGTYPE_CTS
;
102 ixgbe_write_mbx(hw
, &ctrl
, 1, vf
);
108 rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port
, uint16_t vf
, uint8_t on
)
111 struct ixgbe_mac_info
*mac
;
112 struct rte_eth_dev
*dev
;
113 struct rte_pci_device
*pci_dev
;
115 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
117 dev
= &rte_eth_devices
[port
];
118 pci_dev
= IXGBE_DEV_TO_PCI(dev
);
120 if (!is_ixgbe_supported(dev
))
123 if (vf
>= pci_dev
->max_vfs
)
129 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
132 mac
->ops
.set_vlan_anti_spoofing(hw
, on
, vf
);
138 rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port
, uint16_t vf
, uint8_t on
)
141 struct ixgbe_mac_info
*mac
;
142 struct rte_eth_dev
*dev
;
143 struct rte_pci_device
*pci_dev
;
145 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
147 dev
= &rte_eth_devices
[port
];
148 pci_dev
= IXGBE_DEV_TO_PCI(dev
);
150 if (!is_ixgbe_supported(dev
))
153 if (vf
>= pci_dev
->max_vfs
)
159 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
161 mac
->ops
.set_mac_anti_spoofing(hw
, on
, vf
);
167 rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port
, uint16_t vf
, uint16_t vlan_id
)
171 struct rte_eth_dev
*dev
;
172 struct rte_pci_device
*pci_dev
;
174 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
176 dev
= &rte_eth_devices
[port
];
177 pci_dev
= IXGBE_DEV_TO_PCI(dev
);
179 if (!is_ixgbe_supported(dev
))
182 if (vf
>= pci_dev
->max_vfs
)
185 if (vlan_id
> ETHER_MAX_VLAN_ID
)
188 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
189 ctrl
= IXGBE_READ_REG(hw
, IXGBE_VMVIR(vf
));
192 ctrl
|= IXGBE_VMVIR_VLANA_DEFAULT
;
197 IXGBE_WRITE_REG(hw
, IXGBE_VMVIR(vf
), ctrl
);
203 rte_pmd_ixgbe_set_tx_loopback(uint8_t port
, uint8_t on
)
207 struct rte_eth_dev
*dev
;
209 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
211 dev
= &rte_eth_devices
[port
];
213 if (!is_ixgbe_supported(dev
))
219 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
220 ctrl
= IXGBE_READ_REG(hw
, IXGBE_PFDTXGSWC
);
221 /* enable or disable VMDQ loopback */
223 ctrl
|= IXGBE_PFDTXGSWC_VT_LBEN
;
225 ctrl
&= ~IXGBE_PFDTXGSWC_VT_LBEN
;
227 IXGBE_WRITE_REG(hw
, IXGBE_PFDTXGSWC
, ctrl
);
233 rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port
, uint8_t on
)
238 int num_queues
= (int)(IXGBE_QDE_IDX_MASK
>> IXGBE_QDE_IDX_SHIFT
);
239 struct rte_eth_dev
*dev
;
241 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
243 dev
= &rte_eth_devices
[port
];
245 if (!is_ixgbe_supported(dev
))
251 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
252 for (i
= 0; i
<= num_queues
; i
++) {
253 reg_value
= IXGBE_QDE_WRITE
|
254 (i
<< IXGBE_QDE_IDX_SHIFT
) |
255 (on
& IXGBE_QDE_ENABLE
);
256 IXGBE_WRITE_REG(hw
, IXGBE_QDE
, reg_value
);
263 rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port
, uint16_t vf
, uint8_t on
)
267 struct rte_eth_dev
*dev
;
268 struct rte_pci_device
*pci_dev
;
270 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
272 dev
= &rte_eth_devices
[port
];
273 pci_dev
= IXGBE_DEV_TO_PCI(dev
);
275 if (!is_ixgbe_supported(dev
))
278 /* only support VF's 0 to 63 */
279 if ((vf
>= pci_dev
->max_vfs
) || (vf
> 63))
285 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
286 reg_value
= IXGBE_READ_REG(hw
, IXGBE_SRRCTL(vf
));
288 reg_value
|= IXGBE_SRRCTL_DROP_EN
;
290 reg_value
&= ~IXGBE_SRRCTL_DROP_EN
;
292 IXGBE_WRITE_REG(hw
, IXGBE_SRRCTL(vf
), reg_value
);
298 rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port
, uint16_t vf
, uint8_t on
)
300 struct rte_eth_dev
*dev
;
301 struct rte_pci_device
*pci_dev
;
303 uint16_t queues_per_pool
;
306 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
308 dev
= &rte_eth_devices
[port
];
309 pci_dev
= IXGBE_DEV_TO_PCI(dev
);
310 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
312 if (!is_ixgbe_supported(dev
))
315 if (vf
>= pci_dev
->max_vfs
)
321 RTE_FUNC_PTR_OR_ERR_RET(*dev
->dev_ops
->vlan_strip_queue_set
, -ENOTSUP
);
323 /* The PF has 128 queue pairs and in SRIOV configuration
324 * those queues will be assigned to VF's, so RXDCTL
325 * registers will be dealing with queues which will be
327 * Let's say we have SRIOV configured with 31 VF's then the
328 * first 124 queues 0-123 will be allocated to VF's and only
329 * the last 4 queues 123-127 will be assigned to the PF.
331 if (hw
->mac
.type
== ixgbe_mac_82598EB
)
332 queues_per_pool
= (uint16_t)hw
->mac
.max_rx_queues
/
335 queues_per_pool
= (uint16_t)hw
->mac
.max_rx_queues
/
338 for (q
= 0; q
< queues_per_pool
; q
++)
339 (*dev
->dev_ops
->vlan_strip_queue_set
)(dev
,
340 q
+ vf
* queues_per_pool
, on
);
345 rte_pmd_ixgbe_set_vf_rxmode(uint8_t port
, uint16_t vf
,
346 uint16_t rx_mask
, uint8_t on
)
349 struct rte_eth_dev
*dev
;
350 struct rte_pci_device
*pci_dev
;
354 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
356 dev
= &rte_eth_devices
[port
];
357 pci_dev
= IXGBE_DEV_TO_PCI(dev
);
359 if (!is_ixgbe_supported(dev
))
362 if (vf
>= pci_dev
->max_vfs
)
368 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
369 vmolr
= IXGBE_READ_REG(hw
, IXGBE_VMOLR(vf
));
371 if (hw
->mac
.type
== ixgbe_mac_82598EB
) {
372 PMD_INIT_LOG(ERR
, "setting VF receive mode set should be done"
373 " on 82599 hardware and newer");
376 if (ixgbe_vt_check(hw
) < 0)
379 val
= ixgbe_convert_vm_rx_mask_to_val(rx_mask
, val
);
386 IXGBE_WRITE_REG(hw
, IXGBE_VMOLR(vf
), vmolr
);
392 rte_pmd_ixgbe_set_vf_rx(uint8_t port
, uint16_t vf
, uint8_t on
)
394 struct rte_eth_dev
*dev
;
395 struct rte_pci_device
*pci_dev
;
398 const uint8_t bit1
= 0x1;
401 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
403 dev
= &rte_eth_devices
[port
];
404 pci_dev
= IXGBE_DEV_TO_PCI(dev
);
406 if (!is_ixgbe_supported(dev
))
409 if (vf
>= pci_dev
->max_vfs
)
415 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
417 if (ixgbe_vt_check(hw
) < 0)
420 /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
422 addr
= IXGBE_VFRE(1);
423 val
= bit1
<< (vf
- 32);
425 addr
= IXGBE_VFRE(0);
429 reg
= IXGBE_READ_REG(hw
, addr
);
436 IXGBE_WRITE_REG(hw
, addr
, reg
);
442 rte_pmd_ixgbe_set_vf_tx(uint8_t port
, uint16_t vf
, uint8_t on
)
444 struct rte_eth_dev
*dev
;
445 struct rte_pci_device
*pci_dev
;
448 const uint8_t bit1
= 0x1;
452 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
454 dev
= &rte_eth_devices
[port
];
455 pci_dev
= IXGBE_DEV_TO_PCI(dev
);
457 if (!is_ixgbe_supported(dev
))
460 if (vf
>= pci_dev
->max_vfs
)
466 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
467 if (ixgbe_vt_check(hw
) < 0)
470 /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
472 addr
= IXGBE_VFTE(1);
473 val
= bit1
<< (vf
- 32);
475 addr
= IXGBE_VFTE(0);
479 reg
= IXGBE_READ_REG(hw
, addr
);
486 IXGBE_WRITE_REG(hw
, addr
, reg
);
492 rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port
, uint16_t vlan
,
493 uint64_t vf_mask
, uint8_t vlan_on
)
495 struct rte_eth_dev
*dev
;
500 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
502 dev
= &rte_eth_devices
[port
];
504 if (!is_ixgbe_supported(dev
))
507 if ((vlan
> ETHER_MAX_VLAN_ID
) || (vf_mask
== 0))
510 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
511 if (ixgbe_vt_check(hw
) < 0)
514 for (vf_idx
= 0; vf_idx
< 64; vf_idx
++) {
515 if (vf_mask
& ((uint64_t)(1ULL << vf_idx
))) {
516 ret
= hw
->mac
.ops
.set_vfta(hw
, vlan
, vf_idx
,
527 rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port
, uint16_t vf
,
528 uint16_t tx_rate
, uint64_t q_msk
)
530 struct rte_eth_dev
*dev
;
532 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
534 dev
= &rte_eth_devices
[port
];
536 if (!is_ixgbe_supported(dev
))
539 return ixgbe_set_vf_rate_limit(dev
, vf
, tx_rate
, q_msk
);
543 rte_pmd_ixgbe_macsec_enable(uint8_t port
, uint8_t en
, uint8_t rp
)
546 struct rte_eth_dev
*dev
;
549 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
551 dev
= &rte_eth_devices
[port
];
553 if (!is_ixgbe_supported(dev
))
556 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
558 /* Stop the data paths */
559 if (ixgbe_disable_sec_rx_path(hw
) != IXGBE_SUCCESS
)
563 * As no ixgbe_disable_sec_rx_path equivalent is
564 * implemented for tx in the base code, and we are
565 * not allowed to modify the base code in DPDK, so
566 * just call the hand-written one directly for now.
567 * The hardware support has been checked by
568 * ixgbe_disable_sec_rx_path().
570 ixgbe_disable_sec_tx_path_generic(hw
);
572 /* Enable Ethernet CRC (required by MACsec offload) */
573 ctrl
= IXGBE_READ_REG(hw
, IXGBE_HLREG0
);
574 ctrl
|= IXGBE_HLREG0_TXCRCEN
| IXGBE_HLREG0_RXCRCSTRP
;
575 IXGBE_WRITE_REG(hw
, IXGBE_HLREG0
, ctrl
);
577 /* Enable the TX and RX crypto engines */
578 ctrl
= IXGBE_READ_REG(hw
, IXGBE_SECTXCTRL
);
579 ctrl
&= ~IXGBE_SECTXCTRL_SECTX_DIS
;
580 IXGBE_WRITE_REG(hw
, IXGBE_SECTXCTRL
, ctrl
);
582 ctrl
= IXGBE_READ_REG(hw
, IXGBE_SECRXCTRL
);
583 ctrl
&= ~IXGBE_SECRXCTRL_SECRX_DIS
;
584 IXGBE_WRITE_REG(hw
, IXGBE_SECRXCTRL
, ctrl
);
586 ctrl
= IXGBE_READ_REG(hw
, IXGBE_SECTXMINIFG
);
587 ctrl
&= ~IXGBE_SECTX_MINSECIFG_MASK
;
589 IXGBE_WRITE_REG(hw
, IXGBE_SECTXMINIFG
, ctrl
);
591 /* Enable SA lookup */
592 ctrl
= IXGBE_READ_REG(hw
, IXGBE_LSECTXCTRL
);
593 ctrl
&= ~IXGBE_LSECTXCTRL_EN_MASK
;
594 ctrl
|= en
? IXGBE_LSECTXCTRL_AUTH_ENCRYPT
:
595 IXGBE_LSECTXCTRL_AUTH
;
596 ctrl
|= IXGBE_LSECTXCTRL_AISCI
;
597 ctrl
&= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK
;
598 ctrl
|= IXGBE_MACSEC_PNTHRSH
& IXGBE_LSECTXCTRL_PNTHRSH_MASK
;
599 IXGBE_WRITE_REG(hw
, IXGBE_LSECTXCTRL
, ctrl
);
601 ctrl
= IXGBE_READ_REG(hw
, IXGBE_LSECRXCTRL
);
602 ctrl
&= ~IXGBE_LSECRXCTRL_EN_MASK
;
603 ctrl
|= IXGBE_LSECRXCTRL_STRICT
<< IXGBE_LSECRXCTRL_EN_SHIFT
;
604 ctrl
&= ~IXGBE_LSECRXCTRL_PLSH
;
606 ctrl
|= IXGBE_LSECRXCTRL_RP
;
608 ctrl
&= ~IXGBE_LSECRXCTRL_RP
;
609 IXGBE_WRITE_REG(hw
, IXGBE_LSECRXCTRL
, ctrl
);
611 /* Start the data paths */
612 ixgbe_enable_sec_rx_path(hw
);
615 * As no ixgbe_enable_sec_rx_path equivalent is
616 * implemented for tx in the base code, and we are
617 * not allowed to modify the base code in DPDK, so
618 * just call the hand-written one directly for now.
620 ixgbe_enable_sec_tx_path_generic(hw
);
626 rte_pmd_ixgbe_macsec_disable(uint8_t port
)
629 struct rte_eth_dev
*dev
;
632 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
634 dev
= &rte_eth_devices
[port
];
636 if (!is_ixgbe_supported(dev
))
639 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
641 /* Stop the data paths */
642 if (ixgbe_disable_sec_rx_path(hw
) != IXGBE_SUCCESS
)
646 * As no ixgbe_disable_sec_rx_path equivalent is
647 * implemented for tx in the base code, and we are
648 * not allowed to modify the base code in DPDK, so
649 * just call the hand-written one directly for now.
650 * The hardware support has been checked by
651 * ixgbe_disable_sec_rx_path().
653 ixgbe_disable_sec_tx_path_generic(hw
);
655 /* Disable the TX and RX crypto engines */
656 ctrl
= IXGBE_READ_REG(hw
, IXGBE_SECTXCTRL
);
657 ctrl
|= IXGBE_SECTXCTRL_SECTX_DIS
;
658 IXGBE_WRITE_REG(hw
, IXGBE_SECTXCTRL
, ctrl
);
660 ctrl
= IXGBE_READ_REG(hw
, IXGBE_SECRXCTRL
);
661 ctrl
|= IXGBE_SECRXCTRL_SECRX_DIS
;
662 IXGBE_WRITE_REG(hw
, IXGBE_SECRXCTRL
, ctrl
);
664 /* Disable SA lookup */
665 ctrl
= IXGBE_READ_REG(hw
, IXGBE_LSECTXCTRL
);
666 ctrl
&= ~IXGBE_LSECTXCTRL_EN_MASK
;
667 ctrl
|= IXGBE_LSECTXCTRL_DISABLE
;
668 IXGBE_WRITE_REG(hw
, IXGBE_LSECTXCTRL
, ctrl
);
670 ctrl
= IXGBE_READ_REG(hw
, IXGBE_LSECRXCTRL
);
671 ctrl
&= ~IXGBE_LSECRXCTRL_EN_MASK
;
672 ctrl
|= IXGBE_LSECRXCTRL_DISABLE
<< IXGBE_LSECRXCTRL_EN_SHIFT
;
673 IXGBE_WRITE_REG(hw
, IXGBE_LSECRXCTRL
, ctrl
);
675 /* Start the data paths */
676 ixgbe_enable_sec_rx_path(hw
);
679 * As no ixgbe_enable_sec_rx_path equivalent is
680 * implemented for tx in the base code, and we are
681 * not allowed to modify the base code in DPDK, so
682 * just call the hand-written one directly for now.
684 ixgbe_enable_sec_tx_path_generic(hw
);
690 rte_pmd_ixgbe_macsec_config_txsc(uint8_t port
, uint8_t *mac
)
693 struct rte_eth_dev
*dev
;
696 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
698 dev
= &rte_eth_devices
[port
];
700 if (!is_ixgbe_supported(dev
))
703 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
705 ctrl
= mac
[0] | (mac
[1] << 8) | (mac
[2] << 16) | (mac
[3] << 24);
706 IXGBE_WRITE_REG(hw
, IXGBE_LSECTXSCL
, ctrl
);
708 ctrl
= mac
[4] | (mac
[5] << 8);
709 IXGBE_WRITE_REG(hw
, IXGBE_LSECTXSCH
, ctrl
);
715 rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port
, uint8_t *mac
, uint16_t pi
)
718 struct rte_eth_dev
*dev
;
721 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
723 dev
= &rte_eth_devices
[port
];
725 if (!is_ixgbe_supported(dev
))
728 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
730 ctrl
= mac
[0] | (mac
[1] << 8) | (mac
[2] << 16) | (mac
[3] << 24);
731 IXGBE_WRITE_REG(hw
, IXGBE_LSECRXSCL
, ctrl
);
733 pi
= rte_cpu_to_be_16(pi
);
734 ctrl
= mac
[4] | (mac
[5] << 8) | (pi
<< 16);
735 IXGBE_WRITE_REG(hw
, IXGBE_LSECRXSCH
, ctrl
);
741 rte_pmd_ixgbe_macsec_select_txsa(uint8_t port
, uint8_t idx
, uint8_t an
,
742 uint32_t pn
, uint8_t *key
)
745 struct rte_eth_dev
*dev
;
748 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
750 dev
= &rte_eth_devices
[port
];
752 if (!is_ixgbe_supported(dev
))
755 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
757 if (idx
!= 0 && idx
!= 1)
763 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
765 /* Set the PN and key */
766 pn
= rte_cpu_to_be_32(pn
);
768 IXGBE_WRITE_REG(hw
, IXGBE_LSECTXPN0
, pn
);
770 for (i
= 0; i
< 4; i
++) {
771 ctrl
= (key
[i
* 4 + 0] << 0) |
772 (key
[i
* 4 + 1] << 8) |
773 (key
[i
* 4 + 2] << 16) |
774 (key
[i
* 4 + 3] << 24);
775 IXGBE_WRITE_REG(hw
, IXGBE_LSECTXKEY0(i
), ctrl
);
778 IXGBE_WRITE_REG(hw
, IXGBE_LSECTXPN1
, pn
);
780 for (i
= 0; i
< 4; i
++) {
781 ctrl
= (key
[i
* 4 + 0] << 0) |
782 (key
[i
* 4 + 1] << 8) |
783 (key
[i
* 4 + 2] << 16) |
784 (key
[i
* 4 + 3] << 24);
785 IXGBE_WRITE_REG(hw
, IXGBE_LSECTXKEY1(i
), ctrl
);
789 /* Set AN and select the SA */
790 ctrl
= (an
<< idx
* 2) | (idx
<< 4);
791 IXGBE_WRITE_REG(hw
, IXGBE_LSECTXSA
, ctrl
);
797 rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port
, uint8_t idx
, uint8_t an
,
798 uint32_t pn
, uint8_t *key
)
801 struct rte_eth_dev
*dev
;
804 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
806 dev
= &rte_eth_devices
[port
];
808 if (!is_ixgbe_supported(dev
))
811 hw
= IXGBE_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
813 if (idx
!= 0 && idx
!= 1)
820 pn
= rte_cpu_to_be_32(pn
);
821 IXGBE_WRITE_REG(hw
, IXGBE_LSECRXPN(idx
), pn
);
824 for (i
= 0; i
< 4; i
++) {
825 ctrl
= (key
[i
* 4 + 0] << 0) |
826 (key
[i
* 4 + 1] << 8) |
827 (key
[i
* 4 + 2] << 16) |
828 (key
[i
* 4 + 3] << 24);
829 IXGBE_WRITE_REG(hw
, IXGBE_LSECRXKEY(idx
, i
), ctrl
);
832 /* Set the AN and validate the SA */
833 ctrl
= an
| (1 << 2);
834 IXGBE_WRITE_REG(hw
, IXGBE_LSECRXSA(idx
), ctrl
);
840 rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port
,
844 struct rte_eth_dev
*dev
;
845 struct ixgbe_dcb_config
*dcb_config
;
846 struct ixgbe_dcb_tc_config
*tc
;
847 struct rte_eth_conf
*eth_conf
;
848 struct ixgbe_bw_conf
*bw_conf
;
853 RTE_ETH_VALID_PORTID_OR_ERR_RET(port
, -ENODEV
);
855 dev
= &rte_eth_devices
[port
];
857 if (!is_ixgbe_supported(dev
))
860 if (tc_num
> IXGBE_DCB_MAX_TRAFFIC_CLASS
) {
861 PMD_DRV_LOG(ERR
, "TCs should be no more than %d.",
862 IXGBE_DCB_MAX_TRAFFIC_CLASS
);
866 dcb_config
= IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev
->data
->dev_private
);
867 bw_conf
= IXGBE_DEV_PRIVATE_TO_BW_CONF(dev
->data
->dev_private
);
868 eth_conf
= &dev
->data
->dev_conf
;
870 if (eth_conf
->txmode
.mq_mode
== ETH_MQ_TX_DCB
) {
871 nb_tcs
= eth_conf
->tx_adv_conf
.dcb_tx_conf
.nb_tcs
;
872 } else if (eth_conf
->txmode
.mq_mode
== ETH_MQ_TX_VMDQ_DCB
) {
873 if (eth_conf
->tx_adv_conf
.vmdq_dcb_tx_conf
.nb_queue_pools
==
882 if (nb_tcs
!= tc_num
) {
884 "Weight should be set for all %d enabled TCs.",
890 for (i
= 0; i
< nb_tcs
; i
++)
894 "The summary of the TC weight should be 100.");
898 for (i
= 0; i
< nb_tcs
; i
++) {
899 tc
= &dcb_config
->tc_config
[i
];
900 tc
->path
[IXGBE_DCB_TX_CONFIG
].bwg_percent
= bw_weight
[i
];
902 for (; i
< IXGBE_DCB_MAX_TRAFFIC_CLASS
; i
++) {
903 tc
= &dcb_config
->tc_config
[i
];
904 tc
->path
[IXGBE_DCB_TX_CONFIG
].bwg_percent
= 0;
907 bw_conf
->tc_num
= nb_tcs
;