/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */
#include <stdint.h>
#include <string.h>
#include <errno.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_memcpy.h>
#include <rte_malloc.h>
#include <rte_random.h>

#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"
#define IXGBE_MAX_VFTA     (128)
#define IXGBE_VF_MSG_SIZE_DEFAULT 1
#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5
#define IXGBE_ETHERTYPE_FLOW_CTRL 0x8808
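
/*
 * 0x8808 is the IEEE 802.3 MAC control ethertype carried by 802.3x
 * PAUSE (flow control) frames; it is matched below so that VFs cannot
 * transmit their own PAUSE frames.
 */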

static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	return pci_dev->max_vfs;
}

static inline int
ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
	unsigned char vf_mac_addr[ETHER_ADDR_LEN];
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	uint16_t vfn;

	for (vfn = 0; vfn < vf_num; vfn++) {
		eth_random_addr(vf_mac_addr);
		/* keep the random address as default */
		memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
		       ETHER_ADDR_LEN);
	}

	return 0;
}

static inline int
ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	intr->mask |= IXGBE_EICR_MAILBOX;

	return 0;
}

void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_vf_info **vfinfo =
		IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
	struct ixgbe_mirror_info *mirror_info =
		IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
	struct ixgbe_uta_info *uta_info =
		IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint16_t vf_num;
	uint8_t nb_queue;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return;

	*vfinfo = rte_zmalloc("vf_info",
			      sizeof(struct ixgbe_vf_info) * vf_num, 0);
	if (*vfinfo == NULL)
		rte_panic("Cannot allocate memory for private VF data\n");

	rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);

	memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
	hw->mac.mc_filter_type = 0;
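
	/*
	 * The pool count is rounded up from the number of requested VFs,
	 * and the 128 hardware queues are split evenly across the pools:
	 * 2 queues per pool with 64 pools, 4 with 32, and 8 with 16.
	 */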
	if (vf_num >= ETH_32_POOLS) {
		nb_queue = 2;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
	} else if (vf_num >= ETH_16_POOLS) {
		nb_queue = 4;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
	} else {
		nb_queue = 8;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
	}

	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx =
		(uint16_t)(vf_num * nb_queue);

	ixgbe_vf_perm_addr_gen(eth_dev, vf_num);

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* set mb interrupt mask */
	ixgbe_mb_intr_setup(eth_dev);
}

void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_vf_info **vfinfo;
	uint16_t vf_num;
	int ret;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return;

	vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
	if (*vfinfo == NULL)
		return;

	ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
	if (ret)
		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);

	rte_free(*vfinfo);
	*vfinfo = NULL;
}
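
/*
 * Install an ethertype filter matching 802.3x flow control (PAUSE)
 * frames and mark it for Tx anti-spoofing, so that PAUSE frames
 * originated by a VF are dropped rather than forwarded by the Tx
 * switch.
 */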
static void
ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	uint16_t vf_num;
	int i;
	struct ixgbe_ethertype_filter ethertype_filter;

	if (!hw->mac.ops.set_ethertype_anti_spoofing) {
		RTE_LOG(INFO, PMD,
			"ether type anti-spoofing is not supported.\n");
		return;
	}

	i = ixgbe_ethertype_filter_lookup(filter_info,
					  IXGBE_ETHERTYPE_FLOW_CTRL);
	if (i >= 0) {
		RTE_LOG(ERR, PMD, "An ethertype filter entity for flow"
			" control already exists!\n");
		return;
	}

	ethertype_filter.ethertype = IXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqf = IXGBE_ETQF_FILTER_EN |
				IXGBE_ETQF_TX_ANTISPOOF |
				IXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqs = 0;
	ethertype_filter.conf = TRUE;
	i = ixgbe_ethertype_filter_insert(filter_info,
					  &ethertype_filter);
	if (i < 0) {
		RTE_LOG(ERR, PMD, "Cannot find an unused ethertype filter"
			" entity for flow control.\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
			(IXGBE_ETQF_FILTER_EN |
			 IXGBE_ETQF_TX_ANTISPOOF |
			 IXGBE_ETHERTYPE_FLOW_CTRL));

	vf_num = dev_num_vf(eth_dev);
	for (i = 0; i < vf_num; i++)
		hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
}

int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
	uint32_t vtctl, fcrth;
	uint32_t vfre_slot, vfre_offset;
	uint16_t vf_num;
	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint32_t gpie, gcr_ext;
	uint32_t vlanctrl;
	int i;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return -1;

	/* enable VMDq and set the default pool for PF */
	vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
	vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
	vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
		 << IXGBE_VT_CTL_POOL_SHIFT;
	vtctl |= IXGBE_VT_CTL_REPLEN;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);

	vfre_offset = vf_num & VFRE_MASK;
	vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;
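
	/*
	 * VFRE/VFTE each consist of two 32-bit slots: slot 0 covers
	 * pools 0-31, slot 1 covers pools 32-63. Only the PF pools
	 * (pool vf_num and above) are enabled here; VF pools are enabled
	 * individually when each VF resets. The slot holding vf_num gets
	 * all bits from vfre_offset upward; the other slot is written
	 * with vfre_slot - 1, which wraps to all ones when vfre_slot is
	 * 0 (that slot is purely PF pools) and is 0 when vfre_slot is 1
	 * (that slot is purely VF pools).
	 */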
	/* Enable pools reserved to PF only */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0U) << vfre_offset);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0U) << vfre_offset);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);

	/* PFDMA Tx General Switch Control Enables VMDQ loopback */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* clear VMDq map to permanent RAR 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	/* clear VMDq map of the last (scan) RAR entry */
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0);
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);

	/* set VMDq map to default PF pool */
	hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);

	/*
	 * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
	 */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;

	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
	case ETH_64_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case ETH_32_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	case ETH_16_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
		gpie |= IXGBE_GPIE_VTMODE_16;
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/*
	 * enable vlan filtering and allow all vlan tags through
	 */
	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

	/* VFTA - enable all vlan filters */
	for (i = 0; i < IXGBE_MAX_VFTA; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);

	/* Enable MAC Anti-Spoofing */
	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);

	/* set flow control threshold to max to avoid tx switch hang */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	ixgbe_add_tx_flow_control_drop_filter(eth_dev);

	return 0;
}

static void
set_rx_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
	uint16_t vfn = dev_num_vf(dev);

	/* Check for Promiscuous and All Multicast modes */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	/* set all bits that we expect to always be set */
	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
	fctrl |= IXGBE_FCTRL_BAM;

	/* clear the bits we are changing the status of */
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	if (dev_data->promiscuous) {
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
	} else if (dev_data->all_multicast) {
		fctrl |= IXGBE_FCTRL_MPE;
		vmolr |= IXGBE_VMOLR_MPE;
	} else {
		vmolr |= IXGBE_VMOLR_ROMPE;
	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) &
			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
			   IXGBE_VMOLR_ROPE);
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	ixgbe_vlan_hw_strip_config(dev);
}

static inline void
ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	vmolr |= (IXGBE_VMOLR_ROPE |
		  IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);

	/* reset multicast table array for vf */
	vfinfo[vf].num_vf_mc_hashes = 0;
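
	/*
	 * VF MAC addresses are kept at the top of the RAR table: VF n
	 * owns entry num_rar_entries - (n + 1), so clearing rar_entry
	 * below drops this VF's address until it is reprogrammed.
	 */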
	/* Flush and reset the MTA with the new values */
	set_rx_mode(dev);

	hw->mac.ops.clear_rar(hw, rar_entry);
}

static inline void
ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;
	uint32_t reg_offset, vf_shift;
	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	uint8_t nb_q_per_pool;
	int i;

	vf_shift = vf & VFRE_MASK;
	reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;

	/* enable transmit for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
	reg |= 1 << vf_shift;
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
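
	/*
	 * Each QDE write selects one queue through the index field and
	 * sets its drop-enable bit; packets arriving for a queue with no
	 * free descriptors are then dropped instead of stalling the Rx
	 * packet buffer.
	 */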
	/* enable all queue drop for IOV */
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
		IXGBE_WRITE_FLUSH(hw);
		reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
		reg |= i << IXGBE_QDE_IDX_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
	}

	/* enable receive for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
	reg |= 1 << vf_shift;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

	/* Enable counting of spoofed packets in the SSVPC register */
	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
	reg |= 1 << vf_shift;
	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

	ixgbe_vf_reset_event(dev, vf);
}

static int
ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vmolr;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	RTE_LOG(INFO, PMD, "VF %u: disabling multicast promiscuous\n", vf);

	vmolr &= ~IXGBE_VMOLR_MPE;

	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}

static int
ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	ixgbe_vf_reset_msg(dev, vf);

	hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);

	/* Disable multicast promiscuous at reset */
	ixgbe_disable_vf_mc_promisc(dev, vf);

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
	rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
	/*
	 * Piggyback the multicast filter type so VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}

static int
ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
			   ETHER_ADDR_LEN);
		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
					   IXGBE_RAH_AV);
	}
	return -1;
}

static int
ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
		IXGBE_VT_MSGINFO_SHIFT;
	uint16_t *hash_list = (uint16_t *)&msgbuf[1];
	uint32_t mta_idx;
	uint32_t mta_shift;
	const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F;
	const uint32_t IXGBE_MTA_BIT_SHIFT = 5;
	const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
	uint32_t reg_val;
	int i;
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	/* Disable multicast promiscuous first */
	ixgbe_disable_vf_mc_promisc(dev, vf);

	/* only so many hash values supported */
	nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);

	/* store the mc entries */
	vfinfo[vf].num_vf_mc_hashes = (uint16_t)nb_entries;
	for (i = 0; i < nb_entries; i++)
		vfinfo[vf].vf_mc_hashes[i] = hash_list[i];

	if (nb_entries == 0) {
		vmolr &= ~IXGBE_VMOLR_ROMPE;
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
		return 0;
	}
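
	/*
	 * Each 12-bit hash value selects one bit in the 4096-bit MTA:
	 * the upper 7 bits pick one of the 128 32-bit MTA registers and
	 * the low 5 bits pick the bit within it. For example, hash 0x86d
	 * sets bit 13 (0x86d & 0x1f) of MTA register 67 (0x86d >> 5).
	 */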
	for (i = 0; i < vfinfo[vf].num_vf_mc_hashes; i++) {
		mta_idx = (vfinfo[vf].vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
			  & IXGBE_MTA_INDEX_MASK;
		mta_shift = vfinfo[vf].vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
		reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
		reg_val |= (1 << mta_shift);
		IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}

static int
ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	int add, vid;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

	add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
	      >> IXGBE_VT_MSGINFO_SHIFT;
	vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);

	if (add)
		vfinfo[vf].vlan_count++;
	else if (vfinfo[vf].vlan_count)
		vfinfo[vf].vlan_count--;
	return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add, false);
}

static int
ixgbe_set_vf_lpe(struct rte_eth_dev *dev,
		 __rte_unused uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t new_mtu = msgbuf[1];
	uint32_t max_frs;
	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* X540 and X550 support jumbo frames in IOV mode */
	if (hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -1;

	if ((max_frame < ETHER_MIN_LEN) ||
	    (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
		return -1;
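
	/*
	 * MAXFRS keeps the maximum frame size in its upper 16 bits (the
	 * MFS field). It is only ever grown here, never shrunk, so pools
	 * already running with a larger frame size keep their limit.
	 */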
	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
	if (max_frs < new_mtu) {
		max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}

	return 0;
}

static int
ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	uint32_t api_version = msgbuf[1];
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

	switch (api_version) {
	case ixgbe_mbox_api_10:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		vfinfo[vf].api_version = (uint8_t)api_version;
		return 0;
	default:
		break;
	}

	RTE_LOG(ERR, PMD, "Negotiate invalid api version %u from VF %d\n",
		api_version, vf);

	return -1;
}

static int
ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	struct rte_eth_conf *eth_conf;
	struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
	u8 num_tcs;
	struct ixgbe_hw *hw;
	u32 vmvir;
#define IXGBE_VMVIR_VLANA_MASK		0xC0000000
#define IXGBE_VMVIR_VLAN_VID_MASK	0x00000FFF
#define IXGBE_VMVIR_VLAN_UP_MASK	0x0000E000
#define VLAN_PRIO_SHIFT			13
	u32 vlana;
	u32 vid;
	u32 user_priority;

	/* Verify if the PF supports the mbox APIs version or not */
	switch (vfinfo[vf].api_version) {
	case ixgbe_mbox_api_20:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		break;
	default:
		return -1;
	}

	/* Notify VF of Rx and Tx queue number */
	msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	/* Notify VF of default queue */
	msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;

	/* Notify VF of number of DCB traffic classes */
	eth_conf = &dev->data->dev_conf;
	switch (eth_conf->txmode.mq_mode) {
	case ETH_MQ_TX_NONE:
	case ETH_MQ_TX_DCB:
		RTE_LOG(ERR, PMD, "PF must work with virtualization for VF %u"
			", but its tx mode = %d\n", vf,
			eth_conf->txmode.mq_mode);
		return -1;

	case ETH_MQ_TX_VMDQ_DCB:
		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
		case ETH_16_POOLS:
			num_tcs = ETH_8_TCS;
			break;
		case ETH_32_POOLS:
			num_tcs = ETH_4_TCS;
			break;
		default:
			return -1;
		}
		break;
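
	/*
	 * Without DCB, a VF is told num_tcs = 1 only when a default VLAN
	 * tag (non-zero VID or user priority in VMVIR) is transparently
	 * inserted on its behalf, so it can account for the extra tag.
	 */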
	/* ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
	case ETH_MQ_TX_VMDQ_ONLY:
		hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
		vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
		vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
		vid = vmvir & IXGBE_VMVIR_VLAN_VID_MASK;
		user_priority =
			(vmvir & IXGBE_VMVIR_VLAN_UP_MASK) >> VLAN_PRIO_SHIFT;
		if ((vlana == IXGBE_VMVIR_VLANA_DEFAULT) &&
		    ((vid != 0) || (user_priority != 0)))
			num_tcs = 1;
		else
			num_tcs = 0;
		break;

	default:
		RTE_LOG(ERR, PMD, "PF works with invalid mode = %d\n",
			eth_conf->txmode.mq_mode);
		return -1;
	}
	msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;

	return 0;
}

static int
ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int xcast_mode = msgbuf[1];	/* msgbuf contains the flag to enable */
	u32 vmolr, fctrl, disable, enable;

	switch (vfinfo[vf].api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		break;
	case ixgbe_mbox_api_13:
		break;
	default:
		return -1;
	}

	if (vfinfo[vf].xcast_mode == xcast_mode)
		goto out;
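
	/*
	 * VMOLR bits used below: BAM accepts broadcasts, ROMPE accepts
	 * multicasts matching the MTA, MPE is multicast promiscuous,
	 * UPE is unicast promiscuous and VPE is VLAN promiscuous; each
	 * xcast mode is just a different enable/disable split of these.
	 */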
	switch (xcast_mode) {
	case IXGBEVF_XCAST_MODE_NONE:
		disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
			  IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = 0;
		break;
	case IXGBEVF_XCAST_MODE_MULTI:
		disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
		break;
	case IXGBEVF_XCAST_MODE_ALLMULTI:
		disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
		break;
	case IXGBEVF_XCAST_MODE_PROMISC:
		if (hw->mac.type <= ixgbe_mac_82599EB)
			return -1;

		fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		if (!(fctrl & IXGBE_FCTRL_UPE)) {
			/* VF promisc requires PF in promisc */
			RTE_LOG(ERR, PMD,
				"Enabling VF promisc requires PF in promisc\n");
			return -1;
		}

		disable = 0;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
			 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		break;
	default:
		return -1;
	}

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
	vfinfo[vf].xcast_mode = xcast_mode;

out:
	msgbuf[1] = xcast_mode;

	return 0;
}

static int
ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
	uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
	uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT;
	uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
	int32_t retval;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	struct rte_pmd_ixgbe_mb_event_param ret_param;

	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
	if (retval) {
		PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
		return retval;
	}

	/* do nothing if the message has already been processed */
	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
		return retval;

	/* flush the ack before we write any messages back */
	IXGBE_WRITE_FLUSH(hw);

	/*
	 * initialise the structure sent to the user application;
	 * it will return its response in the retval field
	 */
	ret_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
	ret_param.vfid = vf;
	ret_param.msg_type = msgbuf[0] & 0xFFFF;
	ret_param.msg = (void *)msgbuf;
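
	/*
	 * Mailbox protocol: the low 16 bits of word 0 identify the
	 * request. The reply reuses word 0 with ACK or NACK set
	 * according to the processing result, plus the CTS flag to tell
	 * the VF that the PF is up and serving requests.
	 */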
	/* perform VF reset */
	if (msgbuf[0] == IXGBE_VF_RESET) {
		int ret = ixgbe_vf_reset(dev, vf, msgbuf);

		vfinfo[vf].clear_to_send = true;

		/* notify application about VF reset */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
					      &ret_param);
		return ret;
	}

	/*
	 * ask the user application if we are allowed to perform those functions:
	 * if ret_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED,
	 * then business as usual;
	 * if 0, do nothing and send ACK to VF;
	 * if ret_param.retval > 1, do nothing and send NAK to VF
	 */
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param);

	retval = ret_param.retval;

	/* check & process VF to PF mailbox message */
	switch ((msgbuf[0] & 0xFFFF)) {
	case IXGBE_VF_SET_MAC_ADDR:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_MULTICAST:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_LPE:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_VLAN:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
		break;
	case IXGBE_VF_GET_QUEUES:
		retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
		msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE;
		break;
	case IXGBE_VF_UPDATE_XCAST_MODE:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf);
		break;
	default:
		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
		retval = IXGBE_ERR_MBX;
		break;
	}

	/* respond to the VF according to the message processing result */
	if (retval)
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, msgbuf, msg_size, vf);

	return retval;
}

static inline void
ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
	uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

	if (!vfinfo[vf].clear_to_send)
		ixgbe_write_mbx(hw, &msg, 1, vf);
}

void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
{
	uint16_t vf;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
		/* check & process vf function level reset */
		if (!ixgbe_check_for_rst(hw, vf))
			ixgbe_vf_reset_event(eth_dev, vf);

		/* check & process vf mailbox messages */
		if (!ixgbe_check_for_msg(hw, vf))
			ixgbe_rcv_msg_from_vf(eth_dev, vf);

		/* check & process acks from vf */
		if (!ixgbe_check_for_ack(hw, vf))
			ixgbe_rcv_ack_from_vf(eth_dev, vf);
	}
}