/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */
#include <stdint.h>
#include <string.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_memcpy.h>
#include <rte_malloc.h>
#include <rte_random.h>

#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"
#define IXGBE_MAX_VFTA     (128)
#define IXGBE_VF_MSG_SIZE_DEFAULT 1
#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5
#define IXGBE_ETHERTYPE_FLOW_CTRL 0x8808
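
/*
 * PF host side of ixgbe SR-IOV: per-VF state management, VMDq pool
 * configuration, and servicing of the PF <-> VF mailbox.
 */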
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	return pci_dev->max_vfs;
}
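
/*
 * Assign every VF a random default MAC address; the VF reads it back
 * over the mailbox as part of the reset handshake below.
 */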
static inline
int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
	unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	uint16_t vfn;

	for (vfn = 0; vfn < vf_num; vfn++) {
		rte_eth_random_addr(vf_mac_addr);
		/* keep the random address as default */
		memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
		       RTE_ETHER_ADDR_LEN);
	}

	return 0;
}

static inline int
ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	intr->mask |= IXGBE_EICR_MAILBOX;

	return 0;
}

void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_vf_info **vfinfo =
		IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
	struct ixgbe_mirror_info *mirror_info =
		IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
	struct ixgbe_uta_info *uta_info =
		IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint16_t vf_num;
	uint8_t nb_queue;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return;

	*vfinfo = rte_zmalloc("vf_info",
			      sizeof(struct ixgbe_vf_info) * vf_num, 0);
	if (*vfinfo == NULL)
		rte_panic("Cannot allocate memory for private VF data\n");

	rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);

	memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
	hw->mac.mc_filter_type = 0;
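
	/*
	 * Pick the VMDq pool layout from the VF count: the 82599 splits
	 * its 128 queues into 64 pools of 2 queues, 32 pools of 4, or
	 * 16 pools of 8, so fewer VFs means more queues per pool.
	 */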
	if (vf_num >= ETH_32_POOLS) {
		nb_queue = 2;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
	} else if (vf_num >= ETH_16_POOLS) {
		nb_queue = 4;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
	} else {
		nb_queue = 8;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
	}

	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx =
		(uint16_t)(vf_num * nb_queue);

	ixgbe_vf_perm_addr_gen(eth_dev, vf_num);

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* set mb interrupt mask */
	ixgbe_mb_intr_setup(eth_dev);
}

void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_vf_info **vfinfo;
	uint16_t vf_num;
	int ret;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return;

	vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
	if (*vfinfo == NULL)
		return;

	ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
	if (ret)
		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);

	rte_free(*vfinfo);
	*vfinfo = NULL;
}
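
/*
 * Install an ethertype filter matching IEEE 802.3x PAUSE frames
 * (ethertype 0x8808) and enable per-VF ethertype anti-spoofing, so a
 * misbehaving VF cannot stall the Tx switch with flow control frames.
 */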
static void
ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	uint16_t vf_num;
	int i;
	struct ixgbe_ethertype_filter ethertype_filter;

	if (!hw->mac.ops.set_ethertype_anti_spoofing) {
		PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported.\n");
		return;
	}

	i = ixgbe_ethertype_filter_lookup(filter_info,
					  IXGBE_ETHERTYPE_FLOW_CTRL);
	if (i >= 0) {
		PMD_DRV_LOG(ERR, "An ether type filter entity for flow control already exists!\n");
		return;
	}

	ethertype_filter.ethertype = IXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqf = IXGBE_ETQF_FILTER_EN |
				IXGBE_ETQF_TX_ANTISPOOF |
				IXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqs = 0;
	ethertype_filter.conf = TRUE;
	i = ixgbe_ethertype_filter_insert(filter_info,
					  &ethertype_filter);
	if (i < 0) {
		PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control.\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
			(IXGBE_ETQF_FILTER_EN |
			 IXGBE_ETQF_TX_ANTISPOOF |
			 IXGBE_ETHERTYPE_FLOW_CTRL));

	vf_num = dev_num_vf(eth_dev);
	for (i = 0; i < vf_num; i++)
		hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
}

int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
	uint32_t vtctl, fcrth;
	uint32_t vfre_slot, vfre_offset;
	uint32_t vlanctrl;
	uint16_t vf_num;
	int i;
	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint32_t gpie, gcr_ext;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return -1;

	/* enable VMDq and set the default pool for PF */
	vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
	vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
	vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
		<< IXGBE_VT_CTL_POOL_SHIFT;
	vtctl |= IXGBE_VT_CTL_REPLEN;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
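
	/*
	 * VFRE/VFTE hold one Rx/Tx enable bit per pool, 32 pools per
	 * 32-bit register. Pools 0..vf_num-1 belong to the VFs; the
	 * writes below enable only the pools from vf_num upward, which
	 * are reserved to the PF.
	 */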
	vfre_offset = vf_num & VFRE_MASK;
	vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;

	/* Enable pools reserved to PF only */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0U) << vfre_offset);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0U) << vfre_offset);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);

	/* PFDMA Tx General Switch Control Enables VMDQ loopback */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* clear VMDq map to permanent rar 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	/* clear VMDq map to scan rar 127 */
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0);
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);

	/* set VMDq map to default PF pool */
	hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);

	/*
	 * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
	 */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;

	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
	case ETH_64_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case ETH_32_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	case ETH_16_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
		gpie |= IXGBE_GPIE_VTMODE_16;
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/*
	 * enable vlan filtering and allow all vlan tags through
	 */
	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

	/* VFTA - enable all vlan filters */
	for (i = 0; i < IXGBE_MAX_VFTA; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);

	/* Enable MAC Anti-Spoofing */
	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);

	/* set flow control threshold to max to avoid tx switch hang */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	ixgbe_add_tx_flow_control_drop_filter(eth_dev);

	return 0;
}
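
/*
 * Mirror the device-level promiscuous/allmulti state into FCTRL and
 * into the PF pool's VMOLR (pool index dev_num_vf(dev)), so the PF
 * pool sees the same traffic a non-SR-IOV port would.
 */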
static void
set_rx_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
	uint16_t vfn = dev_num_vf(dev);

	/* Check for Promiscuous and All Multicast modes */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	/* set all bits that we expect to always be set */
	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
	fctrl |= IXGBE_FCTRL_BAM;

	/* clear the bits we are changing the status of */
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	if (dev_data->promiscuous) {
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
	} else {
		if (dev_data->all_multicast) {
			fctrl |= IXGBE_FCTRL_MPE;
			vmolr |= IXGBE_VMOLR_MPE;
		} else {
			vmolr |= IXGBE_VMOLR_ROMPE;
		}
	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) &
			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
			   IXGBE_VMOLR_ROPE);
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	ixgbe_vlan_hw_strip_config(dev);
}
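
/*
 * Bring a VF's pool back to a clean state after a function-level
 * reset: restore the default VMOLR accept bits, drop any port VLAN
 * insertion, clear its multicast hash list, and release its RAR
 * entry. Per-VF MAC addresses live at the top of the RAR table,
 * counting down (entry num_rar_entries - vf - 1).
 */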
static void
ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	vmolr |= (IXGBE_VMOLR_ROPE |
		  IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);

	/* reset multicast table array for vf */
	vfinfo[vf].num_vf_mc_hashes = 0;

	/* reset rx mode */
	set_rx_mode(dev);

	hw->mac.ops.clear_rar(hw, rar_entry);
}

static void
ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;
	uint32_t reg_offset, vf_shift;
	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	uint8_t nb_q_per_pool;
	int i;

	vf_shift = vf & VFRE_MASK;
	reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;

	/* enable transmit for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
	reg |= (1 << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
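
	/*
	 * QDE is an indexed register: each write sets the drop-enable
	 * bit for one queue, with the queue index carried in the
	 * written value itself.
	 */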
	/* enable all queue drop for IOV */
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
		IXGBE_WRITE_FLUSH(hw);
		reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
		reg |= i << IXGBE_QDE_IDX_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
	}

	/* enable receive for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
	reg |= (1 << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

	/* Enable counting of spoofed packets in the SSVPC register */
	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
	reg |= (1 << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

	ixgbe_vf_reset_event(dev, vf);
}

static int
ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vmolr;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous\n", vf);

	vmolr &= ~IXGBE_VMOLR_MPE;

	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}
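
/*
 * Handle IXGBE_VF_RESET: re-enable the VF's queues, reprogram its
 * permanent MAC into the RAR table, and reply with the MAC address
 * (words 1-2) plus the multicast filter type (word 3).
 */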
static int
ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	ixgbe_vf_reset_msg(dev, vf);

	hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);

	/* Disable multicast promiscuous at reset */
	ixgbe_disable_vf_mc_promisc(dev, vf);

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
	rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
	/*
	 * Piggyback the multicast filter type so VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}

static int
ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	if (rte_is_valid_assigned_ether_addr(
			(struct rte_ether_addr *)new_mac)) {
		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
			   RTE_ETHER_ADDR_LEN);
		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
					   IXGBE_RAH_AV);
	}
	return -1;
}

static int
ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
		IXGBE_VT_MSGINFO_SHIFT;
	uint16_t *hash_list = (uint16_t *)&msgbuf[1];
	uint32_t mta_idx;
	uint32_t mta_shift;
	const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F;
	const uint32_t IXGBE_MTA_BIT_SHIFT = 5;
	const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
	uint32_t reg_val;
	int i;
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	/* Disable multicast promiscuous first */
	ixgbe_disable_vf_mc_promisc(dev, vf);

	/* only so many hash values supported */
	nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);

	/* store the mc entries */
	vfinfo[vf].num_vf_mc_hashes = (uint16_t)nb_entries;
	for (i = 0; i < nb_entries; i++)
		vfinfo[vf].vf_mc_hashes[i] = hash_list[i];

	if (nb_entries == 0) {
		vmolr &= ~IXGBE_VMOLR_ROMPE;
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
		return 0;
	}
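
	/*
	 * Each 16-bit hash selects one bit in the 128-entry x 32-bit
	 * multicast table array: bits 11:5 pick the MTA register and
	 * bits 4:0 pick the bit within it.
	 */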
	for (i = 0; i < vfinfo[vf].num_vf_mc_hashes; i++) {
		mta_idx = (vfinfo[vf].vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
				& IXGBE_MTA_INDEX_MASK;
		mta_shift = vfinfo[vf].vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
		reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
		reg_val |= (1 << mta_shift);
		IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}

static int
ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	int add, vid;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

	add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
		>> IXGBE_VT_MSGINFO_SHIFT;
	vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);

	if (add)
		vfinfo[vf].vlan_count++;
	else if (vfinfo[vf].vlan_count)
		vfinfo[vf].vlan_count--;
	return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add, false);
}

static int
ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t new_mtu = msgbuf[1];
	uint32_t max_frs;
	uint32_t hlreg0;
	int max_frame = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* X540 and X550 support jumbo frames in IOV mode */
	if (hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -1;

	if (max_frame < RTE_ETHER_MIN_LEN ||
	    max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
		return -1;

	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
	if (max_frs < new_mtu) {
		hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		if (new_mtu > RTE_ETHER_MAX_LEN) {
			dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_JUMBO_FRAME;
			hlreg0 |= IXGBE_HLREG0_JUMBOEN;
		} else {
			dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_JUMBO_FRAME;
			hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
		}
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

		max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}

	return 0;
}
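
/*
 * Record the mailbox API version the VF asked for; versions 1.0
 * through 1.3 are accepted, anything else is rejected.
 */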
static int
ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	uint32_t api_version = msgbuf[1];
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

	switch (api_version) {
	case ixgbe_mbox_api_10:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		vfinfo[vf].api_version = (uint8_t)api_version;
		return 0;
	default:
		break;
	}

	PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d\n",
		    api_version, vf);

	return -1;
}

static int
ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	struct rte_eth_conf *eth_conf;
	struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
	struct ixgbe_hw *hw;
	u32 vmvir;
#define IXGBE_VMVIR_VLANA_MASK		0xC0000000
#define IXGBE_VMVIR_VLAN_VID_MASK	0x00000FFF
#define IXGBE_VMVIR_VLAN_UP_MASK	0x0000E000
#define VLAN_PRIO_SHIFT			13
	u32 vlana;
	u32 vid;
	u32 user_priority;
	uint8_t num_tcs;

	/* Verify if the PF supports the mbox APIs version or not */
	switch (vfinfo[vf].api_version) {
	case ixgbe_mbox_api_20:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		break;
	default:
		return -1;
	}

	/* Notify VF of Rx and Tx queue number */
	msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	/* Notify VF of default queue */
	msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;

	/* Notify VF of number of DCB traffic classes */
	eth_conf = &dev->data->dev_conf;
	switch (eth_conf->txmode.mq_mode) {
	case ETH_MQ_TX_NONE:
	case ETH_MQ_TX_DCB:
		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
			", but its tx mode = %d\n", vf,
			eth_conf->txmode.mq_mode);
		return -1;

	case ETH_MQ_TX_VMDQ_DCB:
		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
		case ETH_16_POOLS:
			num_tcs = ETH_8_TCS;
			break;
		case ETH_32_POOLS:
			num_tcs = ETH_4_TCS;
			break;
		default:
			return -1;
		}
		break;

	/* ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
	case ETH_MQ_TX_VMDQ_ONLY:
		hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
		vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
		vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
		vid = vmvir & IXGBE_VMVIR_VLAN_VID_MASK;
		user_priority =
			(vmvir & IXGBE_VMVIR_VLAN_UP_MASK) >> VLAN_PRIO_SHIFT;
		if ((vlana == IXGBE_VMVIR_VLANA_DEFAULT) &&
		    ((vid != 0) || (user_priority != 0)))
			num_tcs = 1;
		else
			num_tcs = 0;
		break;

	default:
		PMD_DRV_LOG(ERR, "PF works with an invalid mode = %d\n",
			eth_conf->txmode.mq_mode);
		return -1;
	}
	msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;

	return 0;
}
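
/*
 * IXGBE_VF_UPDATE_XCAST_MODE: step the VF's receive filter through
 * NONE -> MULTI -> ALLMULTI -> PROMISC, mapping each mode onto the
 * VMOLR accept bits for that pool.
 */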
static int
ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int xcast_mode = msgbuf[1];	/* msgbuf contains the flag to enable */
	u32 vmolr, fctrl, disable, enable;

	switch (vfinfo[vf].api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -1;
		break;
	case ixgbe_mbox_api_13:
		break;
	default:
		return -1;
	}

	if (vfinfo[vf].xcast_mode == xcast_mode)
		goto out;

	switch (xcast_mode) {
	case IXGBEVF_XCAST_MODE_NONE:
		disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
			  IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = 0;
		break;
	case IXGBEVF_XCAST_MODE_MULTI:
		disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
		break;
	case IXGBEVF_XCAST_MODE_ALLMULTI:
		disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
		break;
	case IXGBEVF_XCAST_MODE_PROMISC:
		if (hw->mac.type <= ixgbe_mac_82599EB)
			return -1;

		fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		if (!(fctrl & IXGBE_FCTRL_UPE)) {
			/* VF promisc requires PF in promisc */
			PMD_DRV_LOG(ERR,
				"Enabling VF promisc requires PF in promisc\n");
			return -1;
		}

		disable = 0;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
			 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		break;
	default:
		return -1;
	}

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
	vfinfo[vf].xcast_mode = xcast_mode;

out:
	msgbuf[1] = xcast_mode;

	return 0;
}

static int
ixgbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vf_info =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
	int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
		    IXGBE_VT_MSGINFO_SHIFT;

	if (index) {
		if (!rte_is_valid_assigned_ether_addr(
				(struct rte_ether_addr *)new_mac)) {
			PMD_DRV_LOG(ERR, "set invalid mac vf:%d\n", vf);
			return -1;
		}

		vf_info[vf].mac_count++;

		hw->mac.ops.set_rar(hw, vf_info[vf].mac_count,
				new_mac, vf, IXGBE_RAH_AV);
	} else {
		if (vf_info[vf].mac_count) {
			hw->mac.ops.clear_rar(hw, vf_info[vf].mac_count);
			vf_info[vf].mac_count = 0;
		}
	}
	return 0;
}
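
/*
 * Main mailbox dispatcher: read the VF's message, let the application
 * veto or pre-process it through the RTE_ETH_EVENT_VF_MBOX callback,
 * run the matching handler, then ACK or NACK the message back to the
 * VF.
 */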
static int
ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
	uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
	uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT;
	uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
	int32_t retval;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	struct rte_pmd_ixgbe_mb_event_param ret_param;

	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
	if (retval) {
		PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
		return retval;
	}

	/* do nothing if the message has already been processed */
	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
		return retval;

	/* flush the ack before we write any messages back */
	IXGBE_WRITE_FLUSH(hw);

	/*
	 * initialise structure to send to user application
	 * will return response from user in retval field
	 */
	ret_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
	ret_param.vfid = vf;
	ret_param.msg_type = msgbuf[0] & 0xFFFF;
	ret_param.msg = (void *)msgbuf;

	/* perform VF reset */
	if (msgbuf[0] == IXGBE_VF_RESET) {
		int ret = ixgbe_vf_reset(dev, vf, msgbuf);

		vfinfo[vf].clear_to_send = true;

		/* notify application about VF reset */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
					      &ret_param);
		return ret;
	}

	/*
	 * Ask the user application whether we may perform the requested
	 * function:
	 * - RTE_PMD_IXGBE_MB_EVENT_PROCEED: handle the request as usual;
	 * - RTE_PMD_IXGBE_MB_EVENT_NOOP_ACK (0): skip it and ACK the VF;
	 * - anything else: skip it and NACK the VF.
	 */
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
				      &ret_param);

	retval = ret_param.retval;

	/* check & process VF to PF mailbox message */
	switch ((msgbuf[0] & 0xFFFF)) {
	case IXGBE_VF_SET_MAC_ADDR:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_MULTICAST:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_LPE:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_VLAN:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
		break;
	case IXGBE_VF_GET_QUEUES:
		retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
		msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE;
		break;
	case IXGBE_VF_UPDATE_XCAST_MODE:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_MACVLAN:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_set_vf_macvlan_msg(dev, vf, msgbuf);
		break;
	default:
		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
		retval = IXGBE_ERR_MBX;
		break;
	}

	/* reply to the VF according to the message processing result */
	if (retval)
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, msgbuf, msg_size, vf);

	return retval;
}
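
/*
 * An ACK from a VF that has not completed the reset handshake is
 * answered with NACK, telling the VF the PF is not ready for it yet.
 */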
static int
ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
	uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

	if (!vfinfo[vf].clear_to_send)
		ixgbe_write_mbx(hw, &msg, 1, vf);

	return 0;
}
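
/*
 * Poll every VF for pending resets, mailbox messages and acks; the
 * mailbox interrupt enabled in ixgbe_mb_intr_setup() drives calls to
 * this handler.
 */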
void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
{
	uint16_t vf;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
		/* check & process vf function level reset */
		if (!ixgbe_check_for_rst(hw, vf))
			ixgbe_vf_reset_event(eth_dev, vf);

		/* check & process vf mailbox messages */
		if (!ixgbe_check_for_msg(hw, vf))
			ixgbe_rcv_msg_from_vf(eth_dev, vf);

		/* check & process acks from vf */
		if (!ixgbe_check_for_ack(hw, vf))
			ixgbe_rcv_ack_from_vf(eth_dev, vf);
	}
}