/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <string.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_malloc.h>
#include <rte_random.h>

#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"
#define IXGBE_MAX_VFTA              (128)
#define IXGBE_VF_MSG_SIZE_DEFAULT   1
#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5
#define IXGBE_ETHERTYPE_FLOW_CTRL   0x8808
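
/*
 * 0x8808 is the IEEE 802.3 MAC control EtherType carried by flow-control
 * (PAUSE) frames; ixgbe_add_tx_flow_control_drop_filter() below installs
 * a filter that matches on it.
 */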
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);

	return pci_dev->max_vfs;
}
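
/*
 * Assign a random "permanent" MAC address to every VF. The address is
 * cached in the per-VF info and handed back to the VF in the reply to
 * its IXGBE_VF_RESET mailbox message (see ixgbe_vf_reset()).
 */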
static inline
int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
	unsigned char vf_mac_addr[ETHER_ADDR_LEN];
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	uint16_t vfn;

	for (vfn = 0; vfn < vf_num; vfn++) {
		eth_random_addr(vf_mac_addr);
		/* keep the random address as default */
		memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
		       ETHER_ADDR_LEN);
	}

	return 0;
}
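
/* Unmask the mailbox interrupt bit so VF requests wake the PF ISR. */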
static inline int
ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	intr->mask |= IXGBE_EICR_MAILBOX;

	return 0;
}
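
/*
 * SR-IOV pool layout on ixgbe: the 128 hardware queues are carved into
 * 64 pools x 2 queues, 32 pools x 4 queues or 16 pools x 8 queues, and
 * the smallest mode that fits all VFs is chosen; the PF takes the pool
 * right after the last VF. For example, vf_num = 20 selects the 32-pool
 * mode: each VF owns 4 queues, def_vmdq_idx = 20 and the PF's first
 * queue (def_pool_q_idx) is 20 * 4 = 80.
 */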
void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_vf_info **vfinfo =
		IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
	struct ixgbe_mirror_info *mirror_info =
		IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
	struct ixgbe_uta_info *uta_info =
		IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint16_t vf_num;
	uint8_t nb_queue;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return;

	*vfinfo = rte_zmalloc("vf_info",
			      sizeof(struct ixgbe_vf_info) * vf_num, 0);
	if (*vfinfo == NULL)
		rte_panic("Cannot allocate memory for private VF data\n");

	memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
	hw->mac.mc_filter_type = 0;

	if (vf_num >= ETH_32_POOLS) {
		nb_queue = 2;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
	} else if (vf_num >= ETH_16_POOLS) {
		nb_queue = 4;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
	} else {
		nb_queue = 8;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
	}

	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx =
		(uint16_t)(vf_num * nb_queue);

	ixgbe_vf_perm_addr_gen(eth_dev, vf_num);

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* set mb interrupt mask */
	ixgbe_mb_intr_setup(eth_dev);
}
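
/* Undo ixgbe_pf_host_init(): clear the SRIOV state and release the VF array. */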
void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_vf_info **vfinfo;
	uint16_t vf_num;

	PMD_INIT_FUNC_TRACE();

	vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return;

	rte_free(*vfinfo);
	*vfinfo = NULL;
}
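
/*
 * Trap 802.3 flow-control frames (EtherType 0x8808) in an ethertype filter
 * and enable Tx anti-spoofing on every VF, so that no VF can forge PAUSE
 * frames and stall the link for the other pools.
 */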
static void
ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct ixgbe_ethertype_filter ethertype_filter;
	uint16_t vf_num;
	int i;

	if (!hw->mac.ops.set_ethertype_anti_spoofing) {
		RTE_LOG(INFO, PMD,
			"ether type anti-spoofing is not supported.\n");
		return;
	}

	i = ixgbe_ethertype_filter_lookup(filter_info,
					  IXGBE_ETHERTYPE_FLOW_CTRL);
	if (i >= 0) {
		RTE_LOG(ERR, PMD,
			"An ethertype filter entity for flow control already exists!\n");
		return;
	}

	ethertype_filter.ethertype = IXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqf = IXGBE_ETQF_FILTER_EN |
				IXGBE_ETQF_TX_ANTISPOOF |
				IXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqs = 0;
	ethertype_filter.conf = TRUE;
	i = ixgbe_ethertype_filter_insert(filter_info,
					  &ethertype_filter);
	if (i < 0) {
		RTE_LOG(ERR, PMD,
			"Cannot find an unused ethertype filter entity for flow control.\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
			(IXGBE_ETQF_FILTER_EN |
			 IXGBE_ETQF_TX_ANTISPOOF |
			 IXGBE_ETHERTYPE_FLOW_CTRL));

	vf_num = dev_num_vf(eth_dev);
	for (i = 0; i < vf_num; i++)
		hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
}
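
/*
 * One-time PF-side datapath setup for SR-IOV: enable VMDq with the PF's
 * default pool, reserve the pool-enable bits for the PF, keep the GCR_EXT
 * and GPIE VT modes in sync, open the VLAN filter table, and raise the
 * flow-control thresholds so the Tx switch cannot hang.
 */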
int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
	uint32_t vtctl, fcrth;
	uint32_t vfre_slot, vfre_offset;
	uint16_t vf_num;
	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint32_t gpie, gcr_ext;
	uint32_t vlanctrl;
	int i;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return -1;

	/* enable VMDq and set the default pool for PF */
	vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
	vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
	vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
		<< IXGBE_VT_CTL_POOL_SHIFT;
	vtctl |= IXGBE_VT_CTL_REPLEN;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);

	vfre_offset = vf_num & VFRE_MASK;
	vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;
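
	/*
	 * Pools with index >= vf_num belong to the PF. The 64 pool-enable
	 * bits live in two 32-bit slots: e.g. vf_num = 40 yields
	 * vfre_slot = 1, vfre_offset = 8, so the writes below set bits
	 * 8..31 of slot 1 (pools 40..63) for the PF and leave the per-VF
	 * bits clear; those are switched on one by one in
	 * ixgbe_vf_reset_msg().
	 */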
	/* Enable pools reserved to PF only */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0U) << vfre_offset);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0U) << vfre_offset);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);

	/* PFDMA Tx General Switch Control Enables VMDQ loopback */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* clear VMDq map to permanent rar 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	/* clear VMDq map to scan rar 127 */
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0);
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);

	/* set VMDq map to default PF pool */
	hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);

	/*
	 * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
	 */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	gpie |= IXGBE_GPIE_MSIX_MODE;

	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
	case ETH_64_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case ETH_32_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	case ETH_16_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
		gpie |= IXGBE_GPIE_VTMODE_16;
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/*
	 * enable vlan filtering and allow all vlan tags through
	 */
	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

	/* VFTA - enable all vlan filters */
	for (i = 0; i < IXGBE_MAX_VFTA; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);

	/* Enable MAC Anti-Spoofing */
	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);

	/* set flow control threshold to max to avoid tx switch hang */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	ixgbe_add_tx_flow_control_drop_filter(eth_dev);

	return 0;
}
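
/*
 * Mirror the PF's promiscuous/all-multicast settings into FCTRL and into
 * the PF pool's VMOLR, and propagate the VLAN-strip setting to all queues.
 * Also called on VF reset, since a reset may leave stale Rx-mode state.
 */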
static void
set_rx_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
	uint16_t vfn = dev_num_vf(dev);

	/* Check for Promiscuous and All Multicast modes */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	/* set all bits that we expect to always be set */
	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
	fctrl |= IXGBE_FCTRL_BAM;

	/* clear the bits we are changing the status of */
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	if (dev_data->promiscuous) {
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
	} else if (dev_data->all_multicast) {
		fctrl |= IXGBE_FCTRL_MPE;
		vmolr |= IXGBE_VMOLR_MPE;
	} else {
		vmolr |= IXGBE_VMOLR_ROMPE;
	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) &
			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
			   IXGBE_VMOLR_ROPE);
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
		ixgbe_vlan_hw_strip_enable_all(dev);
	else
		ixgbe_vlan_hw_strip_disable_all(dev);
}
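
/*
 * Function-level reset, first half: restore the VF's Rx filtering to its
 * defaults (broadcast + untagged accept), clear its default VLAN, forget
 * its multicast hashes and release its dedicated RAR entry.
 */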
static inline void
ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_ROMPE |
		  IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);

	/* reset multicast table array for vf */
	vfinfo[vf].num_vf_mc_hashes = 0;

	/* reset rx mode */
	set_rx_mode(dev);

	hw->mac.ops.clear_rar(hw, rar_entry);
}
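
/*
 * Function-level reset, second half: re-enable the VF's Tx/Rx pool-enable
 * bits, turn on per-queue drop so a stuck VF queue cannot back-pressure
 * the Tx switch, and count its spoofed packets in SSVPC.
 */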
static inline void
ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;
	uint32_t reg_offset, vf_shift;
	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	uint8_t nb_q_per_pool;
	int i;

	vf_shift = vf & VFRE_MASK;
	reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;

	/* enable transmit for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
	reg |= 1 << vf_shift;
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

	/* enable all queue drop for IOV */
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
		IXGBE_WRITE_FLUSH(hw);
		reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
		reg |= i << IXGBE_QDE_IDX_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
	}

	/* enable receive for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
	reg |= 1 << vf_shift;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

	/* Enable counting of spoofed packets in the SSVPC register */
	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
	reg |= 1 << vf_shift;
	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

	ixgbe_vf_reset_event(dev, vf);
}
static int
ixgbe_enable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vmolr;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	RTE_LOG(INFO, PMD, "VF %u: enabling multicast promiscuous\n", vf);

	vmolr |= IXGBE_VMOLR_MPE;

	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}
static int
ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vmolr;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	RTE_LOG(INFO, PMD, "VF %u: disabling multicast promiscuous\n", vf);

	vmolr &= ~IXGBE_VMOLR_MPE;

	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}
static int
ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	ixgbe_vf_reset_msg(dev, vf);

	hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);

	/* Disable multicast promiscuous at reset */
	ixgbe_disable_vf_mc_promisc(dev, vf);

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
	rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
	/*
	 * Piggyback the multicast filter type so VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}
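
/*
 * Handle IXGBE_VF_SET_MAC_ADDR: accept only a valid unicast address and
 * program it into the VF's dedicated RAR entry.
 */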
static int
ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
			   ETHER_ADDR_LEN);
		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
					   IXGBE_RAH_AV);
	}
	return -1;
}
static int
ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
		IXGBE_VT_MSGINFO_SHIFT;
	uint16_t *hash_list = (uint16_t *)&msgbuf[1];
	uint32_t mta_idx;
	uint32_t mta_shift;
	const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F;
	const uint32_t IXGBE_MTA_BIT_SHIFT = 5;
	const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
	uint32_t reg_val;
	int i;
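
	/*
	 * Each 12-bit multicast hash selects one bit of the 128-word MTA:
	 * bits 11:5 pick the 32-bit word, bits 4:0 the bit within it.
	 * For example, hash 0x0B41 sets bit 1 of MTA word 0x5A.
	 */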
	/* Disable multicast promiscuous first */
	ixgbe_disable_vf_mc_promisc(dev, vf);

	/* only so many hash values supported */
	nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);

	/* store the mc entries */
	vfinfo[vf].num_vf_mc_hashes = (uint16_t)nb_entries;
	for (i = 0; i < nb_entries; i++)
		vfinfo[vf].vf_mc_hashes[i] = hash_list[i];

	for (i = 0; i < vfinfo[vf].num_vf_mc_hashes; i++) {
		mta_idx = (vfinfo[vf].vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
				& IXGBE_MTA_INDEX_MASK;
		mta_shift = vfinfo[vf].vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
		reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
		reg_val |= (1 << mta_shift);
		IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
	}

	return 0;
}
static int
ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	int add, vid;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

	add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
		>> IXGBE_VT_MSGINFO_SHIFT;
	vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);

	if (add)
		vfinfo[vf].vlan_count++;
	else if (vfinfo[vf].vlan_count)
		vfinfo[vf].vlan_count--;
	return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add, false);
}
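
/*
 * Handle IXGBE_VF_SET_LPE: a VF asks for a larger max receive frame size.
 * Only honoured on X540/X550 family parts, and MAXFRS is only ever grown,
 * since the register is shared by all pools.
 */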
static int
ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf,
		 uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t new_mtu = msgbuf[1];
	uint32_t max_frs;
	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* X540 and X550 support jumbo frames in IOV mode */
	if (hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -1;

	if ((max_frame < ETHER_MIN_LEN) ||
	    (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
		return -1;

	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
	if (max_frs < new_mtu) {
		max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}

	return 0;
}
static int
ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	uint32_t api_version = msgbuf[1];
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

	switch (api_version) {
	case ixgbe_mbox_api_10:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
		vfinfo[vf].api_version = (uint8_t)api_version;
		return 0;
	default:
		break;
	}

	RTE_LOG(ERR, PMD,
		"Negotiation of an invalid api version %u from VF %d\n",
		api_version, vf);

	return -1;
}
static int
ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	/* Verify if the PF supports the mbox APIs version or not */
	switch (vfinfo[vf].api_version) {
	case ixgbe_mbox_api_20:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
		break;
	default:
		return -1;
	}

	/* Notify VF of Rx and Tx queue number */
	msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	/* Notify VF of default queue */
	msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;

	/*
	 * FIXME: fill msgbuf[IXGBE_VF_TRANS_VLAN] when needed, for
	 * VLAN strip or VMDQ_DCB or VMDQ_DCB_RSS
	 */

	return 0;
}
static int
ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	bool enable = !!msgbuf[1];	/* msgbuf contains the flag to enable */

	switch (vfinfo[vf].api_version) {
	case ixgbe_mbox_api_12:
		break;
	default:
		return -1;
	}

	if (enable)
		return ixgbe_enable_vf_mc_promisc(dev, vf);
	else
		return ixgbe_disable_vf_mc_promisc(dev, vf);
}
static int
ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
	uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
	uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT;
	uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
	int32_t retval;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	struct rte_pmd_ixgbe_mb_event_param cb_param;

	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
	if (retval) {
		PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
		return retval;
	}

	/* do nothing with messages that have already been processed */
	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
		return retval;

	/* flush the ack before we write any messages back */
	IXGBE_WRITE_FLUSH(hw);

	/*
	 * initialise the structure sent to the user application; the
	 * user's response comes back in the retval field
	 */
	cb_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
	cb_param.vfid = vf;
	cb_param.msg_type = msgbuf[0] & 0xFFFF;
	cb_param.msg = (void *)msgbuf;

	/* perform VF reset */
	if (msgbuf[0] == IXGBE_VF_RESET) {
		int ret = ixgbe_vf_reset(dev, vf, msgbuf);

		vfinfo[vf].clear_to_send = true;

		/* notify application about VF reset */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
					      &cb_param);
		return ret;
	}

	/*
	 * ask the user application whether we are allowed to perform the
	 * requested function:
	 * if cb_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED,
	 * business as usual;
	 * if RTE_PMD_IXGBE_MB_EVENT_NOOP_ACK, do nothing and send an ACK;
	 * if RTE_PMD_IXGBE_MB_EVENT_NOOP_NACK, do nothing and send a NACK.
	 */
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);

	retval = cb_param.retval;
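
	/*
	 * A minimal sketch of an application-side callback (illustrative
	 * only, not part of this driver; use the callback signature that
	 * rte_eth_dev_callback_register() expects in the DPDK version in
	 * use):
	 *
	 *	struct rte_pmd_ixgbe_mb_event_param *p = param;
	 *
	 *	if (p->msg_type == IXGBE_VF_SET_MAC_ADDR)
	 *		p->retval = RTE_PMD_IXGBE_MB_EVENT_NOOP_NACK;
	 *
	 * would veto MAC-address changes requested by VFs and NACK them.
	 */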
732 /* check & process VF to PF mailbox message */
733 switch ((msgbuf
[0] & 0xFFFF)) {
734 case IXGBE_VF_SET_MAC_ADDR
:
735 if (retval
== RTE_PMD_IXGBE_MB_EVENT_PROCEED
)
736 retval
= ixgbe_vf_set_mac_addr(dev
, vf
, msgbuf
);
738 case IXGBE_VF_SET_MULTICAST
:
739 if (retval
== RTE_PMD_IXGBE_MB_EVENT_PROCEED
)
740 retval
= ixgbe_vf_set_multicast(dev
, vf
, msgbuf
);
742 case IXGBE_VF_SET_LPE
:
743 if (retval
== RTE_PMD_IXGBE_MB_EVENT_PROCEED
)
744 retval
= ixgbe_set_vf_lpe(dev
, vf
, msgbuf
);
746 case IXGBE_VF_SET_VLAN
:
747 if (retval
== RTE_PMD_IXGBE_MB_EVENT_PROCEED
)
748 retval
= ixgbe_vf_set_vlan(dev
, vf
, msgbuf
);
750 case IXGBE_VF_API_NEGOTIATE
:
751 retval
= ixgbe_negotiate_vf_api(dev
, vf
, msgbuf
);
753 case IXGBE_VF_GET_QUEUES
:
754 retval
= ixgbe_get_vf_queues(dev
, vf
, msgbuf
);
755 msg_size
= IXGBE_VF_GET_QUEUE_MSG_SIZE
;
757 case IXGBE_VF_UPDATE_XCAST_MODE
:
758 if (retval
== RTE_PMD_IXGBE_MB_EVENT_PROCEED
)
759 retval
= ixgbe_set_vf_mc_promisc(dev
, vf
, msgbuf
);
762 PMD_DRV_LOG(DEBUG
, "Unhandled Msg %8.8x", (unsigned)msgbuf
[0]);
763 retval
= IXGBE_ERR_MBX
;
767 /* response the VF according to the message process result */
769 msgbuf
[0] |= IXGBE_VT_MSGTYPE_NACK
;
771 msgbuf
[0] |= IXGBE_VT_MSGTYPE_ACK
;
773 msgbuf
[0] |= IXGBE_VT_MSGTYPE_CTS
;
775 ixgbe_write_mbx(hw
, msgbuf
, msg_size
, vf
);
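
/*
 * A VF that polls with an ACK before completing the reset handshake
 * (clear_to_send still false) gets an explicit NACK, telling it to
 * restart its reset sequence.
 */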
static inline void
ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
	uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

	if (!vfinfo[vf].clear_to_send)
		ixgbe_write_mbx(hw, &msg, 1, vf);
}
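
/*
 * Entry point from the PF interrupt handler (EICR mailbox bit): poll every
 * VF for pending function-level resets, mailbox messages and acks.
 */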
void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
{
	uint16_t vf;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
		/* check & process vf function level reset */
		if (!ixgbe_check_for_rst(hw, vf))
			ixgbe_vf_reset_event(eth_dev, vf);

		/* check & process vf mailbox messages */
		if (!ixgbe_check_for_msg(hw, vf))
			ixgbe_rcv_msg_from_vf(eth_dev, vf);

		/* check & process acks from vf */
		if (!ixgbe_check_for_ack(hw, vf))
			ixgbe_rcv_ack_from_vf(eth_dev, vf);
	}
}