/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_memcpy.h>
#include <rte_malloc.h>
#include <rte_random.h>

#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"

#define IXGBE_MAX_VFTA (128)
#define IXGBE_VF_MSG_SIZE_DEFAULT 1
#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5
#define IXGBE_ETHERTYPE_FLOW_CTRL 0x8808

static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	return pci_dev->max_vfs;
}

static inline
int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
	unsigned char vf_mac_addr[ETHER_ADDR_LEN];
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	uint16_t vfn;

	for (vfn = 0; vfn < vf_num; vfn++) {
		eth_random_addr(vf_mac_addr);
		/* keep the random address as default */
		memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
			ETHER_ADDR_LEN);
	}

	return 0;
}

static inline int
ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	intr->mask |= IXGBE_EICR_MAILBOX;

	return 0;
}

void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_vf_info **vfinfo =
		IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
	struct ixgbe_mirror_info *mirror_info =
		IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
	struct ixgbe_uta_info *uta_info =
		IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint16_t vf_num;
	uint8_t nb_queue;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return;

	*vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
	if (*vfinfo == NULL)
		rte_panic("Cannot allocate memory for private VF data\n");

	rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);

	memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
	hw->mac.mc_filter_type = 0;

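	/*
	 * Pool/queue split chosen by the branch below: 32 or more VFs gives
	 * 64 pools with 2 queues each, 16-31 VFs gives 32 pools with 4 queues
	 * each, and fewer than 16 VFs gives 16 pools with 8 queues each. The
	 * PF takes the first pool after the VF pools (def_vmdq_idx = vf_num),
	 * so its first queue index is vf_num * nb_queue.
	 */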
	if (vf_num >= ETH_32_POOLS) {
		nb_queue = 2;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
	} else if (vf_num >= ETH_16_POOLS) {
		nb_queue = 4;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
	} else {
		nb_queue = 8;
		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
	}

	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);

	ixgbe_vf_perm_addr_gen(eth_dev, vf_num);

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* set mb interrupt mask */
	ixgbe_mb_intr_setup(eth_dev);
}

void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_vf_info **vfinfo;
	uint16_t vf_num;
	int ret;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return;

	vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
	if (*vfinfo == NULL)
		return;

	ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
	if (ret)
		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);

	rte_free(*vfinfo);
	*vfinfo = NULL;
}

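/*
 * The function below installs an Ethertype filter for IEEE 802.3 flow
 * control frames (Ethertype 0x8808) with the Tx anti-spoof bit set, and
 * then enables Ethertype anti-spoofing per VF, so PAUSE frames generated
 * by a VF are dropped rather than transmitted onto the wire.
 */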
static void
ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	uint16_t vf_num;
	int i;
	struct ixgbe_ethertype_filter ethertype_filter;

	if (!hw->mac.ops.set_ethertype_anti_spoofing) {
		RTE_LOG(INFO, PMD, "ether type anti-spoofing is not"
			" supported.\n");
		return;
	}

	i = ixgbe_ethertype_filter_lookup(filter_info,
					  IXGBE_ETHERTYPE_FLOW_CTRL);
	if (i >= 0) {
		RTE_LOG(ERR, PMD, "An ether type filter"
			" entity for flow control already exists!\n");
		return;
	}

	ethertype_filter.ethertype = IXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqf = IXGBE_ETQF_FILTER_EN |
				IXGBE_ETQF_TX_ANTISPOOF |
				IXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqs = 0;
	ethertype_filter.conf = TRUE;
	i = ixgbe_ethertype_filter_insert(filter_info,
					  &ethertype_filter);
	if (i < 0) {
		RTE_LOG(ERR, PMD, "Cannot find an unused ether type filter"
			" entity for flow control.\n");
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
			(IXGBE_ETQF_FILTER_EN |
			IXGBE_ETQF_TX_ANTISPOOF |
			IXGBE_ETHERTYPE_FLOW_CTRL));

	vf_num = dev_num_vf(eth_dev);
	for (i = 0; i < vf_num; i++)
		hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
}

int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
	uint32_t vtctl, fcrth;
	uint32_t vfre_slot, vfre_offset;
	uint16_t vf_num;
	const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint32_t gpie, gcr_ext;
	uint32_t vlanctrl;
	int i;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return -1;

	/* enable VMDq and set the default pool for PF */
	vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
	vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
	vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
		<< IXGBE_VT_CTL_POOL_SHIFT;
	vtctl |= IXGBE_VT_CTL_REPLEN;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);

	vfre_offset = vf_num & VFRE_MASK;
	vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;

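	/*
	 * VFRE/VFTE are pairs of 32-bit registers with one Rx/Tx enable bit
	 * per pool. Only the pools owned by the PF (index vf_num and above)
	 * are enabled here; each VF's own bit is set later in
	 * ixgbe_vf_reset_msg(). For example, with vf_num = 40: vfre_slot = 1
	 * and vfre_offset = 8, so slot 1 is written with 0xFFFFFF00
	 * (pools 40..63) and slot 0 with 0.
	 */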
	/* Enable pools reserved to PF only */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0U) << vfre_offset);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0U) << vfre_offset);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);

	/* PFDMA Tx General Switch Control Enables VMDQ loopback */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* clear VMDq map to permanent rar 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	/* clear VMDq map to scan rar 127 */
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0);
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);

	/* set VMDq map to default PF pool */
	hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);

	/*
	 * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
	 */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;

	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
	case ETH_64_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case ETH_32_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	case ETH_16_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
		gpie |= IXGBE_GPIE_VTMODE_16;
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/*
	 * enable vlan filtering and allow all vlan tags through
	 */
	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

	/* VFTA - enable all vlan filters */
	for (i = 0; i < IXGBE_MAX_VFTA; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);

	/* Enable MAC Anti-Spoofing */
	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);

	/* set flow control threshold to max to avoid tx switch hang */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	ixgbe_add_tx_flow_control_drop_filter(eth_dev);

	return 0;
}

static void
set_rx_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
	uint16_t vfn = dev_num_vf(dev);

	/* Check for Promiscuous and All Multicast modes */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	/* set all bits that we expect to always be set */
	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
	fctrl |= IXGBE_FCTRL_BAM;

	/* clear the bits we are changing the status of */
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	if (dev_data->promiscuous) {
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
	} else {
		if (dev_data->all_multicast) {
			fctrl |= IXGBE_FCTRL_MPE;
			vmolr |= IXGBE_VMOLR_MPE;
		} else {
			vmolr |= IXGBE_VMOLR_ROMPE;
		}
	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) &
			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
			   IXGBE_VMOLR_ROPE);
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	ixgbe_vlan_hw_strip_config(dev);
}

static inline void
ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	vmolr |= (IXGBE_VMOLR_ROPE |
			IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);

	/* reset multicast table array for vf */
	vfinfo[vf].num_vf_mc_hashes = 0;

	/* reset rx mode */
	set_rx_mode(dev);

	hw->mac.ops.clear_rar(hw, rar_entry);
}

static inline void
ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;
	uint32_t reg_offset, vf_shift;
	const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	uint8_t nb_q_per_pool;
	int i;

	vf_shift = vf & VFRE_MASK;
	reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;

	/* enable transmit for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
	reg |= (reg | (1 << vf_shift));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

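	/*
	 * Enabling QDE for every queue of the pool makes the hardware drop
	 * packets when that queue has no free Rx descriptors, so a VF that
	 * stops servicing its rings cannot back up the shared packet buffer.
	 */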
	/* enable all queue drop for IOV */
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
		IXGBE_WRITE_FLUSH(hw);
		reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
		reg |= i << IXGBE_QDE_IDX_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
	}

	/* enable receive for vf */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
	reg |= (reg | (1 << vf_shift));
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

	/* Enable counting of spoofed packets in the SSVPC register */
	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
	reg |= (1 << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

	ixgbe_vf_reset_event(dev, vf);
}

static int
ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vmolr;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	RTE_LOG(INFO, PMD, "VF %u: disabling multicast promiscuous\n", vf);

	vmolr &= ~IXGBE_VMOLR_MPE;

	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}

static int
ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	ixgbe_vf_reset_msg(dev, vf);

	hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);

	/* Disable multicast promiscuous at reset */
	ixgbe_disable_vf_mc_promisc(dev, vf);

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
	rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
	/*
	 * Piggyback the multicast filter type so VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}

static int
ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
	}
	return -1;
}

static int
ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
		IXGBE_VT_MSGINFO_SHIFT;
	uint16_t *hash_list = (uint16_t *)&msgbuf[1];
	uint32_t mta_idx;
	uint32_t mta_shift;
	const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F;
	const uint32_t IXGBE_MTA_BIT_SHIFT = 5;
	const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
	uint32_t reg_val;
	int i;
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	/* Disable multicast promiscuous first */
	ixgbe_disable_vf_mc_promisc(dev, vf);

	/* only so many hash values supported */
	nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);

	/* store the mc entries */
	vfinfo->num_vf_mc_hashes = (uint16_t)nb_entries;
	for (i = 0; i < nb_entries; i++) {
		vfinfo->vf_mc_hashes[i] = hash_list[i];
	}

	if (nb_entries == 0) {
		vmolr &= ~IXGBE_VMOLR_ROMPE;
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
		return 0;
	}

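	/*
	 * Each 12-bit hash selects one bit in the 4096-bit multicast table
	 * array: bits 11:5 pick one of the 128 MTA registers and bits 4:0
	 * pick the bit inside that 32-bit register.
	 */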
	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
		mta_idx = (vfinfo->vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
			& IXGBE_MTA_INDEX_MASK;
		mta_shift = vfinfo->vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
		reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
		reg_val |= (1 << mta_shift);
		IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}

static int
ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	int add, vid;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

	add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
		>> IXGBE_VT_MSGINFO_SHIFT;
	vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);

	if (add)
		vfinfo[vf].vlan_count++;
	else if (vfinfo[vf].vlan_count)
		vfinfo[vf].vlan_count--;
	return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add, false);
}

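/*
 * IXGBE_VF_SET_LPE handler: MAXFRS holds the maximum receive frame size
 * shared by the PF and all VFs, so this handler only ever raises it and
 * never shrinks it on behalf of a single VF.
 */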
static int
ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t new_mtu = msgbuf[1];
	uint32_t max_frs;
	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* X540 and X550 support jumbo frames in IOV mode */
	if (hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -1;

	if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
		return -1;

	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
	if (max_frs < new_mtu) {
		max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}

	return 0;
}

static int
ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	uint32_t api_version = msgbuf[1];
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

	switch (api_version) {
	case ixgbe_mbox_api_10:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		vfinfo[vf].api_version = (uint8_t)api_version;
		return 0;
	default:
		break;
	}

	RTE_LOG(ERR, PMD, "Negotiate invalid api version %u from VF %d\n",
		api_version, vf);

	return -1;
}

static int
ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	struct rte_eth_conf *eth_conf;
	struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
	u8 num_tcs;
	struct ixgbe_hw *hw;
	u32 vmvir;
#define IXGBE_VMVIR_VLANA_MASK		0xC0000000
#define IXGBE_VMVIR_VLAN_VID_MASK	0x00000FFF
#define IXGBE_VMVIR_VLAN_UP_MASK	0x0000E000
#define VLAN_PRIO_SHIFT			13
	u32 vlana;
	u32 vid;
	u32 user_priority;

	/* Verify if the PF supports the mbox APIs version or not */
	switch (vfinfo[vf].api_version) {
	case ixgbe_mbox_api_20:
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
		break;
	default:
		return -1;
	}

	/* Notify VF of Rx and Tx queue number */
	msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	/* Notify VF of default queue */
	msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;

	/* Notify VF of number of DCB traffic classes */
	eth_conf = &dev->data->dev_conf;
	switch (eth_conf->txmode.mq_mode) {
	case ETH_MQ_TX_NONE:
	case ETH_MQ_TX_DCB:
		RTE_LOG(ERR, PMD, "PF must work with virtualization for VF %u"
			", but its tx mode = %d\n", vf,
			eth_conf->txmode.mq_mode);
		return -1;

	case ETH_MQ_TX_VMDQ_DCB:
		vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
		switch (vmdq_dcb_tx_conf->nb_queue_pools) {
		case ETH_16_POOLS:
			num_tcs = ETH_8_TCS;
			break;
		case ETH_32_POOLS:
			num_tcs = ETH_4_TCS;
			break;
		default:
			return -1;
		}
		break;

	/* ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
	case ETH_MQ_TX_VMDQ_ONLY:
		hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
		vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
		vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
		vid = vmvir & IXGBE_VMVIR_VLAN_VID_MASK;
		user_priority =
			(vmvir & IXGBE_VMVIR_VLAN_UP_MASK) >> VLAN_PRIO_SHIFT;
		if ((vlana == IXGBE_VMVIR_VLANA_DEFAULT) &&
		    ((vid != 0) || (user_priority != 0)))
			num_tcs = 1;
		else
			num_tcs = 0;
		break;

	default:
		RTE_LOG(ERR, PMD, "PF works with invalid mode = %d\n",
			eth_conf->txmode.mq_mode);
		return -1;
	}
	msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;

	return 0;
}

static int
ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int xcast_mode = msgbuf[1];	/* msgbuf contains the flag to enable */
	u32 vmolr, fctrl, disable, enable;

	switch (vfinfo[vf].api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		break;
		/* Fall through */
	case ixgbe_mbox_api_13:
		break;
	default:
		return -1;
	}

	if (vfinfo[vf].xcast_mode == xcast_mode)
		goto out;

	switch (xcast_mode) {
	case IXGBEVF_XCAST_MODE_NONE:
		disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
			  IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = 0;
		break;
	case IXGBEVF_XCAST_MODE_MULTI:
		disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
		break;
	case IXGBEVF_XCAST_MODE_ALLMULTI:
		disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
		break;
	case IXGBEVF_XCAST_MODE_PROMISC:
		if (hw->mac.type <= ixgbe_mac_82599EB)
			return -1;

		fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		if (!(fctrl & IXGBE_FCTRL_UPE)) {
			/* VF promisc requires PF in promisc */
			RTE_LOG(ERR, PMD,
				"Enabling VF promisc requires PF in promisc\n");
			return -1;
		}

		disable = 0;
		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
			 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
		break;
	default:
		return -1;
	}

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
	vmolr &= ~disable;
	vmolr |= enable;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
	vfinfo[vf].xcast_mode = xcast_mode;

out:
	msgbuf[1] = xcast_mode;

	return 0;
}

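/*
 * Read one mailbox message from a VF and dispatch it: the low 16 bits of
 * msgbuf[0] carry the request opcode, and the application is consulted
 * through the RTE_ETH_EVENT_VF_MBOX callback before most requests are
 * applied. The result is returned to the VF with an ACK or NACK flag.
 */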
static int
ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
	uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
	uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT;
	uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
	int32_t retval;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	struct rte_pmd_ixgbe_mb_event_param ret_param;

	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
	if (retval) {
		PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
		return retval;
	}

	/* do nothing if the message has already been processed */
	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
		return retval;

	/* flush the ack before we write any messages back */
	IXGBE_WRITE_FLUSH(hw);

	/**
	 * initialise structure to send to user application
	 * will return response from user in retval field
	 */
	ret_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
	ret_param.vfid = vf;
	ret_param.msg_type = msgbuf[0] & 0xFFFF;
	ret_param.msg = (void *)msgbuf;

	/* perform VF reset */
	if (msgbuf[0] == IXGBE_VF_RESET) {
		int ret = ixgbe_vf_reset(dev, vf, msgbuf);

		vfinfo[vf].clear_to_send = true;

		/* notify application about VF reset */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
					      &ret_param);
		return ret;
	}

	/**
	 * ask user application if we are allowed to perform those functions
	 * if we get ret_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED
	 * then business as usual,
	 * if 0, do nothing and send ACK to VF
	 * if ret_param.retval > 1, do nothing and send NAK to VF
	 */
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
				      &ret_param);

	retval = ret_param.retval;

	/* check & process VF to PF mailbox message */
	switch ((msgbuf[0] & 0xFFFF)) {
	case IXGBE_VF_SET_MAC_ADDR:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_MULTICAST:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_LPE:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
		break;
	case IXGBE_VF_SET_VLAN:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
		break;
	case IXGBE_VF_GET_QUEUES:
		retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
		msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE;
		break;
	case IXGBE_VF_UPDATE_XCAST_MODE:
		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
			retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf);
		break;
	default:
		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
		retval = IXGBE_ERR_MBX;
		break;
	}

	/* respond to the VF according to the message processing result */
	if (retval)
		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, msgbuf, msg_size, vf);

	return retval;
}

static inline void
ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
	uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

	if (!vfinfo[vf].clear_to_send)
		ixgbe_write_mbx(hw, &msg, 1, vf);
}

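/*
 * Expected to run when the mailbox interrupt enabled in
 * ixgbe_mb_intr_setup() fires: each VF is polled for a pending function
 * level reset, mailbox message, or ack, and the matching handler runs.
 */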
void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
{
	uint16_t vf;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
		/* check & process vf function level reset */
		if (!ixgbe_check_for_rst(hw, vf))
			ixgbe_vf_reset_event(eth_dev, vf);

		/* check & process vf mailbox messages */
		if (!ixgbe_check_for_msg(hw, vf))
			ixgbe_rcv_msg_from_vf(eth_dev, vf);

		/* check & process acks from vf */
		if (!ixgbe_check_for_ack(hw, vf))
			ixgbe_rcv_ack_from_vf(eth_dev, vf);
	}
}