1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2013 - 2015 Intel Corporation
3 */
4
5 #include "fm10k_pf.h"
6 #include "fm10k_vf.h"
7
8 /**
9 * fm10k_reset_hw_pf - PF hardware reset
10 * @hw: pointer to hardware structure
11 *
12 * This function should return the hardware to a state similar to the
13 * one it is in after being powered on.
14 **/
15 STATIC s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
16 {
17 s32 err;
18 u32 reg;
19 u16 i;
20
21 DEBUGFUNC("fm10k_reset_hw_pf");
22
23 /* Disable interrupts */
24 FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL));
25
26 /* Lock ITR2 reg 0 into itself and disable interrupt moderation */
27 FM10K_WRITE_REG(hw, FM10K_ITR2(0), 0);
28 FM10K_WRITE_REG(hw, FM10K_INT_CTRL, 0);
29
30 /* We assume here Tx and Rx queue 0 are owned by the PF */
31
32 /* Shut off VF access to their queues forcing them to queue 0 */
33 for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) {
34 FM10K_WRITE_REG(hw, FM10K_TQMAP(i), 0);
35 FM10K_WRITE_REG(hw, FM10K_RQMAP(i), 0);
36 }
37
38 /* shut down all rings */
39 err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
40 if (err == FM10K_ERR_REQUESTS_PENDING) {
41 hw->mac.reset_while_pending++;
42 goto force_reset;
43 } else if (err) {
44 return err;
45 }
46
47 /* Verify that DMA is no longer active */
48 reg = FM10K_READ_REG(hw, FM10K_DMA_CTRL);
49 if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
50 return FM10K_ERR_DMA_PENDING;
51
52 force_reset:
53 /* Initiate data path reset */
54 reg = FM10K_DMA_CTRL_DATAPATH_RESET;
55 FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, reg);
56
57 /* Flush write and allow 100us for reset to complete */
58 FM10K_WRITE_FLUSH(hw);
59 usec_delay(FM10K_RESET_TIMEOUT);
60
61 /* Verify we made it out of reset */
62 reg = FM10K_READ_REG(hw, FM10K_IP);
63 if (!(reg & FM10K_IP_NOTINRESET))
64 return FM10K_ERR_RESET_FAILED;
65
66 return FM10K_SUCCESS;
67 }
68
69 /**
70 * fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support
71 * @hw: pointer to hardware structure
72 *
73 * Looks at the ARI hierarchy bit to determine whether ARI is supported or not.
74 **/
75 STATIC bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw)
76 {
77 u16 sriov_ctrl = FM10K_READ_PCI_WORD(hw, FM10K_PCIE_SRIOV_CTRL);
78
79 DEBUGFUNC("fm10k_is_ari_hierarchy_pf");
80
81 return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI);
82 }
83
84 /**
85 * fm10k_init_hw_pf - PF hardware initialization
86 * @hw: pointer to hardware structure
87 *
88 **/
89 STATIC s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
90 {
91 u32 dma_ctrl, txqctl;
92 u16 i;
93
94 DEBUGFUNC("fm10k_init_hw_pf");
95
96 /* Establish default VSI as valid */
97 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0);
98 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(fm10k_dglort_default),
99 FM10K_DGLORTMAP_ANY);
100
101 /* Invalidate all other GLORT entries */
102 for (i = 1; i < FM10K_DGLORT_COUNT; i++)
103 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE);
104
105 /* reset ITR2(0) to point to itself */
106 FM10K_WRITE_REG(hw, FM10K_ITR2(0), 0);
107
108 /* reset VF ITR2(0) to point to 0 to avoid PF registers */
109 FM10K_WRITE_REG(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0);
110
111 /* loop through all PF ITR2 registers pointing them to the previous */
112 for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++)
113 FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - 1);
114
115 /* Enable interrupt moderator if not already enabled */
116 FM10K_WRITE_REG(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
117
118 /* compute the default txqctl configuration */
119 txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW |
120 (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT);
121
122 for (i = 0; i < FM10K_MAX_QUEUES; i++) {
123 /* configure rings for 256 Queue / 32 Descriptor cache mode */
124 FM10K_WRITE_REG(hw, FM10K_TQDLOC(i),
125 (i * FM10K_TQDLOC_BASE_32_DESC) |
126 FM10K_TQDLOC_SIZE_32_DESC);
127 FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), txqctl);
128
129 /* configure rings to provide TPH processing hints */
130 FM10K_WRITE_REG(hw, FM10K_TPH_TXCTRL(i),
131 FM10K_TPH_TXCTRL_DESC_TPHEN |
132 FM10K_TPH_TXCTRL_DESC_RROEN |
133 FM10K_TPH_TXCTRL_DESC_WROEN |
134 FM10K_TPH_TXCTRL_DATA_RROEN);
135 FM10K_WRITE_REG(hw, FM10K_TPH_RXCTRL(i),
136 FM10K_TPH_RXCTRL_DESC_TPHEN |
137 FM10K_TPH_RXCTRL_DESC_RROEN |
138 FM10K_TPH_RXCTRL_DATA_WROEN |
139 FM10K_TPH_RXCTRL_HDR_WROEN);
140 }
141
142 /* set max hold interval to align with 1.024 usec in all modes and
143 * store ITR scale
144 */
145 switch (hw->bus.speed) {
146 case fm10k_bus_speed_2500:
147 dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1;
148 hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN1;
149 break;
150 case fm10k_bus_speed_5000:
151 dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2;
152 hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN2;
153 break;
154 case fm10k_bus_speed_8000:
155 dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3;
156 hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
157 break;
158 default:
159 dma_ctrl = 0;
160 /* just in case, assume Gen3 ITR scale */
161 hw->mac.itr_scale = FM10K_TDLEN_ITR_SCALE_GEN3;
162 break;
163 }
164
165 /* Configure TSO flags */
166 FM10K_WRITE_REG(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW);
167 FM10K_WRITE_REG(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI);
168
169 /* Enable DMA engine
170 * Set Rx Descriptor size to 32
171 * Set Minimum MSS to 64
172 * Set Maximum number of Rx queues to 256 / 32 Descriptor
173 */
174 dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE |
175 FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 |
176 FM10K_DMA_CTRL_32_DESC;
177
178 FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, dma_ctrl);
179
180 /* record maximum queue count, we limit ourselves to 128 */
181 hw->mac.max_queues = FM10K_MAX_QUEUES_PF;
182
183 /* We support either 64 VFs or 7 VFs depending on if we have ARI */
184 hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7;
185
186 return FM10K_SUCCESS;
187 }
188
189 #ifndef NO_IS_SLOT_APPROPRIATE_CHECK
190 /**
191 * fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU
192 * @hw: pointer to hardware structure
193 *
194 * Looks at the PCIe bus info to confirm whether or not this slot can support
195 * the necessary bandwidth for this device.
196 **/
197 STATIC bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw)
198 {
199 DEBUGFUNC("fm10k_is_slot_appropriate_pf");
200
201 return (hw->bus.speed == hw->bus_caps.speed) &&
202 (hw->bus.width == hw->bus_caps.width);
203 }
204
205 #endif
206 /**
207 * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
208 * @hw: pointer to hardware structure
209 * @vid: VLAN ID to add to table
210 * @vsi: Index indicating VF ID or PF ID in table
211 * @set: Indicates if this is a set or clear operation
212 *
213 * This function adds or removes the corresponding VLAN ID from the VLAN
214 * filter table for the corresponding function. In addition to the
215 * standard set/clear, which operates on a single bit at a time, a
216 * multi-bit write is supported to set 64 bits at a time.
217 **/
218 STATIC s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
219 {
220 u32 vlan_table, reg, mask, bit, len;
221
222 /* verify the VSI index is valid */
223 if (vsi > FM10K_VLAN_TABLE_VSI_MAX)
224 return FM10K_ERR_PARAM;
225
226 /* VLAN multi-bit write:
227 * The multi-bit write has several parts to it.
228 * 24 16 8 0
229 * 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
230 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
231 * | RSVD0 | Length |C|RSVD0| VLAN ID |
232 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
233 *
234 * VLAN ID: VLAN starting value
235 * RSVD0: Reserved section, must be 0
236 * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message)
237 * Length: Number of times to repeat the bit being set
238 */
239 len = vid >> 16;
240 vid = (vid << 17) >> 17;
241
242 /* verify the reserved 0 fields are 0 */
243 if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
244 return FM10K_ERR_PARAM;
245
246 /* Loop through the table updating all required VLANs */
247 for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32;
248 len < FM10K_VLAN_TABLE_VID_MAX;
249 len -= 32 - bit, reg++, bit = 0) {
250 /* record the initial state of the register */
251 vlan_table = FM10K_READ_REG(hw, reg);
252
253 /* truncate mask if we are at the start or end of the run */
254 mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit;
255
256 /* make necessary modifications to the register */
257 mask &= set ? ~vlan_table : vlan_table;
258 if (mask)
259 FM10K_WRITE_REG(hw, reg, vlan_table ^ mask);
260 }
261
262 return FM10K_SUCCESS;
263 }
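/* Illustrative sketch (not part of the original source): based on the
 * encoding handled above, a caller wanting to set VLAN IDs 256-287 for
 * VSI 0 could pack the multi-bit argument roughly as follows; reading
 * the loop above, a length field of N appears to cover N + 1 consecutive
 * VIDs, so length 31 plus starting VID 256 spans 32 entries.
 *
 *	u32 multi_vid = (31 << 16) | 256;
 *	s32 ret = fm10k_update_vlan_pf(hw, multi_vid, 0, true);
 */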
264
265 /**
266 * fm10k_read_mac_addr_pf - Read device MAC address
267 * @hw: pointer to the HW structure
268 *
269 * Reads the device MAC address from the SM_AREA and stores the value.
270 **/
271 STATIC s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw)
272 {
273 u8 perm_addr[ETH_ALEN];
274 u32 serial_num;
275
276 DEBUGFUNC("fm10k_read_mac_addr_pf");
277
278 serial_num = FM10K_READ_REG(hw, FM10K_SM_AREA(1));
279
280 /* last byte should be all 1's */
281 if ((~serial_num) << 24)
282 return FM10K_ERR_INVALID_MAC_ADDR;
283
284 perm_addr[0] = (u8)(serial_num >> 24);
285 perm_addr[1] = (u8)(serial_num >> 16);
286 perm_addr[2] = (u8)(serial_num >> 8);
287
288 serial_num = FM10K_READ_REG(hw, FM10K_SM_AREA(0));
289
290 /* first byte should be all 1's */
291 if ((~serial_num) >> 24)
292 return FM10K_ERR_INVALID_MAC_ADDR;
293
294 perm_addr[3] = (u8)(serial_num >> 16);
295 perm_addr[4] = (u8)(serial_num >> 8);
296 perm_addr[5] = (u8)(serial_num);
297
298 memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN);
299 memcpy(hw->mac.addr, perm_addr, ETH_ALEN);
300
301 return FM10K_SUCCESS;
302 }
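/* For reference (derived from the reads above, not from a datasheet):
 * the permanent MAC appears to be laid out across the two SM_AREA words
 * as
 *
 *	SM_AREA(1) = mac[0]:mac[1]:mac[2]:0xFF	(low byte must be all 1's)
 *	SM_AREA(0) = 0xFF:mac[3]:mac[4]:mac[5]	(high byte must be all 1's)
 *
 * so, for example, 0xA0369FFF / 0xFF112233 yields a0:36:9f:11:22:33.
 */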
303
304 /**
305 * fm10k_glort_valid_pf - Validate that the provided glort is valid
306 * @hw: pointer to the HW structure
307 * @glort: base glort to be validated
308 *
309 * This function will return false if the provided glort is invalid
310 **/
311 bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
312 {
313 glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT;
314
315 return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE);
316 }
317
318 /**
319 * fm10k_update_xc_addr_pf - Update device addresses
320 * @hw: pointer to the HW structure
321 * @glort: base resource tag for this request
322 * @mac: MAC address to add/remove from table
323 * @vid: VLAN ID to add/remove from table
324 * @add: Indicates if this is an add or remove operation
325 * @flags: flags field to indicate add and secure
326 *
327 * This function generates a message to the Switch API requesting
328 * that the given logical port add/remove the given L2 MAC/VLAN address.
329 **/
330 STATIC s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
331 const u8 *mac, u16 vid, bool add, u8 flags)
332 {
333 struct fm10k_mbx_info *mbx = &hw->mbx;
334 struct fm10k_mac_update mac_update;
335 u32 msg[5];
336
337 DEBUGFUNC("fm10k_update_xc_addr_pf");
338
339 /* clear set bit from VLAN ID */
340 vid &= ~FM10K_VLAN_CLEAR;
341
342 /* if glort or VLAN are not valid return error */
343 if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
344 return FM10K_ERR_PARAM;
345
346 /* record fields */
347 mac_update.mac_lower = FM10K_CPU_TO_LE32(((u32)mac[2] << 24) |
348 ((u32)mac[3] << 16) |
349 ((u32)mac[4] << 8) |
350 ((u32)mac[5]));
351 mac_update.mac_upper = FM10K_CPU_TO_LE16(((u16)mac[0] << 8) |
352 ((u16)mac[1]));
353 mac_update.vlan = FM10K_CPU_TO_LE16(vid);
354 mac_update.glort = FM10K_CPU_TO_LE16(glort);
355 mac_update.action = add ? 0 : 1;
356 mac_update.flags = flags;
357
358 /* populate mac_update fields */
359 fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE);
360 fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE,
361 &mac_update, sizeof(mac_update));
362
363 /* load onto outgoing mailbox */
364 return mbx->ops.enqueue_tx(hw, mbx, msg);
365 }
366
367 /**
368 * fm10k_update_uc_addr_pf - Update device unicast addresses
369 * @hw: pointer to the HW structure
370 * @glort: base resource tag for this request
371 * @mac: MAC address to add/remove from table
372 * @vid: VLAN ID to add/remove from table
373 * @add: Indicates if this is an add or remove operation
374 * @flags: flags field to indicate add and secure
375 *
376 * This function is used to add or remove unicast addresses for
377 * the PF.
378 **/
379 STATIC s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort,
380 const u8 *mac, u16 vid, bool add, u8 flags)
381 {
382 DEBUGFUNC("fm10k_update_uc_addr_pf");
383
384 /* verify MAC address is valid */
385 if (!IS_VALID_ETHER_ADDR(mac))
386 return FM10K_ERR_PARAM;
387
388 return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags);
389 }
390
391 /**
392 * fm10k_update_mc_addr_pf - Update device multicast addresses
393 * @hw: pointer to the HW structure
394 * @glort: base resource tag for this request
395 * @mac: MAC address to add/remove from table
396 * @vid: VLAN ID to add/remove from table
397 * @add: Indicates if this is an add or remove operation
398 *
399 * This function is used to add or remove multicast MAC addresses for
400 * the PF.
401 **/
402 STATIC s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort,
403 const u8 *mac, u16 vid, bool add)
404 {
405 DEBUGFUNC("fm10k_update_mc_addr_pf");
406
407 /* verify multicast address is valid */
408 if (!IS_MULTICAST_ETHER_ADDR(mac))
409 return FM10K_ERR_PARAM;
410
411 return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0);
412 }
413
414 /**
415 * fm10k_update_xcast_mode_pf - Request update of multicast mode
416 * @hw: pointer to hardware structure
417 * @glort: base resource tag for this request
418 * @mode: integer value indicating mode being requested
419 *
420 * This function will attempt to request a higher mode for the port
421 * so that it can enable either multicast, multicast promiscuous, or
422 * promiscuous mode of operation.
423 **/
424 STATIC s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode)
425 {
426 struct fm10k_mbx_info *mbx = &hw->mbx;
427 u32 msg[3], xcast_mode;
428
429 DEBUGFUNC("fm10k_update_xcast_mode_pf");
430
431 if (mode > FM10K_XCAST_MODE_NONE)
432 return FM10K_ERR_PARAM;
433
434 /* if glort is not valid return error */
435 if (!fm10k_glort_valid_pf(hw, glort))
436 return FM10K_ERR_PARAM;
437
438 /* write xcast mode as a single u32 value,
439 * lower 16 bits: glort
440 * upper 16 bits: mode
441 */
442 xcast_mode = ((u32)mode << 16) | glort;
443
444 /* generate message requesting to change xcast mode */
445 fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES);
446 fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode);
447
448 /* load onto outgoing mailbox */
449 return mbx->ops.enqueue_tx(hw, mbx, msg);
450 }
451
452 /**
453 * fm10k_update_int_moderator_pf - Update interrupt moderator linked list
454 * @hw: pointer to hardware structure
455 *
456 * This function walks through the MSI-X vector table to determine the
457 * number of active interrupts and based on that information updates the
458 * interrupt moderator linked list.
459 **/
460 STATIC void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
461 {
462 u32 i;
463
464 /* Disable interrupt moderator */
465 FM10K_WRITE_REG(hw, FM10K_INT_CTRL, 0);
466
467 /* loop through PF vectors from last to first looking for enabled vectors */
468 for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) {
469 if (!FM10K_READ_REG(hw, FM10K_MSIX_VECTOR_MASK(i)))
470 break;
471 }
472
473 /* always reset VFITR2[0] to point to last enabled PF vector */
474 FM10K_WRITE_REG(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);
475
476 /* reset ITR2[0] to point to last enabled PF vector */
477 if (!hw->iov.num_vfs)
478 FM10K_WRITE_REG(hw, FM10K_ITR2(0), i);
479
480 /* Enable interrupt moderator */
481 FM10K_WRITE_REG(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
482 }
483
484 /**
485 * fm10k_update_lport_state_pf - Notify the switch of a change in port state
486 * @hw: pointer to the HW structure
487 * @glort: base resource tag for this request
488 * @count: number of logical ports being updated
489 * @enable: boolean value indicating enable or disable
490 *
491 * This function is used to add/remove a logical port from the switch.
492 **/
493 STATIC s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
494 u16 count, bool enable)
495 {
496 struct fm10k_mbx_info *mbx = &hw->mbx;
497 u32 msg[3], lport_msg;
498
499 DEBUGFUNC("fm10k_lport_state_pf");
500
501 /* do nothing if we are being asked to create or destroy 0 ports */
502 if (!count)
503 return FM10K_SUCCESS;
504
505 /* if glort is not valid return error */
506 if (!fm10k_glort_valid_pf(hw, glort))
507 return FM10K_ERR_PARAM;
508
509 /* reset multicast mode if deleting lport */
510 if (!enable)
511 fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
512
513 /* construct the lport message from the 2 pieces of data we have */
514 lport_msg = ((u32)count << 16) | glort;
515
516 /* generate lport create/delete message */
517 fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE :
518 FM10K_PF_MSG_ID_LPORT_DELETE);
519 fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg);
520
521 /* load onto outgoing mailbox */
522 return mbx->ops.enqueue_tx(hw, mbx, msg);
523 }
524
525 /**
526 * fm10k_configure_dglort_map_pf - Configures GLORT entry and queues
527 * @hw: pointer to hardware structure
528 * @dglort: pointer to dglort configuration structure
529 *
530 * Reads the configuration structure contained in dglort_cfg and uses
531 * that information to then populate a DGLORTMAP/DEC entry and the queues
532 * to which it has been assigned.
533 **/
534 STATIC s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
535 struct fm10k_dglort_cfg *dglort)
536 {
537 u16 glort, queue_count, vsi_count, pc_count;
538 u16 vsi, queue, pc, q_idx;
539 u32 txqctl, dglortdec, dglortmap;
540
541 /* verify the dglort pointer */
542 if (!dglort)
543 return FM10K_ERR_PARAM;
544
545 /* verify the dglort values */
546 if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) ||
547 (dglort->vsi_l > 6) || (dglort->vsi_b > 64) ||
548 (dglort->queue_l > 8) || (dglort->queue_b >= 256))
549 return FM10K_ERR_PARAM;
550
551 /* determine count of VSIs and queues */
552 queue_count = BIT(dglort->rss_l + dglort->pc_l);
553 vsi_count = BIT(dglort->vsi_l + dglort->queue_l);
554 glort = dglort->glort;
555 q_idx = dglort->queue_b;
556
557 /* configure SGLORT for queues */
558 for (vsi = 0; vsi < vsi_count; vsi++, glort++) {
559 for (queue = 0; queue < queue_count; queue++, q_idx++) {
560 if (q_idx >= FM10K_MAX_QUEUES)
561 break;
562
563 FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(q_idx), glort);
564 FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(q_idx), glort);
565 }
566 }
567
568 /* determine count of PCs and queues */
569 queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);
570 pc_count = BIT(dglort->pc_l);
571
572 /* configure PC for Tx queues */
573 for (pc = 0; pc < pc_count; pc++) {
574 q_idx = pc + dglort->queue_b;
575 for (queue = 0; queue < queue_count; queue++) {
576 if (q_idx >= FM10K_MAX_QUEUES)
577 break;
578
579 txqctl = FM10K_READ_REG(hw, FM10K_TXQCTL(q_idx));
580 txqctl &= ~FM10K_TXQCTL_PC_MASK;
581 txqctl |= pc << FM10K_TXQCTL_PC_SHIFT;
582 FM10K_WRITE_REG(hw, FM10K_TXQCTL(q_idx), txqctl);
583
584 q_idx += pc_count;
585 }
586 }
587
588 /* configure DGLORTDEC */
589 dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) |
590 ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) |
591 ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) |
592 ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) |
593 ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) |
594 ((u32)(dglort->queue_l));
595 if (dglort->inner_rss)
596 dglortdec |= FM10K_DGLORTDEC_INNERRSS_ENABLE;
597
598 /* configure DGLORTMAP */
599 dglortmap = (dglort->idx == fm10k_dglort_default) ?
600 FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO;
601 dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l;
602 dglortmap |= dglort->glort;
603
604 /* write values to hardware */
605 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec);
606 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap);
607
608 return FM10K_SUCCESS;
609 }
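/* Illustrative sketch (hypothetical values, not part of the original
 * source): a minimal mapping for the default DGLORT with four RSS queues
 * and no PC/VSI decoding could be built roughly like this before being
 * handed to the function above.
 *
 *	struct fm10k_dglort_cfg dglort = { 0 };
 *
 *	dglort.idx = fm10k_dglort_default;
 *	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
 *	dglort.rss_l = 2;	(2^2 = 4 RSS queues)
 *	dglort.queue_b = 0;	(start at queue 0)
 *
 *	err = fm10k_configure_dglort_map_pf(hw, &dglort);
 */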
610
611 u16 fm10k_queues_per_pool(struct fm10k_hw *hw)
612 {
613 u16 num_pools = hw->iov.num_pools;
614
615 return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ?
616 8 : FM10K_MAX_QUEUES_POOL;
617 }
618
619 u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx)
620 {
621 u16 num_vfs = hw->iov.num_vfs;
622 u16 vf_q_idx = FM10K_MAX_QUEUES;
623
624 vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx);
625
626 return vf_q_idx;
627 }
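/* Worked example (derived from the two helpers above, assuming 256 total
 * queues as used by FM10K_MAX_QUEUES elsewhere in this file): with 48
 * pools/VFs, fm10k_queues_per_pool() returns 2, so
 * fm10k_vf_queue_index(hw, 0) = 256 - 2 * 48 = 160, and VF n owns queues
 * 160 + 2n and 160 + 2n + 1.
 */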
628
629 STATIC u16 fm10k_vectors_per_pool(struct fm10k_hw *hw)
630 {
631 u16 num_pools = hw->iov.num_pools;
632
633 return (num_pools > 32) ? 8 : (num_pools > 16) ? 16 :
634 FM10K_MAX_VECTORS_POOL;
635 }
636
637 STATIC u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx)
638 {
639 u16 vf_v_idx = FM10K_MAX_VECTORS_PF;
640
641 vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx;
642
643 return vf_v_idx;
644 }
645
646 /**
647 * fm10k_iov_assign_resources_pf - Assign pool resources for virtualization
648 * @hw: pointer to the HW structure
649 * @num_vfs: number of VFs to be allocated
650 * @num_pools: number of virtualization pools to be allocated
651 *
652 * Allocates queues and traffic classes to virtualization entities to prepare
653 * the PF for SR-IOV and VMDq
654 **/
655 STATIC s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
656 u16 num_pools)
657 {
658 u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx;
659 u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT;
660 int i, j;
661
662 /* hardware only supports up to 64 pools */
663 if (num_pools > 64)
664 return FM10K_ERR_PARAM;
665
666 /* the number of VFs cannot exceed the number of pools */
667 if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs))
668 return FM10K_ERR_PARAM;
669
670 /* record number of virtualization entities */
671 hw->iov.num_vfs = num_vfs;
672 hw->iov.num_pools = num_pools;
673
674 /* determine qmap offsets and counts */
675 qmap_stride = (num_vfs > 8) ? 32 : 256;
676 qpp = fm10k_queues_per_pool(hw);
677 vpp = fm10k_vectors_per_pool(hw);
678
679 /* calculate starting index for queues */
680 vf_q_idx = fm10k_vf_queue_index(hw, 0);
681 qmap_idx = 0;
682
683 /* establish TCs with -1 credits and no quanta to prevent transmit */
684 for (i = 0; i < num_vfs; i++) {
685 FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(i), 0);
686 FM10K_WRITE_REG(hw, FM10K_TC_RATE(i), 0);
687 FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(i),
688 FM10K_TC_CREDIT_CREDIT_MASK);
689 }
690
691 /* zero out all mbmem registers */
692 for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;)
693 FM10K_WRITE_REG(hw, FM10K_MBMEM(i), 0);
694
695 /* clear event notification of VF FLR */
696 FM10K_WRITE_REG(hw, FM10K_PFVFLREC(0), ~0);
697 FM10K_WRITE_REG(hw, FM10K_PFVFLREC(1), ~0);
698
699 /* loop through unallocated rings assigning them back to PF */
700 for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) {
701 FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), 0);
702 FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF |
703 FM10K_TXQCTL_UNLIMITED_BW | vid);
704 FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF);
705 }
706
707 /* PF should have already updated VFITR2[0] */
708
709 /* update all ITR registers to flow to VFITR2[0] */
710 for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) {
711 if (!(i & (vpp - 1)))
712 FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - vpp);
713 else
714 FM10K_WRITE_REG(hw, FM10K_ITR2(i), i - 1);
715 }
716
717 /* update PF ITR2[0] to reference the last vector */
718 FM10K_WRITE_REG(hw, FM10K_ITR2(0),
719 fm10k_vf_vector_index(hw, num_vfs - 1));
720
721 /* loop through rings populating rings and TCs */
722 for (i = 0; i < num_vfs; i++) {
723 /* record index for VF queue 0 for use in end of loop */
724 vf_q_idx0 = vf_q_idx;
725
726 for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) {
727 /* assign VF and locked TC to queues */
728 FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0);
729 FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx),
730 (i << FM10K_TXQCTL_TC_SHIFT) | i |
731 FM10K_TXQCTL_VF | vid);
732 FM10K_WRITE_REG(hw, FM10K_RXDCTL(vf_q_idx),
733 FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
734 FM10K_RXDCTL_DROP_ON_EMPTY);
735 FM10K_WRITE_REG(hw, FM10K_RXQCTL(vf_q_idx),
736 (i << FM10K_RXQCTL_VF_SHIFT) |
737 FM10K_RXQCTL_VF);
738
739 /* map queue pair to VF */
740 FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
741 FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), vf_q_idx);
742 }
743
744 /* repeat the first ring for all of the remaining VF rings */
745 for (; j < qmap_stride; j++, qmap_idx++) {
746 FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0);
747 FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0);
748 }
749 }
750
751 /* loop through remaining indexes assigning all to queue 0 */
752 while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) {
753 FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
754 FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx), 0);
755 qmap_idx++;
756 }
757
758 return FM10K_SUCCESS;
759 }
760
761 /**
762 * fm10k_iov_configure_tc_pf - Configure the shaping group for VF
763 * @hw: pointer to the HW structure
764 * @vf_idx: index of VF receiving GLORT
765 * @rate: Rate indicated in Mb/s
766 *
767 * Configures the TC for a given VF to allow only up to a given number
768 * of Mb/s of outgoing Tx throughput.
769 **/
770 STATIC s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate)
771 {
772 /* configure defaults */
773 u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3;
774 u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK;
775
776 /* verify vf is in range */
777 if (vf_idx >= hw->iov.num_vfs)
778 return FM10K_ERR_PARAM;
779
780 /* set interval to align with 4.096 usec in all modes */
781 switch (hw->bus.speed) {
782 case fm10k_bus_speed_2500:
783 interval = FM10K_TC_RATE_INTERVAL_4US_GEN1;
784 break;
785 case fm10k_bus_speed_5000:
786 interval = FM10K_TC_RATE_INTERVAL_4US_GEN2;
787 break;
788 default:
789 break;
790 }
791
792 if (rate) {
793 if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN)
794 return FM10K_ERR_PARAM;
795
796 /* The quanta is measured in Bytes per 4.096 or 8.192 usec
797 * The rate is provided in Mbits per second
798 * To translate from rate to quanta we need to multiply the
799 * rate by 8.192 usec and divide by 8 bits/byte. To avoid
800 * dealing with floating point we can round the values up
801 * to the nearest whole number ratio which gives us 128 / 125.
802 */
803 tc_rate = (rate * 128) / 125;
804
805 /* try to keep the rate limiting accurate by increasing
806 * the number of credits and interval for rates less than 4Gb/s
807 */
808 if (rate < 4000)
809 interval <<= 1;
810 else
811 tc_rate >>= 1;
812 }
813
814 /* update rate limiter with new values */
815 FM10K_WRITE_REG(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval);
816 FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
817 FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
818
819 return FM10K_SUCCESS;
820 }
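/* Worked example (a sketch of the arithmetic above, not additional
 * functionality): limiting a VF to 1000 Mb/s gives
 *
 *	tc_rate = (1000 * 128) / 125 = 1024 bytes per interval
 *
 * and, because 1000 < 4000, the interval is doubled rather than halving
 * tc_rate, which keeps the quanta large enough to remain accurate.
 */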
821
822 /**
823 * fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list
824 * @hw: pointer to the HW structure
825 * @vf_idx: index of VF receiving GLORT
826 *
827 * Update the interrupt moderator linked list to include any MSI-X
828 * interrupts which the VF has enabled in the MSI-X vector table.
829 **/
830 STATIC s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
831 {
832 u16 vf_v_idx, vf_v_limit, i;
833
834 /* verify vf is in range */
835 if (vf_idx >= hw->iov.num_vfs)
836 return FM10K_ERR_PARAM;
837
838 /* determine vector offset and count */
839 vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
840 vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
841
842 /* search for first vector that is not masked */
843 for (i = vf_v_limit - 1; i > vf_v_idx; i--) {
844 if (!FM10K_READ_REG(hw, FM10K_MSIX_VECTOR_MASK(i)))
845 break;
846 }
847
848 /* reset linked list so it now includes our active vectors */
849 if (vf_idx == (hw->iov.num_vfs - 1))
850 FM10K_WRITE_REG(hw, FM10K_ITR2(0), i);
851 else
852 FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_limit), i);
853
854 return FM10K_SUCCESS;
855 }
856
857 /**
858 * fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF
859 * @hw: pointer to the HW structure
860 * @vf_info: pointer to VF information structure
861 *
862 * Assign a MAC address and default VLAN to a VF and notify it of the update
863 **/
864 STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
865 struct fm10k_vf_info *vf_info)
866 {
867 u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i;
868 u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0;
869 s32 err = FM10K_SUCCESS;
870 u16 vf_idx, vf_vid;
871
872 /* verify vf is in range */
873 if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs)
874 return FM10K_ERR_PARAM;
875
876 /* determine qmap offsets and counts */
877 qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
878 queues_per_pool = fm10k_queues_per_pool(hw);
879
880 /* calculate starting index for queues */
881 vf_idx = vf_info->vf_idx;
882 vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
883 qmap_idx = qmap_stride * vf_idx;
884
885 /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
886 * used here to indicate to the VF that it will not have privilege to
887 * write VLAN_TABLE. All policy is enforced on the PF but this allows
888 * the VF to correctly report errors for userspace requests.
889 */
890 if (vf_info->pf_vid)
891 vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
892 else
893 vf_vid = vf_info->sw_vid;
894
895 /* generate MAC_ADDR request */
896 fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
897 fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
898 vf_info->mac, vf_vid);
899
900 /* Configure Queue control register with new VLAN ID. The TXQCTL
901 * register is RO from the VF, so the PF must do this even in the
902 * case of notifying the VF of a new VID via the mailbox.
903 */
904 txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
905 FM10K_TXQCTL_VID_MASK;
906 txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
907 FM10K_TXQCTL_VF | vf_idx;
908
909 for (i = 0; i < queues_per_pool; i++)
910 FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
911
912 /* try loading a message onto outgoing mailbox first */
913 if (vf_info->mbx.ops.enqueue_tx) {
914 err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
915 if (err != FM10K_MBX_ERR_NO_MBX)
916 return err;
917 err = FM10K_SUCCESS;
918 }
919
920 /* If we aren't connected to a mailbox, this is most likely because
921 * the VF driver is not running. It should thus be safe to re-map
922 * queues and use the registers to pass the MAC address so that the VF
923 * driver gets correct information during its initialization.
924 */
925
926 /* MAP Tx queue back to 0 temporarily, and disable it */
927 FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
928 FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0);
929
930 /* verify the ring has been disabled before modifying base address registers */
931 txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx));
932 for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) {
933 /* limit ourselves to a 1ms timeout */
934 if (timeout == 10) {
935 err = FM10K_ERR_DMA_PENDING;
936 goto err_out;
937 }
938
939 usec_delay(100);
940 txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx));
941 }
942
943 /* Update base address registers to contain MAC address */
944 if (IS_VALID_ETHER_ADDR(vf_info->mac)) {
945 tdbal = (((u32)vf_info->mac[3]) << 24) |
946 (((u32)vf_info->mac[4]) << 16) |
947 (((u32)vf_info->mac[5]) << 8);
948
949 tdbah = (((u32)0xFF) << 24) |
950 (((u32)vf_info->mac[0]) << 16) |
951 (((u32)vf_info->mac[1]) << 8) |
952 ((u32)vf_info->mac[2]);
953 }
954
955 /* Record the base address into queue 0 */
956 FM10K_WRITE_REG(hw, FM10K_TDBAL(vf_q_idx), tdbal);
957 FM10K_WRITE_REG(hw, FM10K_TDBAH(vf_q_idx), tdbah);
958
959 /* Provide the VF the ITR scale, using software-defined fields in TDLEN
960 * to pass the information during VF initialization. See definition of
961 * FM10K_TDLEN_ITR_SCALE_SHIFT for more details.
962 */
963 FM10K_WRITE_REG(hw, FM10K_TDLEN(vf_q_idx), hw->mac.itr_scale <<
964 FM10K_TDLEN_ITR_SCALE_SHIFT);
965
966 err_out:
967 /* restore the queue back to VF ownership */
968 FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
969 return err;
970 }
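/* For reference (derived from the TDBAL/TDBAH writes above): when no
 * mailbox is connected, the MAC address is parked in the base address
 * registers of the VF's first queue, laid out roughly as
 *
 *	TDBAH = 0xFF:mac[0]:mac[1]:mac[2]
 *	TDBAL = mac[3]:mac[4]:mac[5]:0x00
 *
 * so the VF driver can recover it during its own initialization.
 */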
971
972 /**
973 * fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF
974 * @hw: pointer to the HW structure
975 * @vf_info: pointer to VF information structure
976 *
977 * Reassign the interrupts and queues to a VF following an FLR
978 **/
979 STATIC s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
980 struct fm10k_vf_info *vf_info)
981 {
982 u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx;
983 u32 tdbal = 0, tdbah = 0, txqctl, rxqctl;
984 u16 vf_v_idx, vf_v_limit, vf_vid;
985 u8 vf_idx = vf_info->vf_idx;
986 int i;
987
988 /* verify vf is in range */
989 if (vf_idx >= hw->iov.num_vfs)
990 return FM10K_ERR_PARAM;
991
992 /* clear event notification of VF FLR */
993 FM10K_WRITE_REG(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32));
994
995 /* force timeout and then disconnect the mailbox */
996 vf_info->mbx.timeout = 0;
997 if (vf_info->mbx.ops.disconnect)
998 vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);
999
1000 /* determine vector offset and count */
1001 vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
1002 vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
1003
1004 /* determine qmap offsets and counts */
1005 qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
1006 queues_per_pool = fm10k_queues_per_pool(hw);
1007 qmap_idx = qmap_stride * vf_idx;
1008
1009 /* make all the queues inaccessible to the VF */
1010 for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) {
1011 FM10K_WRITE_REG(hw, FM10K_TQMAP(i), 0);
1012 FM10K_WRITE_REG(hw, FM10K_RQMAP(i), 0);
1013 }
1014
1015 /* calculate starting index for queues */
1016 vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
1017
1018 /* determine correct default VLAN ID */
1019 if (vf_info->pf_vid)
1020 vf_vid = vf_info->pf_vid;
1021 else
1022 vf_vid = vf_info->sw_vid;
1023
1024 /* configure Queue control register */
1025 txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
1026 (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
1027 FM10K_TXQCTL_VF | vf_idx;
1028 rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF;
1029
1030 /* stop further DMA and reset queue ownership back to VF */
1031 for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
1032 FM10K_WRITE_REG(hw, FM10K_TXDCTL(i), 0);
1033 FM10K_WRITE_REG(hw, FM10K_TXQCTL(i), txqctl);
1034 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i),
1035 FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
1036 FM10K_RXDCTL_DROP_ON_EMPTY);
1037 FM10K_WRITE_REG(hw, FM10K_RXQCTL(i), rxqctl);
1038 }
1039
1040 /* reset TC with -1 credits and no quanta to prevent transmit */
1041 FM10K_WRITE_REG(hw, FM10K_TC_MAXCREDIT(vf_idx), 0);
1042 FM10K_WRITE_REG(hw, FM10K_TC_RATE(vf_idx), 0);
1043 FM10K_WRITE_REG(hw, FM10K_TC_CREDIT(vf_idx),
1044 FM10K_TC_CREDIT_CREDIT_MASK);
1045
1046 /* update our first entry in the table based on previous VF */
1047 if (!vf_idx)
1048 hw->mac.ops.update_int_moderator(hw);
1049 else
1050 hw->iov.ops.assign_int_moderator(hw, vf_idx - 1);
1051
1052 /* reset linked list so it now includes our active vectors */
1053 if (vf_idx == (hw->iov.num_vfs - 1))
1054 FM10K_WRITE_REG(hw, FM10K_ITR2(0), vf_v_idx);
1055 else
1056 FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_limit), vf_v_idx);
1057
1058 /* link remaining vectors so that next points to previous */
1059 for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++)
1060 FM10K_WRITE_REG(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1);
1061
1062 /* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */
1063 for (i = FM10K_VFMBMEM_LEN; i--;)
1064 FM10K_WRITE_REG(hw, FM10K_MBMEM_VF(vf_idx, i), 0);
1065 for (i = FM10K_VLAN_TABLE_SIZE; i--;)
1066 FM10K_WRITE_REG(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0);
1067 for (i = FM10K_RETA_SIZE; i--;)
1068 FM10K_WRITE_REG(hw, FM10K_RETA(vf_info->vsi, i), 0);
1069 for (i = FM10K_RSSRK_SIZE; i--;)
1070 FM10K_WRITE_REG(hw, FM10K_RSSRK(vf_info->vsi, i), 0);
1071 FM10K_WRITE_REG(hw, FM10K_MRQC(vf_info->vsi), 0);
1072
1073 /* Update base address registers to contain MAC address */
1074 if (IS_VALID_ETHER_ADDR(vf_info->mac)) {
1075 tdbal = (((u32)vf_info->mac[3]) << 24) |
1076 (((u32)vf_info->mac[4]) << 16) |
1077 (((u32)vf_info->mac[5]) << 8);
1078 tdbah = (((u32)0xFF) << 24) |
1079 (((u32)vf_info->mac[0]) << 16) |
1080 (((u32)vf_info->mac[1]) << 8) |
1081 ((u32)vf_info->mac[2]);
1082 }
1083
1084 /* map queue pairs back to VF from last to first */
1085 for (i = queues_per_pool; i--;) {
1086 FM10K_WRITE_REG(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
1087 FM10K_WRITE_REG(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
1088 /* See definition of FM10K_TDLEN_ITR_SCALE_SHIFT for an
1089 * explanation of how TDLEN is used.
1090 */
1091 FM10K_WRITE_REG(hw, FM10K_TDLEN(vf_q_idx + i),
1092 hw->mac.itr_scale <<
1093 FM10K_TDLEN_ITR_SCALE_SHIFT);
1094 FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i);
1095 FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
1096 }
1097
1098 /* repeat the first ring for all the remaining VF rings */
1099 for (i = queues_per_pool; i < qmap_stride; i++) {
1100 FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
1101 FM10K_WRITE_REG(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
1102 }
1103
1104 return FM10K_SUCCESS;
1105 }
1106
1107 /**
1108 * fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF
1109 * @hw: pointer to hardware structure
1110 * @vf_info: pointer to VF information structure
1111 * @lport_idx: Logical port offset from the hardware glort
1112 * @flags: Set of capability flags to extend port beyond basic functionality
1113 *
1114 * This function allows enabling a VF port by assigning it a GLORT and
1115 * setting the flags so that it can enable an Rx mode.
1116 **/
1117 STATIC s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw,
1118 struct fm10k_vf_info *vf_info,
1119 u16 lport_idx, u8 flags)
1120 {
1121 u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE;
1122
1123 DEBUGFUNC("fm10k_iov_set_lport_state_pf");
1124
1125 /* if glort is not valid return error */
1126 if (!fm10k_glort_valid_pf(hw, glort))
1127 return FM10K_ERR_PARAM;
1128
1129 vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE;
1130 vf_info->glort = glort;
1131
1132 return FM10K_SUCCESS;
1133 }
1134
1135 /**
1136 * fm10k_iov_reset_lport_pf - Disable a logical port for a given VF
1137 * @hw: pointer to hardware structure
1138 * @vf_info: pointer to VF information structure
1139 *
1140 * This function disables a VF port by stripping it of a GLORT and
1141 * setting the flags so that it cannot enable any Rx mode.
1142 **/
1143 STATIC void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw,
1144 struct fm10k_vf_info *vf_info)
1145 {
1146 u32 msg[1];
1147
1148 DEBUGFUNC("fm10k_iov_reset_lport_state_pf");
1149
1150 /* need to disable the port if it is already enabled */
1151 if (FM10K_VF_FLAG_ENABLED(vf_info)) {
1152 /* notify switch that this port has been disabled */
1153 fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false);
1154
1155 /* generate port state response to notify VF it is not ready */
1156 fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
1157 vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
1158 }
1159
1160 /* clear flags and glort if it exists */
1161 vf_info->vf_flags = 0;
1162 vf_info->glort = 0;
1163 }
1164
1165 /**
1166 * fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs
1167 * @hw: pointer to hardware structure
1168 * @q: stats for all queues of a VF
1169 * @vf_idx: index of VF
1170 *
1171 * This function collects queue stats for VFs.
1172 **/
1173 STATIC void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
1174 struct fm10k_hw_stats_q *q,
1175 u16 vf_idx)
1176 {
1177 u32 idx, qpp;
1178
1179 /* get stats for all of the queues */
1180 qpp = fm10k_queues_per_pool(hw);
1181 idx = fm10k_vf_queue_index(hw, vf_idx);
1182 fm10k_update_hw_stats_q(hw, q, idx, qpp);
1183 }
1184
1185 /**
1186 * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
1187 * @hw: Pointer to hardware structure
1188 * @results: Pointer array to message, results[0] is pointer to message
1189 * @mbx: Pointer to mailbox information structure
1190 *
1191 * This function is a default handler for MSI-X requests from the VF. The
1192 * assumption is that in this case it is acceptable to just directly
1193 * hand off the message from the VF to the underlying shared code.
1194 **/
1195 s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
1196 struct fm10k_mbx_info *mbx)
1197 {
1198 struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1199 u8 vf_idx = vf_info->vf_idx;
1200
1201 UNREFERENCED_1PARAMETER(results);
1202 DEBUGFUNC("fm10k_iov_msg_msix_pf");
1203
1204 return hw->iov.ops.assign_int_moderator(hw, vf_idx);
1205 }
1206
1207 /**
1208 * fm10k_iov_select_vid - Select correct default VLAN ID
1209 * @vf_info: Pointer to VF information structure
1210 * @vid: VLAN ID to correct
1211 *
1212 * Will report an error if the VLAN ID is out of range. For VID = 0, it will
1213 * return either the pf_vid or sw_vid depending on which one is set.
1214 */
1215 STATIC s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
1216 {
1217 if (!vid)
1218 return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
1219 else if (vf_info->pf_vid && vid != vf_info->pf_vid)
1220 return FM10K_ERR_PARAM;
1221 else
1222 return vid;
1223 }
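/* Worked example (a sketch of the cases above): with vf_info->pf_vid = 5,
 * a request for VID 0 resolves to 5, a request for VID 5 is returned
 * unchanged, and a request for VID 7 fails with FM10K_ERR_PARAM. With no
 * pf_vid set, a request for VID 0 falls back to vf_info->sw_vid.
 */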
1224
1225 /**
1226 * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
1227 * @hw: Pointer to hardware structure
1228 * @results: Pointer array to message, results[0] is pointer to message
1229 * @mbx: Pointer to mailbox information structure
1230 *
1231 * This function is a default handler for MAC/VLAN requests from the VF.
1232 * The assumption is that in this case it is acceptable to just directly
1233 * hand off the message from the VF to the underlying shared code.
1234 **/
1235 s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
1236 struct fm10k_mbx_info *mbx)
1237 {
1238 struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1239 u8 mac[ETH_ALEN];
1240 u32 *result;
1241 int err = FM10K_SUCCESS;
1242 bool set;
1243 u16 vlan;
1244 u32 vid;
1245
1246 DEBUGFUNC("fm10k_iov_msg_mac_vlan_pf");
1247
1248 /* we shouldn't be updating rules on a disabled interface */
1249 if (!FM10K_VF_FLAG_ENABLED(vf_info))
1250 err = FM10K_ERR_PARAM;
1251
1252 if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
1253 result = results[FM10K_MAC_VLAN_MSG_VLAN];
1254
1255 /* record VLAN id requested */
1256 err = fm10k_tlv_attr_get_u32(result, &vid);
1257 if (err)
1258 return err;
1259
1260 set = !(vid & FM10K_VLAN_CLEAR);
1261 vid &= ~FM10K_VLAN_CLEAR;
1262
1263 /* if the length field has been set, this is a multi-bit
1264 * update request. For multi-bit requests, simply disallow
1265 * them when the pf_vid has been set. In this case, the PF
1266 * should have already cleared the VLAN_TABLE, and if we
1267 * allowed them, it could allow a rogue VF to receive traffic
1268 * on a VLAN it was not assigned. In the single-bit case, we
1269 * need to modify requests for VLAN 0 to use the default PF or
1270 * SW vid when assigned.
1271 */
1272
1273 if (vid >> 16) {
1274 /* prevent multi-bit requests when PF has
1275 * administratively set the VLAN for this VF
1276 */
1277 if (vf_info->pf_vid)
1278 return FM10K_ERR_PARAM;
1279 } else {
1280 err = fm10k_iov_select_vid(vf_info, (u16)vid);
1281 if (err < 0)
1282 return err;
1283
1284 vid = err;
1285 }
1286
1287 /* update VSI info for VF in regards to VLAN table */
1288 err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
1289 }
1290
1291 if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
1292 result = results[FM10K_MAC_VLAN_MSG_MAC];
1293
1294 /* record unicast MAC address requested */
1295 err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
1296 if (err)
1297 return err;
1298
1299 /* block attempts to set MAC for a locked device */
1300 if (IS_VALID_ETHER_ADDR(vf_info->mac) &&
1301 memcmp(mac, vf_info->mac, ETH_ALEN))
1302 return FM10K_ERR_PARAM;
1303
1304 set = !(vlan & FM10K_VLAN_CLEAR);
1305 vlan &= ~FM10K_VLAN_CLEAR;
1306
1307 err = fm10k_iov_select_vid(vf_info, vlan);
1308 if (err < 0)
1309 return err;
1310
1311 vlan = (u16)err;
1312
1313 /* notify switch of request for new unicast address */
1314 err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
1315 mac, vlan, set, 0);
1316 }
1317
1318 if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
1319 result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
1320
1321 /* record multicast MAC address requested */
1322 err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
1323 if (err)
1324 return err;
1325
1326 /* verify that the VF is allowed to request multicast */
1327 if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
1328 return FM10K_ERR_PARAM;
1329
1330 set = !(vlan & FM10K_VLAN_CLEAR);
1331 vlan &= ~FM10K_VLAN_CLEAR;
1332
1333 err = fm10k_iov_select_vid(vf_info, vlan);
1334 if (err < 0)
1335 return err;
1336
1337 vlan = (u16)err;
1338
1339 /* notify switch of request for new multicast address */
1340 err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
1341 mac, vlan, set);
1342 }
1343
1344 return err;
1345 }
1346
1347 /**
1348 * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
1349 * @vf_info: VF info structure containing capability flags
1350 * @mode: Requested xcast mode
1351 *
1352 * This function outputs the mode that most closely matches the requested
1353 * mode. If no modes match, it will request that we disable the port.
1354 **/
1355 STATIC u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
1356 u8 mode)
1357 {
1358 u8 vf_flags = vf_info->vf_flags;
1359
1360 /* match up mode to capabilities as best as possible */
1361 switch (mode) {
1362 case FM10K_XCAST_MODE_PROMISC:
1363 if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
1364 return FM10K_XCAST_MODE_PROMISC;
1365 /* fall through */
1366 case FM10K_XCAST_MODE_ALLMULTI:
1367 if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
1368 return FM10K_XCAST_MODE_ALLMULTI;
1369 /* fall through */
1370 case FM10K_XCAST_MODE_MULTI:
1371 if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
1372 return FM10K_XCAST_MODE_MULTI;
1373 /* fall through */
1374 case FM10K_XCAST_MODE_NONE:
1375 if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
1376 return FM10K_XCAST_MODE_NONE;
1377 /* fall through */
1378 default:
1379 break;
1380 }
1381
1382 /* disable interface as it should not be able to request any */
1383 return FM10K_XCAST_MODE_DISABLE;
1384 }
1385
1386 /**
1387 * fm10k_iov_msg_lport_state_pf - Message handler for port state requests
1388 * @hw: Pointer to hardware structure
1389 * @results: Pointer array to message, results[0] is pointer to message
1390 * @mbx: Pointer to mailbox information structure
1391 *
1392 * This function is a default handler for port state requests. The port
1393 * state requests for now are basic and consist of enabling or disabling
1394 * the port.
1395 **/
1396 s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
1397 struct fm10k_mbx_info *mbx)
1398 {
1399 struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1400 u32 *result;
1401 s32 err = FM10K_SUCCESS;
1402 u32 msg[2];
1403 u8 mode = 0;
1404
1405 DEBUGFUNC("fm10k_iov_msg_lport_state_pf");
1406
1407 /* verify VF is allowed to enable even minimal mode */
1408 if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE))
1409 return FM10K_ERR_PARAM;
1410
1411 if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
1412 result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
1413
1414 /* XCAST mode update requested */
1415 err = fm10k_tlv_attr_get_u8(result, &mode);
1416 if (err)
1417 return FM10K_ERR_PARAM;
1418
1419 /* prep for possible demotion depending on capabilities */
1420 mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
1421
1422 /* if mode is not currently enabled, enable it */
1423 if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
1424 fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
1425
1426 /* swap mode back to a bit flag */
1427 mode = FM10K_VF_FLAG_SET_MODE(mode);
1428 } else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) {
1429 /* need to disable the port if it is already enabled */
1430 if (FM10K_VF_FLAG_ENABLED(vf_info))
1431 err = fm10k_update_lport_state_pf(hw, vf_info->glort,
1432 1, false);
1433
1434 /* we need to clear VF_FLAG_ENABLED flags in order to ensure
1435 * that we actually re-enable the LPORT state below. Note that
1436 * this has no impact if the VF is already disabled, as the
1437 * flags are already cleared.
1438 */
1439 if (!err)
1440 vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);
1441
1442 /* when enabling the port we should reset the rate limiters */
1443 hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);
1444
1445 /* set mode for minimal functionality */
1446 mode = FM10K_VF_FLAG_SET_MODE_NONE;
1447
1448 /* generate port state response to notify VF it is ready */
1449 fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
1450 fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY);
1451 mbx->ops.enqueue_tx(hw, mbx, msg);
1452 }
1453
1454 /* if enable state toggled note the update */
1455 if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode))
1456 err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1,
1457 !!mode);
1458
1459 /* if state change succeeded, then update our stored state */
1460 mode |= FM10K_VF_FLAG_CAPABLE(vf_info);
1461 if (!err)
1462 vf_info->vf_flags = mode;
1463
1464 return err;
1465 }
1466
1467 #ifndef NO_DEFAULT_SRIOV_MSG_HANDLERS
1468 const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = {
1469 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
1470 FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
1471 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
1472 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
1473 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1474 };
1475
1476 #endif
1477 /**
1478 * fm10k_update_hw_stats_pf - Updates hardware related statistics of PF
1479 * @hw: pointer to hardware structure
1480 * @stats: pointer to the stats structure to update
1481 *
1482 * This function collects and aggregates global and per queue hardware
1483 * statistics.
1484 **/
1485 void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
1486 struct fm10k_hw_stats *stats)
1487 {
1488 u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop;
1489 u32 id, id_prev;
1490
1491 DEBUGFUNC("fm10k_update_hw_stats_pf");
1492
1493 /* Use Tx queue 0 as a canary to detect a reset */
1494 id = FM10K_READ_REG(hw, FM10K_TXQCTL(0));
1495
1496 /* Read Global Statistics */
1497 do {
1498 timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT,
1499 &stats->timeout);
1500 ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur);
1501 ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca);
1502 um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um);
1503 xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec);
1504 vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP,
1505 &stats->vlan_drop);
1506 loopback_drop =
1507 fm10k_read_hw_stats_32b(hw,
1508 FM10K_STATS_LOOPBACK_DROP,
1509 &stats->loopback_drop);
1510 nodesc_drop = fm10k_read_hw_stats_32b(hw,
1511 FM10K_STATS_NODESC_DROP,
1512 &stats->nodesc_drop);
1513
1514 /* if value has not changed then we have consistent data */
1515 id_prev = id;
1516 id = FM10K_READ_REG(hw, FM10K_TXQCTL(0));
1517 } while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK);
1518
1519 /* drop non-ID bits and set VALID ID bit */
1520 id &= FM10K_TXQCTL_ID_MASK;
1521 id |= FM10K_STAT_VALID;
1522
1523 /* Update Global Statistics */
1524 if (stats->stats_idx == id) {
1525 stats->timeout.count += timeout;
1526 stats->ur.count += ur;
1527 stats->ca.count += ca;
1528 stats->um.count += um;
1529 stats->xec.count += xec;
1530 stats->vlan_drop.count += vlan_drop;
1531 stats->loopback_drop.count += loopback_drop;
1532 stats->nodesc_drop.count += nodesc_drop;
1533 }
1534
1535 /* Update bases and record current PF id */
1536 fm10k_update_hw_base_32b(&stats->timeout, timeout);
1537 fm10k_update_hw_base_32b(&stats->ur, ur);
1538 fm10k_update_hw_base_32b(&stats->ca, ca);
1539 fm10k_update_hw_base_32b(&stats->um, um);
1540 fm10k_update_hw_base_32b(&stats->xec, xec);
1541 fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop);
1542 fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop);
1543 fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop);
1544 stats->stats_idx = id;
1545
1546 /* Update Queue Statistics */
1547 fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
1548 }
1549
1550 /**
1551 * fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF
1552 * @hw: pointer to hardware structure
1553 * @stats: pointer to the stats structure to update
1554 *
1555 * This function resets the base for global and per queue hardware
1556 * statistics.
1557 **/
1558 void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
1559 struct fm10k_hw_stats *stats)
1560 {
1561 DEBUGFUNC("fm10k_rebind_hw_stats_pf");
1562
1563 /* Unbind Global Statistics */
1564 fm10k_unbind_hw_stats_32b(&stats->timeout);
1565 fm10k_unbind_hw_stats_32b(&stats->ur);
1566 fm10k_unbind_hw_stats_32b(&stats->ca);
1567 fm10k_unbind_hw_stats_32b(&stats->um);
1568 fm10k_unbind_hw_stats_32b(&stats->xec);
1569 fm10k_unbind_hw_stats_32b(&stats->vlan_drop);
1570 fm10k_unbind_hw_stats_32b(&stats->loopback_drop);
1571 fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);
1572
1573 /* Unbind Queue Statistics */
1574 fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
1575
1576 /* Reinitialize bases for all stats */
1577 fm10k_update_hw_stats_pf(hw, stats);
1578 }
1579
1580 /**
1581 * fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system
1582 * @hw: pointer to hardware structure
1583 * @dma_mask: 64 bit DMA mask required for platform
1584 *
1585 * This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order
1586 * to limit the access to memory beyond what is physically in the system.
1587 **/
1588 STATIC void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask)
1589 {
1590 /* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */
1591 u32 phyaddr = (u32)(dma_mask >> 32);
1592
1593 DEBUGFUNC("fm10k_set_dma_mask_pf");
1594
1595 FM10K_WRITE_REG(hw, FM10K_PHYADDR, phyaddr);
1596 }
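/* Worked example (a sketch, not additional functionality): a platform
 * requesting a 48-bit DMA mask (dma_mask = 0x0000FFFFFFFFFFFF) results in
 * phyaddr = 0x0000FFFF being written to FM10K_PHYADDR; only the upper
 * 32 bits of the mask matter here.
 */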
1597
1598 /**
1599 * fm10k_get_fault_pf - Record a fault in one of the interface units
1600 * @hw: pointer to hardware structure
1601 * @type: pointer to fault type register offset
1602 * @fault: pointer to memory location to record the fault
1603 *
1604 * Record the fault register contents to the fault data structure and
1605 * clear the entry from the register.
1606 *
1607 * Returns ERR_PARAM if invalid register is specified or no error is present.
1608 **/
1609 STATIC s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
1610 struct fm10k_fault *fault)
1611 {
1612 u32 func;
1613
1614 DEBUGFUNC("fm10k_get_fault_pf");
1615
1616 /* verify the fault register is in range and is aligned */
1617 switch (type) {
1618 case FM10K_PCA_FAULT:
1619 case FM10K_THI_FAULT:
1620 case FM10K_FUM_FAULT:
1621 break;
1622 default:
1623 return FM10K_ERR_PARAM;
1624 }
1625
1626 /* only service faults that are valid */
1627 func = FM10K_READ_REG(hw, type + FM10K_FAULT_FUNC);
1628 if (!(func & FM10K_FAULT_FUNC_VALID))
1629 return FM10K_ERR_PARAM;
1630
1631 /* read remaining fields */
1632 fault->address = FM10K_READ_REG(hw, type + FM10K_FAULT_ADDR_HI);
1633 fault->address <<= 32;
1634 fault->address |= FM10K_READ_REG(hw, type + FM10K_FAULT_ADDR_LO);
1635 fault->specinfo = FM10K_READ_REG(hw, type + FM10K_FAULT_SPECINFO);
1636
1637 /* clear valid bit to allow for next error */
1638 FM10K_WRITE_REG(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID);
1639
1640 /* Record which function triggered the error */
1641 if (func & FM10K_FAULT_FUNC_PF)
1642 fault->func = 0;
1643 else
1644 fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
1645 FM10K_FAULT_FUNC_VF_SHIFT);
1646
1647 /* record fault type */
1648 fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;
1649
1650 return FM10K_SUCCESS;
1651 }
1652
1653 /**
1654 * fm10k_request_lport_map_pf - Request LPORT map from the switch API
1655 * @hw: pointer to hardware structure
1656 *
1657 **/
1658 STATIC s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
1659 {
1660 struct fm10k_mbx_info *mbx = &hw->mbx;
1661 u32 msg[1];
1662
1663 DEBUGFUNC("fm10k_request_lport_pf");
1664
1665 /* issue request asking for LPORT map */
1666 fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP);
1667
1668 /* load onto outgoing mailbox */
1669 return mbx->ops.enqueue_tx(hw, mbx, msg);
1670 }
1671
1672 /**
1673 * fm10k_get_host_state_pf - Returns the state of the switch and mailbox
1674 * @hw: pointer to hardware structure
1675 * @switch_ready: pointer to boolean value that will record switch state
1676 *
1677 * This function will check the DMA_CTRL2 register and mailbox in order
1678 * to determine if the switch is ready for the PF to begin requesting
1679 * addresses and mapping traffic to the local interface.
1680 **/
1681 STATIC s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
1682 {
1683 u32 dma_ctrl2;
1684
1685 DEBUGFUNC("fm10k_get_host_state_pf");
1686
1687 /* verify the switch is ready for interaction */
1688 dma_ctrl2 = FM10K_READ_REG(hw, FM10K_DMA_CTRL2);
1689 if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
1690 return FM10K_SUCCESS;
1691
1692 /* retrieve generic host state info */
1693 return fm10k_get_host_state_generic(hw, switch_ready);
1694 }
1695
1696 /* This structure defines the attributes to be parsed below */
1697 const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
1698 FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
1699 sizeof(struct fm10k_swapi_error)),
1700 FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
1701 FM10K_TLV_ATTR_LAST
1702 };
1703
1704 /**
1705 * fm10k_msg_lport_map_pf - Message handler for lport_map message from SM
1706 * @hw: Pointer to hardware structure
1707 * @results: pointer array containing parsed data
1708 * @mbx: Pointer to mailbox information structure
1709 *
1710 * This handler configures the lport mapping based on the reply from the
1711 * switch API.
1712 **/
1713 s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
1714 struct fm10k_mbx_info *mbx)
1715 {
1716 u16 glort, mask;
1717 u32 dglort_map;
1718 s32 err;
1719
1720 UNREFERENCED_1PARAMETER(mbx);
1721 DEBUGFUNC("fm10k_msg_lport_map_pf");
1722
1723 err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP],
1724 &dglort_map);
1725 if (err)
1726 return err;
1727
1728 /* extract values out of the header */
1729 glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT);
1730 mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK);
1731
1732 /* verify mask is set and none of the masked bits in glort are set */
1733 if (!mask || (glort & ~mask))
1734 return FM10K_ERR_PARAM;
1735
1736 /* verify the mask is contiguous, and that it is 1's followed by 0's */
1737 if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE)
1738 return FM10K_ERR_PARAM;
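/* Note (not part of the upstream source): the check above uses the
 * identity (~(mask - 1) & mask) == lowest set bit of mask.  Adding the
 * mask back yields exactly 0x10000 only for a contiguous, left-aligned
 * run of ones, e.g.:
 *	mask = 0xff00: 0x0100 + 0xff00 = 0x10000  (accepted)
 *	mask = 0xf00f: 0x0001 + 0xf00f = 0xf010   (rejected)
 * The AND with FM10K_DGLORTMAP_NONE is assumed here to flag any result
 * that still has bits set inside the 16-bit mask field.
 */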
1739
1740 /* record the glort, mask, and port count */
1741 hw->mac.dglort_map = dglort_map;
1742
1743 return FM10K_SUCCESS;
1744 }
1745
1746 const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
1747 FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID),
1748 FM10K_TLV_ATTR_LAST
1749 };
1750
1751 /**
1752 * fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM
1753 * @hw: Pointer to hardware structure
1754 * @results: pointer array containing parsed data
1755 * @mbx: Pointer to mailbox information structure
1756 *
1757 * This handler configures the default VLAN for the PF
1758 **/
1759 static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
1760 struct fm10k_mbx_info *mbx)
1761 {
1762 u16 glort, pvid;
1763 u32 pvid_update;
1764 s32 err;
1765
1766 UNREFERENCED_1PARAMETER(mbx);
1767 DEBUGFUNC("fm10k_msg_update_pvid_pf");
1768
1769 err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
1770 &pvid_update);
1771 if (err)
1772 return err;
1773
1774 /* extract values from the pvid update */
1775 glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
1776 pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);
1777
1778 /* if glort is not valid return error */
1779 if (!fm10k_glort_valid_pf(hw, glort))
1780 return FM10K_ERR_PARAM;
1781
1782 /* verify VLAN ID is valid */
1783 if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
1784 return FM10K_ERR_PARAM;
1785
1786 /* record the port VLAN ID value */
1787 hw->mac.default_vid = pvid;
1788
1789 return FM10K_SUCCESS;
1790 }
1791
1792 /**
1793 * fm10k_record_global_table_data - Move global table data to swapi table info
1794 * @from: pointer to source table data structure
1795 * @to: pointer to destination table info structure
1796 *
1797  * This function will copy table_data to the table_info structure
1798  * contained in the hw struct.
1799 **/
1800 static void fm10k_record_global_table_data(struct fm10k_global_table_data *from,
1801 struct fm10k_swapi_table_info *to)
1802 {
1803 /* convert from le32 struct to CPU byte ordered values */
1804 to->used = FM10K_LE32_TO_CPU(from->used);
1805 to->avail = FM10K_LE32_TO_CPU(from->avail);
1806 }
1807
1808 const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
1809 FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
1810 sizeof(struct fm10k_swapi_error)),
1811 FM10K_TLV_ATTR_LAST
1812 };
1813
1814 /**
1815 * fm10k_msg_err_pf - Message handler for error reply
1816 * @hw: Pointer to hardware structure
1817 * @results: pointer array containing parsed data
1818 * @mbx: Pointer to mailbox information structure
1819 *
1820 * This handler will capture the data for any error replies to previous
1821 * messages that the PF has sent.
1822 **/
1823 s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
1824 struct fm10k_mbx_info *mbx)
1825 {
1826 struct fm10k_swapi_error err_msg;
1827 s32 err;
1828
1829 UNREFERENCED_1PARAMETER(mbx);
1830 DEBUGFUNC("fm10k_msg_err_pf");
1831
1832 /* extract structure from message */
1833 err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR],
1834 &err_msg, sizeof(err_msg));
1835 if (err)
1836 return err;
1837
1838 /* record table status */
1839 fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac);
1840 fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop);
1841 fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu);
1842
1843 /* record SW API status value */
1844 hw->swapi.status = FM10K_LE32_TO_CPU(err_msg.status);
1845
1846 return FM10K_SUCCESS;
1847 }
1848
1849 /* currently there is no shared 1588 timestamp handler */
1850
1851 const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = {
1852 FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP,
1853 sizeof(struct fm10k_swapi_1588_timestamp)),
1854 FM10K_TLV_ATTR_LAST
1855 };
1856
1857 const struct fm10k_tlv_attr fm10k_1588_clock_owner_attr[] = {
1858 FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_CLOCK_OWNER,
1859 sizeof(struct fm10k_swapi_1588_clock_owner)),
1860 FM10K_TLV_ATTR_LAST
1861 };
1862
1863 const struct fm10k_tlv_attr fm10k_master_clk_offset_attr[] = {
1864 FM10K_TLV_ATTR_U64(FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET),
1865 FM10K_TLV_ATTR_LAST
1866 };
1867
1868 /**
1869 * fm10k_iov_notify_offset_pf - Notify VF of change in PTP offset
1870 * @hw: pointer to hardware structure
1871 * @vf_info: pointer to the vf info structure
1872 * @offset: 64bit unsigned offset from hardware SYSTIME
1873 *
1874 * This function sends a message to a given VF to notify it of PTP offset
1875 * changes.
1876 **/
1877 STATIC void fm10k_iov_notify_offset_pf(struct fm10k_hw *hw,
1878 struct fm10k_vf_info *vf_info,
1879 u64 offset)
1880 {
1881 u32 msg[4];
1882
1883 fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588);
1884 fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_CLK_OFFSET, offset);
1885
1886 if (vf_info->mbx.ops.enqueue_tx)
1887 vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
1888 }
1889
1890 /**
1891 * fm10k_msg_1588_clock_owner_pf - Message handler for clock ownership from SM
1892 * @hw: pointer to hardware structure
1893  * @results: pointer array containing parsed data
1894 * @mbx: Pointer to mailbox information structure
1895 *
1896 * This handler configures the FM10K_HW_FLAG_CLOCK_OWNER field for the PF
1897  **/
1898 s32 fm10k_msg_1588_clock_owner_pf(struct fm10k_hw *hw, u32 **results,
1899 struct fm10k_mbx_info *mbx)
1900 {
1901 struct fm10k_swapi_1588_clock_owner msg;
1902 u16 glort;
1903 s32 err;
1904
1905 UNREFERENCED_1PARAMETER(mbx);
1906 DEBUGFUNC("fm10k_msg_1588_clock_owner");
1907
1908 err = fm10k_tlv_attr_get_le_struct(
1909 results[FM10K_PF_ATTR_ID_1588_CLOCK_OWNER],
1910 &msg, sizeof(msg));
1911 if (err)
1912 return err;
1913
1914 /* We own the clock iff the glort matches us and the enabled field is
1915 * true. Otherwise, the clock must belong to some other port.
1916 */
1917 glort = le16_to_cpu(msg.glort);
1918 if (fm10k_glort_valid_pf(hw, glort) && msg.enabled)
1919 hw->flags |= FM10K_HW_FLAG_CLOCK_OWNER;
1920 else
1921 hw->flags &= ~FM10K_HW_FLAG_CLOCK_OWNER;
1922
1923 return FM10K_SUCCESS;
1924 }
1925
1926 /**
1927 * fm10k_adjust_systime_pf - Adjust systime frequency
1928 * @hw: pointer to hardware structure
1929 * @ppb: adjustment rate in parts per billion
1930 *
1931 * This function will adjust the SYSTIME_CFG register contained in BAR 4
1932  * if BAR 4 register access is available. The adjustment amount
1933 * is based on the parts per billion value provided and adjusted to a
1934 * value based on parts per 2^48 clock cycles.
1935 *
1936 * If adjustment is not supported or the requested value is too large
1937 * we will return an error.
1938 **/
1939 STATIC s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
1940 {
1941 u64 systime_adjust;
1942
1943 DEBUGFUNC("fm10k_adjust_systime_pf");
1944
1945 /* ensure that we control the clock */
1946 if (!(hw->flags & FM10K_HW_FLAG_CLOCK_OWNER))
1947 return FM10K_ERR_DEVICE_NOT_SUPPORTED;
1948
1949 /* if sw_addr is not set we don't have switch register access */
1950 if (!hw->sw_addr)
1951 return ppb ? FM10K_ERR_PARAM : FM10K_SUCCESS;
1952
1953 	/* we must convert the value from parts per billion to parts per
1954 	 * 2^48 cycles. In addition, only the 30 most significant bits of
1955 	 * the adjustment value are used, since the 8 least significant
1956 	 * bits are located in another register and represent a value
1957 	 * significantly less than a part per billion. Dropping those 8
1958 	 * least significant bits means the adjustment value is
1959 	 * effectively multiplied by 2^8 when we write it.
1960 	 *
1961 	 * As a result of all this, the math breaks down as follows:
1962 	 *	ppb / 10^9 == adjust * 2^8 / 2^48
1963 	 * If we solve this for adjust and simplify, it comes out as:
1964 	 *	ppb * 2^31 / 5^9 == adjust
1965 	 */
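/* Worked example (not part of the upstream source): ppb = 1000 (1 ppm)
 * gives 1000 * 2^31 / 1953125 ~= 1099511, which is what the shift and
 * do_div() below compute, and which is expected to fit within
 * FM10K_SW_SYSTIME_ADJUST_MASK.
 */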
1966 systime_adjust = (ppb < 0) ? -ppb : ppb;
1967 systime_adjust <<= 31;
1968 do_div(systime_adjust, 1953125);
1969
1970 /* verify the requested adjustment value is in range */
1971 if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
1972 return FM10K_ERR_PARAM;
1973
1974 if (ppb > 0)
1975 systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
1976
1977 FM10K_WRITE_SW_REG(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
1978
1979 return FM10K_SUCCESS;
1980 }
1981
1982 /**
1983 * fm10k_notify_offset_pf - Notify switch of change in PTP offset
1984 * @hw: pointer to hardware structure
1985 * @offset: 64bit unsigned offset of SYSTIME
1986 *
1987 * This function sends a message to the switch to indicate a change in the
1988 * offset of the hardware SYSTIME registers. The switch manager is
1989 * responsible for transmitting this message to other hosts.
1990  **/
1991 STATIC s32 fm10k_notify_offset_pf(struct fm10k_hw *hw, u64 offset)
1992 {
1993 struct fm10k_mbx_info *mbx = &hw->mbx;
1994 u32 msg[4];
1995
1996 DEBUGFUNC("fm10k_notify_offset_pf");
1997
1998 /* ensure that we control the clock */
1999 if (!(hw->flags & FM10K_HW_FLAG_CLOCK_OWNER))
2000 return FM10K_ERR_DEVICE_NOT_SUPPORTED;
2001
2002 fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_MASTER_CLK_OFFSET);
2003 fm10k_tlv_attr_put_u64(msg, FM10K_PF_ATTR_ID_MASTER_CLK_OFFSET, offset);
2004
2005 /* load onto outgoing mailbox */
2006 return mbx->ops.enqueue_tx(hw, mbx, msg);
2007 }
2008
2009 /**
2010 * fm10k_read_systime_pf - Reads value of systime registers
2011 * @hw: pointer to the hardware structure
2012 *
2013  * This function reads the contents of two registers that combine to form a
2014  * 64 bit value measured in nanoseconds. In order to guarantee the value is
2015  * accurate, we check the 32 most significant bits both before and after
2016  * reading the 32 least significant bits to verify they didn't change as we
2017  * were reading the registers.
2018 **/
2019 static u64 fm10k_read_systime_pf(struct fm10k_hw *hw)
2020 {
2021 u32 systime_l, systime_h, systime_tmp;
2022
2023 systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
2024
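/* Remember the previously read high word, sample the low word, then
 * re-read the high word; if it changed, the low word may have rolled
 * over mid-read, so retry.
 */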
2025 do {
2026 systime_tmp = systime_h;
2027 systime_l = fm10k_read_reg(hw, FM10K_SYSTIME);
2028 systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
2029 } while (systime_tmp != systime_h);
2030
2031 return ((u64)systime_h << 32) | systime_l;
2032 }
2033
2034 static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
2035 FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
2036 FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
2037 FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
2038 FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
2039 FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
2040 FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
2041 FM10K_PF_MSG_1588_CLOCK_OWNER_HANDLER(fm10k_msg_1588_clock_owner_pf),
2042 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2043 };
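/* Note (not part of the upstream source): each entry above pairs a switch
 * manager message ID with the handler run when that message arrives on the
 * PF mailbox; fm10k_init_ops_pf() registers this table with the mailbox via
 * fm10k_sm_mbx_init() below.
 */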
2044
2045 /**
2046 * fm10k_init_ops_pf - Inits func ptrs and MAC type
2047 * @hw: pointer to hardware structure
2048 *
2049 * Initialize the function pointers and assign the MAC type for PF.
2050 * Does not touch the hardware.
2051 **/
2052 s32 fm10k_init_ops_pf(struct fm10k_hw *hw)
2053 {
2054 struct fm10k_mac_info *mac = &hw->mac;
2055 struct fm10k_iov_info *iov = &hw->iov;
2056
2057 DEBUGFUNC("fm10k_init_ops_pf");
2058
2059 fm10k_init_ops_generic(hw);
2060
2061 mac->ops.reset_hw = &fm10k_reset_hw_pf;
2062 mac->ops.init_hw = &fm10k_init_hw_pf;
2063 mac->ops.start_hw = &fm10k_start_hw_generic;
2064 mac->ops.stop_hw = &fm10k_stop_hw_generic;
2065 #ifndef NO_IS_SLOT_APPROPRIATE_CHECK
2066 mac->ops.is_slot_appropriate = &fm10k_is_slot_appropriate_pf;
2067 #endif
2068 mac->ops.update_vlan = &fm10k_update_vlan_pf;
2069 mac->ops.read_mac_addr = &fm10k_read_mac_addr_pf;
2070 mac->ops.update_uc_addr = &fm10k_update_uc_addr_pf;
2071 mac->ops.update_mc_addr = &fm10k_update_mc_addr_pf;
2072 mac->ops.update_xcast_mode = &fm10k_update_xcast_mode_pf;
2073 mac->ops.update_int_moderator = &fm10k_update_int_moderator_pf;
2074 mac->ops.update_lport_state = &fm10k_update_lport_state_pf;
2075 mac->ops.update_hw_stats = &fm10k_update_hw_stats_pf;
2076 mac->ops.rebind_hw_stats = &fm10k_rebind_hw_stats_pf;
2077 mac->ops.configure_dglort_map = &fm10k_configure_dglort_map_pf;
2078 mac->ops.set_dma_mask = &fm10k_set_dma_mask_pf;
2079 mac->ops.get_fault = &fm10k_get_fault_pf;
2080 mac->ops.get_host_state = &fm10k_get_host_state_pf;
2081 mac->ops.request_lport_map = &fm10k_request_lport_map_pf;
2082 mac->ops.adjust_systime = &fm10k_adjust_systime_pf;
2083 mac->ops.notify_offset = &fm10k_notify_offset_pf;
2084 mac->ops.read_systime = &fm10k_read_systime_pf;
2085
2086 mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw);
2087
2088 iov->ops.assign_resources = &fm10k_iov_assign_resources_pf;
2089 iov->ops.configure_tc = &fm10k_iov_configure_tc_pf;
2090 iov->ops.assign_int_moderator = &fm10k_iov_assign_int_moderator_pf;
2091 iov->ops.assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf;
2092 iov->ops.reset_resources = &fm10k_iov_reset_resources_pf;
2093 iov->ops.set_lport = &fm10k_iov_set_lport_pf;
2094 iov->ops.reset_lport = &fm10k_iov_reset_lport_pf;
2095 iov->ops.update_stats = &fm10k_iov_update_stats_pf;
2096 iov->ops.notify_offset = &fm10k_iov_notify_offset_pf;
2097
2098 return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf);
2099 }
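/* Illustrative sketch (not part of the upstream source): a host driver is
 * expected to install these ops and then drive the device through them,
 * for example:
 *
 *	if (fm10k_init_ops_pf(hw) == FM10K_SUCCESS) {
 *		hw->mac.ops.reset_hw(hw);
 *		hw->mac.ops.init_hw(hw);
 *	}
 *
 * with hw pointing to an already mapped struct fm10k_hw.
 */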