/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 fp_minor;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return -EINVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR,
			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
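/* Note on the HSI negotiation above: the PF never starts a VF with a
 * fastpath HSI minor newer than its own. For example, a VF requesting
 * minor 20 against a PF whose headers define ETH_HSI_VER_MINOR as 10
 * would be started with minor 10 (numbers here are illustrative, not
 * taken from the HSI headers).
 */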
static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id, bool b_enabled_only)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	return true;
}
static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}
int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
			     int vfid, struct qed_ptt *p_ptt)
{
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return -EINVAL;

	if (!p_vf->vf_bulletin)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
}
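/* The consuming VF is expected to mirror the producer logic above:
 * recompute crc32 over the bulletin contents past the crc field and
 * accept the board only when the crc matches and the version advanced.
 * A minimal sketch of that check (the VF-side code in qed_vf.c has its
 * own variant of this):
 *
 *	crc = crc32(0, (u8 *)p_bulletin + crc_size,
 *		    p_iov->bulletin.size - crc_size);
 *	if (crc == p_bulletin->crc &&
 *	    p_bulletin->version != p_iov->bulletin_shadow.version)
 *		consume the new bulletin;
 */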
static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}
static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_igu_block *p_sb;
	u16 sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
		return;
	}

	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
		    !(p_sb->status & QED_IGU_STATUS_PF)) {
			val = qed_rd(p_hwfn, p_ptt,
				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;
	}
}
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}
static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}
int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
		return -ENOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	return qed_iov_allocate_vfdb(p_hwfn);
}
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}
void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}
void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
		return -ENOMEM;
	}
	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* Calculate the first VF index - this is a bit tricky; Basically,
	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
	 * after the first engine's VFs.
	 */
	cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
					   p_hwfn->abs_pf_id - 16;
	if (QED_PATH_ID(p_hwfn))
		cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}
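/* Worked example for the calculation above: with a PCI_SRIOV_VF_OFFSET of
 * 16 and abs_pf_id 2, first_vf_in_pf is 16 + 2 - 16 = 2; a PF on the
 * second path additionally subtracts MAX_NUM_VFS_BB. (The offset value
 * here is illustrative.)
 */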
static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
		return false;

	return true;
}
static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}
void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}
static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}
static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}
static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}
static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}
/**
 * @brief qed_iov_config_perm_table - configure the permission
 *      zone table.
 *      In E4, queue zone permission table size is 320x9. There
 *      are 320 VF queues for single engine device (256 for dual
 *      engine device), and each entry has the following format:
 *      {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}
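/* Following the {Valid, VF[7:0]} entry layout documented above, enabling
 * the table entry for abs_vf_id 5 writes 0x105 (valid bit 8 set, VF id in
 * bits 7:0); disabling writes 0.
 */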
static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *igu_blocks;
	int qid = 0, igu_id = 0;
	u32 val = 0;

	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	while ((qid < num_rx_queues) &&
	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
			       val);

			/* Configure igu sb in CAU which were marked valid */
			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_hwfn->rel_pf_id,
					      vf->abs_vf_id, 1);
			qed_dmae_host2grc(p_hwfn, p_ptt,
					  (u64)(uintptr_t)&sb_entry,
					  CAU_REG_SB_VAR_MEMORY +
					  igu_id * sizeof(u64), 2, 0);
			qid++;
		}
		igu_id++;
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}
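/* Each IGU CAM line written above encodes {function number = abs VF id,
 * vector number = VF-relative queue index, valid = 1, pf_valid = 0}, so
 * the status block is routed to the VF rather than to the PF.
 */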
static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->igu_map.igu_blocks[igu_id].status |=
		    QED_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;
	}

	vf->num_sbs = 0;
}
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u16 rel_vf_id, u16 num_rx_queues)
{
	u8 num_of_vf_avaiable_chains = 0;
	struct qed_vf_info *vf = NULL;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
		return -EINVAL;
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, num_rx_queues, (u16) cids);
	num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));

	num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							     p_ptt,
							     vf,
							     num_rx_queues);
	if (!num_of_vf_avaiable_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_avaiable_chains;
	vf->num_txqs = num_of_vf_avaiable_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
							   vf->igu_sbs[i]);

		if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
			DP_NOTICE(p_hwfn,
				  "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
				  vf->relative_vf_id, queue_id);
			return -EINVAL;
		}

		/* CIDs are per-VF, so no problem having them 0-based. */
		vf->vf_queues[i].fw_rx_qid = queue_id;
		vf->vf_queues[i].fw_tx_qid = queue_id;
		vf->vf_queues[i].fw_cid = i;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
	}
	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}
static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	memset(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}
static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}
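/* Typical usage, as done by qed_iov_prepare_resp() below: set the offset
 * to the start of the reply buffer, append the response TLV and then the
 * mandatory list terminator:
 *
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
 *	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */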
/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}
static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);

	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}
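/* The reply is intentionally copied in two DMAE transactions: the body
 * (everything past the first qword) first, and the header qword carrying
 * the status last, so a VF polling the first qword of its reply buffer
 * should never observe a header for a partially written reply.
 */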
static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}
static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u8 status,
					    u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}
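/* tlvs_mask marks the extended TLVs the PF found in the request, while
 * tlvs_accepted marks the subset it honored; a TLV that is present but
 * not accepted is answered with PFVF_STATUS_NOT_SUPPORTED. The current
 * caller passes the same mask for both, so every found TLV shares the
 * overall 'status'.
 */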
static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}
struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
						      u16 relative_vf_id,
						      bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}
void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);

	if (!vf_info)
		return;

	/* Clear the VF mac */
	memset(vf_info->mac, 0, ETH_ALEN);
}
static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->num_mac_filters = 0;
	p_vf->num_vlan_filters = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
		p_vf->vf_queues[i].rxq_active = 0;

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 i, vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	int rc;

	memset(resp, 0, sizeof(*resp));

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		DP_INFO(p_hwfn,
			"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
			vf->abs_vf_id,
			req->vfdev_info.eth_fp_hsi_major,
			req->vfdev_info.eth_fp_hsi_minor,
			ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

		/* Write the PF version so that VF would know which version
		 * is supported.
		 */
		pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
		pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

		goto out;
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		goto out;
	}

	/* Store the acquire message */
	memcpy(&vf->acquire, req, sizeof(vf->acquire));

	/* Fill in vf info stuff */
	vf->opaque_fid = req->vfdev_info.opaque_fid;
	vf->num_mac_filters = 1;
	vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	pfdev_info->stats_info.mstats.address =
	    PXP_VF_BAR0_START_MSDM_ZONE_B +
	    offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.mstats.len =
	    sizeof(struct eth_mstorm_per_queue_stat);

	pfdev_info->stats_info.ustats.address =
	    PXP_VF_BAR0_START_USDM_ZONE_B +
	    offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.ustats.len =
	    sizeof(struct eth_ustorm_per_queue_stat);

	pfdev_info->stats_info.pstats.address =
	    PXP_VF_BAR0_START_PSDM_ZONE_B +
	    offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.pstats.len =
	    sizeof(struct eth_pstorm_per_queue_stat);

	pfdev_info->stats_info.tstats.address = 0;
	pfdev_info->stats_info.tstats.len = 0;

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
	pfdev_info->minor_fp_hsi = min_t(u8,
					 ETH_HSI_VER_MINOR,
					 req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	resc->num_rxqs = vf->num_rxqs;
	resc->num_txqs = vf->num_txqs;
	resc->num_sbs = vf->num_sbs;
	for (i = 0; i < resc->num_sbs; i++) {
		resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
		resc->hw_sbs[i].sb_qid = 0;
	}

	for (i = 0; i < resc->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
				(u16 *)&resc->hw_qid[i]);
		resc->cid[i] = vf->vf_queues[i].fw_cid;
	}

	resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
				      req->resc_request.num_mac_filters);
	resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
				       req->resc_request.num_vlan_filters);

	/* This isn't really required as VF isn't limited, but some VFs might
	 * actually test this value, so need to provide it.
	 */
	resc->num_mc_filters = req->resc_request.num_mc_filters;

	/* Start the VF in FW */
	rc = qed_sp_vf_start(p_hwfn, vf);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);
	vf->state = VF_ACQUIRED;
	vfpf_status = PFVF_STATUS_SUCCESS;

	/* Prepare Response */
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}
static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
{
	struct qed_sp_vport_update_params params;
	int rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return 0;
	}

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (!rc) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}
static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
{
	struct qed_filter_ucast filter;
	int rc = 0;
	int i;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     p_vf->opaque_fid,
					     &filter,
					     QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}
static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;

	if ((events & (1 << VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}
static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;
	struct qed_filter_ucast filter;

	if (!p_vf->vport_instance)
		return -EINVAL;

	if (events & (1 << MAC_ADDR_FORCED)) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
	}

	if (events & (1 << VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
					      QED_FILTER_FLUSH;

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 &vport_update,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			u16 qid;

			if (!p_vf->vf_queues[i].rxq_active)
				continue;

			qid = p_vf->vf_queues[i].fw_rx_qid;

			rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
							 1, 0, 1,
							 QED_SPQ_MODE_EBLOCK,
							 NULL);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed to send Rx update to queue[0x%04x]\n",
					  qid);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}
static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_sp_vport_start_params params = { 0 };
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	int rc;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		qed_int_cau_conf_sb(p_hwfn, p_ptt,
				    start->sb_addr[sb_id],
				    vf->igu_sbs[sb_id],
				    vf->abs_vf_id, 1);
	}
	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;

	rc = qed_sp_eth_vport_start(p_hwfn, &params);
	if (rc != 0) {
		DP_ERR(p_hwfn,
		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			     sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	u8 status = PFVF_STATUS_SUCCESS;
	int rc;

	vf->vport_instance--;
	vf->spoof_chk = false;

	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc != 0) {
		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
		       rc);
		status = PFVF_STATUS_FAILURE;
	}

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			     sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *vf, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;

	mbx->offset = (u8 *)mbx->reply_virt;

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
			    sizeof(*p_tlv));
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if (status == PFVF_STATUS_SUCCESS) {
		req = &mbx->req_virt->start_rxq;
		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
				offsetof(struct mstorm_vf_zone,
					 non_trigger.eth_rx_queue_producers) +
				sizeof(struct eth_rx_prod_data) * req->rx_qid;
	}

	qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
}
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	struct qed_queue_start_common_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_start_rxq_tlv *req;
	int rc;

	memset(&params, 0, sizeof(params));
	req = &mbx->req_virt->start_rxq;
	params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
	params.vf_qid = req->rx_qid;
	params.vport_id = vf->vport_id;
	params.sb = req->hw_sb;
	params.sb_idx = req->sb_index;

	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
					 vf->vf_queues[req->rx_qid].fw_cid,
					 &params,
					 vf->abs_vf_id + 0x10,
					 req->bd_max_bytes,
					 req->rxq_addr,
					 req->cqe_pbl_addr, req->cqe_pbl_size);

	if (rc) {
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vf_queues[req->rx_qid].rxq_active = true;
		vf->num_active_rxqs++;
	}

	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
}
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_queue_start_common_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	union qed_qm_pq_params pq_params;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_start_txq_tlv *req;
	int rc;

	/* Prepare the parameters which would choose the right PQ */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.eth.is_vf = 1;
	pq_params.eth.vf_id = vf->relative_vf_id;

	memset(&params, 0, sizeof(params));
	req = &mbx->req_virt->start_txq;
	params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
	params.vport_id = vf->vport_id;
	params.sb = req->hw_sb;
	params.sb_idx = req->sb_index;

	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
					 vf->opaque_fid,
					 vf->vf_queues[req->tx_qid].fw_cid,
					 &params,
					 vf->abs_vf_id + 0x10,
					 req->pbl_addr,
					 req->pbl_size, &pq_params);

	if (rc)
		status = PFVF_STATUS_FAILURE;
	else
		vf->vf_queues[req->tx_qid].txq_active = true;

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
			     length, status);
}
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				u16 rxq_id, u8 num_rxqs, bool cqe_completion)
{
	int rc = 0;
	int qid;

	if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
		return -EINVAL;

	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
		if (vf->vf_queues[qid].rxq_active) {
			rc = qed_sp_eth_rx_queue_stop(p_hwfn,
						      vf->vf_queues[qid].
						      fw_rx_qid, false,
						      cqe_completion);

			if (rc)
				return rc;
		}
		vf->vf_queues[qid].rxq_active = false;
		vf->num_active_rxqs--;
	}

	return rc;
}
static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
{
	int rc = 0;
	int qid;

	if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
		return -EINVAL;

	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
		if (vf->vf_queues[qid].txq_active) {
			rc = qed_sp_eth_tx_queue_stop(p_hwfn,
						      vf->vf_queues[qid].
						      fw_tx_qid);

			if (rc)
				return rc;
		}
		vf->vf_queues[qid].txq_active = false;
	}
	return rc;
}
static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_stop_rxqs_tlv *req;
	int rc;

	/* We give the option of starting from qid != 0, in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * amount of queues that exist.
	 */
	req = &mbx->req_virt->stop_rxqs;
	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
				  req->num_rxqs, req->cqe_completion);
	if (rc)
		status = PFVF_STATUS_FAILURE;

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
			     length, status);
}
static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_stop_txqs_tlv *req;
	int rc;

	/* We give the option of starting from qid != 0, in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * amount of queues that exist.
	 */
	req = &mbx->req_virt->stop_txqs;
	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
	if (rc)
		status = PFVF_STATUS_FAILURE;

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
			     length, status);
}
static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_rxq_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	u8 complete_event_flg;
	u8 complete_cqe_flg;
	u16 qid;
	int rc;
	u8 i;

	req = &mbx->req_virt->update_rxq;
	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);

	for (i = 0; i < req->num_rxqs; i++) {
		qid = req->rx_qid + i;

		if (!vf->vf_queues[qid].rxq_active) {
			DP_NOTICE(p_hwfn, "VF rx_qid = %d isn't active!\n",
				  qid);
			status = PFVF_STATUS_FAILURE;
			break;
		}

		rc = qed_sp_eth_rx_queues_update(p_hwfn,
						 vf->vf_queues[qid].fw_rx_qid,
						 1,
						 complete_cqe_flg,
						 complete_event_flg,
						 QED_SPQ_MODE_EBLOCK, NULL);

		if (rc) {
			status = PFVF_STATUS_FAILURE;
			break;
		}
	}

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
			     length, status);
}
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type)
{
	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
	int len = 0;

	do {
		if (!p_tlv->length) {
			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
			return NULL;
		}

		if (p_tlv->type == req_type) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Extended tlv type %d, length %d found\n",
				   p_tlv->type, p_tlv->length);
			return p_tlv;
		}

		len += p_tlv->length;
		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
			DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
			return NULL;
		}
	} while (p_tlv->type != CHANNEL_TLV_LIST_END);

	return NULL;
}
static void
qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_activate_tlv *p_act_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;

	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_act_tlv)
		return;

	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
}
static void
qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
			     struct qed_sp_vport_update_params *p_data,
			     struct qed_vf_info *p_vf,
			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;

	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
		     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_vlan_tlv)
		return;

	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;

	/* Ignore the VF request if we're forcing a vlan */
	if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
		p_data->update_inner_vlan_removal_flg = 1;
		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
	}

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
}
static void
qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;

	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
			  qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						   tlv);
	if (!p_tx_switch_tlv)
		return;

	p_data->update_tx_switching_flg = 1;
	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
}
static void
qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;

	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
		      qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_mcast_tlv)
		return;

	p_data->update_approx_mcast_flg = 1;
	memcpy(p_data->bins, p_mcast_tlv->bins,
	       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}
static void
qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_update_params *p_data,
			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;

	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
		       qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_accept_tlv)
		return;

	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
}
static void
qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;

	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
			    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						     tlv);
	if (!p_accept_any_vlan)
		return;

	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
	p_data->update_accept_any_vlan_flg =
		    p_accept_any_vlan->update_accept_any_vlan_flg;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}
static void
qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
			    struct qed_vf_info *vf,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_rss_params *p_rss,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
	u16 i, q_idx, max_q_idx;
	u16 table_size;

	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_rss_tlv) {
		p_data->rss_params = NULL;
		return;
	}

	memset(p_rss, 0, sizeof(struct qed_rss_params));

	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
				      VFPF_UPDATE_RSS_CONFIG_FLAG);
	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
					    VFPF_UPDATE_RSS_CAPS_FLAG);
	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
				   VFPF_UPDATE_RSS_KEY_FLAG);

	p_rss->rss_enable = p_rss_tlv->rss_enable;
	p_rss->rss_eng_id = vf->relative_vf_id + 1;
	p_rss->rss_caps = p_rss_tlv->rss_caps;
	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
	memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
	       sizeof(p_rss->rss_ind_table));
	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));

	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
			   (1 << p_rss_tlv->rss_table_size_log));

	max_q_idx = ARRAY_SIZE(vf->vf_queues);

	for (i = 0; i < table_size; i++) {
		u16 index = vf->vf_queues[0].fw_rx_qid;

		q_idx = p_rss->rss_ind_table[i];
		if (q_idx >= max_q_idx)
			DP_NOTICE(p_hwfn,
				  "rss_ind_table[%d] = %d, rxq is out of range\n",
				  i, q_idx);
		else if (!vf->vf_queues[q_idx].rxq_active)
			DP_NOTICE(p_hwfn,
				  "rss_ind_table[%d] = %d, rxq is not active\n",
				  i, q_idx);
		else
			index = vf->vf_queues[q_idx].fw_rx_qid;
		p_rss->rss_ind_table[i] = index;
	}

	p_data->rss_params = p_rss;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
}
static void
qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				struct qed_sp_vport_update_params *p_data,
				struct qed_sge_tpa_params *p_sge_tpa,
				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
			qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (!p_sge_tpa_tlv) {
		p_data->sge_tpa_params = NULL;
		return;
	}

	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
	       VFPF_UPDATE_TPA_PARAM_FLAG);

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
}

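/* Top-level handler for a VF vport-update request. The request carries a
 * list of extended TLVs; each qed_iov_vp_update_*() helper above searches
 * that list for its own TLV, fills the matching field in
 * qed_sp_vport_update_params and sets a bit in tlvs_mask so the response
 * can report which features were recognized.
 */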
static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_sp_vport_update_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct qed_sge_tpa_params sge_tpa_params;
	struct qed_rss_params rss_params;
	u8 status = PFVF_STATUS_SUCCESS;
	u16 tlvs_mask = 0;
	u16 length;
	int rc;

	memset(&params, 0, sizeof(params));
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.rss_params = NULL;

	/* Search for extended tlvs list and update values
	 * from VF in struct qed_sp_vport_update_params.
	 */
	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
				    mbx, &tlvs_mask);
	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
					&sge_tpa_params, mbx, &tlvs_mask);

	/* Just log a message if there is no single extended tlv in buffer.
	 * When all features of vport update ramrod would be requested by VF
	 * as extended TLVs in buffer then an error can be returned in response
	 * if there is no extended TLV present in buffer.
	 */
	if (!tlvs_mask) {
		DP_NOTICE(p_hwfn,
			  "No feature tlvs found for vport update\n");
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
						  tlvs_mask, tlvs_mask);
	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}

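/* Mirror the VF's vlan filter requests into a per-VF shadow table so the
 * PF can re-apply them later (e.g. after an FLR or when a forced vlan is
 * removed). Removals are always honoured; additions are skipped while a
 * forced vlan (PVID) is present on the bulletin board.
 */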
static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_filter_ucast *p_params)
{
	int i;

	if (p_params->type == QED_FILTER_MAC)
		return 0;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			if (p_vf->shadow_config.vlans[i].used &&
			    p_vf->shadow_config.vlans[i].vid ==
			    p_params->vlan) {
				p_vf->shadow_config.vlans[i].used = false;
				break;
			}

		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to remove a non-existing vlan\n",
				   p_vf->relative_vf_id);
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			p_vf->shadow_config.vlans[i].used = false;
	}

	/* In forced mode, we're willing to remove entries - but we don't add
	 * new ones.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
		return 0;

	if (p_params->opcode == QED_FILTER_ADD ||
	    p_params->opcode == QED_FILTER_REPLACE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
			if (p_vf->shadow_config.vlans[i].used)
				continue;

			p_vf->shadow_config.vlans[i].used = true;
			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
			break;
		}

		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to configure more than %d vlan filters\n",
				   p_vf->relative_vf_id,
				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
			return -EINVAL;
		}
	}

	return 0;
}

int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
		      int vfid, struct qed_filter_ucast *params)
{
	struct qed_public_vf_info *vf;

	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
	if (!vf)
		return -EINVAL;

	/* No real decision to make; Store the configured MAC */
	if (params->type == QED_FILTER_MAC ||
	    params->type == QED_FILTER_MAC_VLAN)
		ether_addr_copy(vf->mac, params->mac);

	return 0;
}

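/* Handle a unicast filter request from the VF mailbox: update the shadow
 * configuration, reject requests that conflict with a forced MAC/vlan on
 * the bulletin board, and only then send the actual filter ramrod.
 */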
static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_ucast_filter_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_filter_ucast params;
	int rc;

	/* Prepare the unicast filter params */
	memset(&params, 0, sizeof(struct qed_filter_ucast));
	req = &mbx->req_virt->ucast_filter;
	params.opcode = (enum qed_filter_opcode)req->opcode;
	params.type = (enum qed_filter_ucast_type)req->type;

	params.is_rx_filter = 1;
	params.is_tx_filter = 1;
	params.vport_to_remove_from = vf->vport_id;
	params.vport_to_add_to = vf->vport_id;
	memcpy(params.mac, req->mac, ETH_ALEN);
	params.vlan = req->vlan;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
		   vf->abs_vf_id, params.opcode, params.type,
		   params.is_rx_filter ? "RX" : "",
		   params.is_tx_filter ? "TX" : "",
		   params.vport_to_add_to,
		   params.mac[0], params.mac[1],
		   params.mac[2], params.mac[3],
		   params.mac[4], params.mac[5], params.vlan);

	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Update shadow copy of the VF configuration */
	if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Determine if the unicast filtering is acceptable by PF */
	if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
	    (params.type == QED_FILTER_VLAN ||
	     params.type == QED_FILTER_MAC_VLAN)) {
		/* Once VLAN is forced or PVID is set, do not allow
		 * to add/replace any further VLANs.
		 */
		if (params.opcode == QED_FILTER_ADD ||
		    params.opcode == QED_FILTER_REPLACE)
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
	    (params.type == QED_FILTER_MAC ||
	     params.type == QED_FILTER_MAC_VLAN)) {
		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
		    (params.opcode != QED_FILTER_ADD &&
		     params.opcode != QED_FILTER_REPLACE))
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
				     QED_SPQ_MODE_CB, NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	int i;

	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, false);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
			     sizeof(struct pfvf_def_resp_tlv),
			     PFVF_STATUS_SUCCESS);
}

static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;

	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
			     length, status);
}

static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *p_vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;
	int rc = 0;

	qed_iov_vf_cleanup(p_hwfn, p_vf);

	if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
		/* Stopping the VF */
		rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
				    p_vf->opaque_fid);

		if (rc) {
			DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
			       rc);
			status = PFVF_STATUS_FAILURE;
		}

		p_vf->state = VF_STOPPED;
	}

	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
			     length, status);
}

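/* FLR draining helpers - before a VF can be re-enabled after an FLR, the
 * PF polls HW until the VF's in-flight work is done. This helper pretends
 * to the VF's concrete FID and waits for the DORQ usage counter to reach
 * zero; its PBF sibling below waits for the consumers to pass the
 * producers sampled on entry.
 */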
static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	int cnt;
	u32 val;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);

	for (cnt = 0; cnt < 50; cnt++) {
		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
		if (!val)
			break;
		msleep(20);
	}
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	if (cnt == 50) {
		DP_ERR(p_hwfn,
		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
		       p_vf->abs_vf_id, val);
		return -EBUSY;
	}

	return 0;
}

static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
	int i, cnt;

	/* Read initial consumers & producers */
	for (i = 0; i < MAX_NUM_VOQS; i++) {
		u32 prod;

		cons[i] = qed_rd(p_hwfn, p_ptt,
				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				 i * 0x40);
		prod = qed_rd(p_hwfn, p_ptt,
			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
			      i * 0x40);
		distance[i] = prod - cons[i];
	}

	/* Wait for consumers to pass the producers */
	i = 0;
	for (cnt = 0; cnt < 50; cnt++) {
		for (; i < MAX_NUM_VOQS; i++) {
			u32 tmp;

			tmp = qed_rd(p_hwfn, p_ptt,
				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				     i * 0x40);
			if (distance[i] > tmp - cons[i])
				break;
		}

		if (i == MAX_NUM_VOQS)
			break;

		msleep(20);
	}

	if (cnt == 50) {
		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
		       p_vf->abs_vf_id, i);
		return -EBUSY;
	}

	return 0;
}

static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	int rc;

	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	return 0;
}

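/* Per-VF FLR cleanup: release SW state, drain HW via the polling helpers,
 * issue the final-cleanup ramrod, re-enable VF access and finally mark
 * the VF for acknowledgment toward the management FW.
 */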
static int
qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u16 rel_vf_id, u32 *ack_vfs)
{
	struct qed_vf_info *p_vf;
	int rc = 0;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!p_vf)
		return 0;

	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
	    (1ULL << (rel_vf_id % 64))) {
		u16 vfid = p_vf->abs_vf_id;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Handling FLR\n", vfid);

		qed_iov_vf_cleanup(p_hwfn, p_vf);

		/* If VF isn't active, no need for anything but SW */
		if (!p_vf->b_init)
			goto cleanup;

		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
		if (rc)
			goto cleanup;

		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
		if (rc) {
			DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
			return rc;
		}

		/* VF_STOPPED has to be set only after final cleanup
		 * but prior to re-enabling the VF.
		 */
		p_vf->state = VF_STOPPED;

		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
		if (rc) {
			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
			       vfid);
			return rc;
		}
cleanup:
		/* Mark VF for ack and clean pending state */
		if (p_vf->state == VF_RESET)
			p_vf->state = VF_STOPPED;
		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
	}

	return rc;
}

int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	int rc = 0;
	u16 i;

	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	/* Since BRB <-> PRS interface can't be tested as part of the flr
	 * polling due to HW limitations, simply sleep a bit. And since
	 * there's no need to wait per-vf, do it before looping.
	 */
	msleep(100);

	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);

	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}

int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
	u16 i, found = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "[%08x,...,%08x]: %08x\n",
			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

	if (!p_hwfn->cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
		return 0;
	}

	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_vf_info *p_vf;
		u8 vfid;

		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
		if (!p_vf)
			continue;

		vfid = p_vf->abs_vf_id;
		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
			u16 rel_vf_id = p_vf->relative_vf_id;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] [rel %d] got FLR-ed\n",
				   vfid, rel_vf_id);

			p_vf->state = VF_RESET;

			/* No need to lock here, since pending_flr should
			 * only change here and before ACKing MFw. Since
			 * MFW will not trigger an additional attention for
			 * VF flr until ACKs, we're safe.
			 */
			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
			found = 1;
		}
	}

	return found;
}

static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *p_params,
			     struct qed_mcp_link_state *p_link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;

	if (p_params)
		__qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
	if (p_link)
		__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
	if (p_caps)
		__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
}

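/* Main dispatcher for VF->PF channel messages; the first TLV in the
 * request buffer selects the mailbox handler. Unknown TLV types are
 * dumped for debugging but otherwise left unanswered.
 */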
static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt, int vfid)
{
	struct qed_iov_vf_mbx *mbx;
	struct qed_vf_info *p_vf;
	int i;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* qed_iov_process_mbx_request */
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);

	mbx->first_tlv = mbx->req_virt->first_tlv;

	/* check if tlv type is known */
	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		DP_ERR(p_hwfn,
		       "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
		       mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);

		for (i = 0; i < 20; i++) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "%x ",
				   mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
		}
	}
}

void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
{
	u64 add_bit = 1ULL << (vfid % 64);

	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
}

static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
						    u64 *events)
{
	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;

	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
}

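/* EQ-time handling of a VF message - only the DMA address of the request
 * is recorded here; copying and processing the message is deferred to the
 * IOV workqueue, outside EQ context.
 */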
static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
			      u16 abs_vfid, struct regpair *vf_msg)
{
	u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
	struct qed_vf_info *p_vf;

	if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
			   abs_vfid);
		return 0;
	}
	p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];

	/* List the physical address of the request so that handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	/* Mark the event and schedule the workqueue */
	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);

	return 0;
}

int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			u8 opcode, __le16 echo, union event_ring_data *data)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
					  &data->vf_pf_channel.msg_addr);
	default:
		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return -EINVAL;
	}
}

u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (qed_iov_is_valid_vfid(p_hwfn, i, true))
			return i;

out:
	return MAX_NUM_VFS;
}

static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
			       int vfid)
{
	struct qed_dmae_params params;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return -EINVAL;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	if (qed_dmae_host2host(p_hwfn, ptt,
			       vf_info->vf_mbx.pending_req,
			       vf_info->vf_mbx.req_phys,
			       sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);

		return -EIO;
	}

	return 0;
}

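/* Bulletin-board setters for PF-forced unicast configuration. A forced
 * MAC/vlan is advertised to the VF through its bulletin board and also
 * enforced on the vport, overriding whatever the VF configured itself.
 */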
static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
					    u8 *mac, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}

	feature = 1 << MAC_ADDR_FORCED;
	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;
	/* Forced MAC will disable MAC_ADDR */
	vf_info->bulletin.p_virt->valid_bitmap &=
				~(1 << VFPF_BULLETIN_MAC_ADDR);

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
				      u16 pvid, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced vlan, invalid vfid [%d]\n", vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}

static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}

int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
{
	struct qed_vf_info *vf;
	int rc = -EINVAL;

	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf)
		goto out;

	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = 0;
		goto out;
	}

	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}

static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
					   u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return NULL;

	return p_vf->bulletin.p_virt->mac;
}

u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}

static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, int vfid, int val)
{
	struct qed_vf_info *vf;
	u8 abs_vp_id = 0;
	int rc;

	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return -EINVAL;

	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc)
		return rc;

	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}

int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
	struct qed_vf_info *vf;
	u8 vport_id;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set min rate\n");
			return -EINVAL;
		}
	}

	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
	vport_id = vf->vport_id;

	return qed_configure_vport_wfq(cdev, vport_id, rate);
}

static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_wfq_data *vf_vp_wfq;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}

/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
	smp_mb__before_atomic();
	set_bit(flag, &hwfn->iov_task_flags);
	smp_mb__after_atomic();
	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}

void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
	    queue_delayed_work(cdev->hwfns[i].iov_wq,
			       &cdev->hwfns[i].iov_task, 0);
}

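/* SR-IOV teardown: flush pending IOV work, mark all VFs as disabled,
 * detach the PCI VFs and release per-VF HW resources once each VF has
 * stopped (or its FLR flow has timed out).
 */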
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	int i, j;

	for_each_hwfn(cdev, i)
	    if (cdev->hwfns[i].iov_wq)
		flush_workqueue(cdev->hwfns[i].iov_wq);

	/* Mark VFs for disablement */
	qed_iov_set_vfs_to_disable(cdev, true);

	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
		pci_disable_sriov(cdev->pdev);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		/* Failure to acquire the ptt in 100g creates an odd error
		 * where the first engine has already released IOV.
		 */
		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			return -EBUSY;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		qed_clean_wfq_db(hwfn, ptt);

		qed_for_each_vf(hwfn, j) {
			int k;

			if (!qed_iov_is_valid_vfid(hwfn, j, true))
				continue;

			/* Wait until VF is disabled before releasing */
			for (k = 0; k < 100; k++) {
				if (!qed_iov_is_vf_stopped(hwfn, j))
					msleep(20);
				else
					break;
			}

			if (k < 100)
				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
							  ptt, j);
			else
				DP_ERR(hwfn,
				       "Timeout waiting for VF's FLR to end\n");
		}

		qed_ptt_release(hwfn, ptt);
	}

	qed_iov_set_vfs_to_disable(cdev, false);

	return 0;
}

static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int i, j, rc;

	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
		DP_NOTICE(cdev, "Can start at most %d VFs\n",
			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
		return -EINVAL;
	}

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct qed_hwfn *hwfn = &cdev->hwfns[j];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
		int num_sbs = 0, limit = 16;

		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			rc = -EBUSY;
			goto err;
		}

		if (IS_MF_DEFAULT(hwfn))
			limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;

		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(hwfn, &sb_cnt_info);
		num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);

		for (i = 0; i < num; i++) {
			if (!qed_iov_is_valid_vfid(hwfn, i, false))
				continue;

			rc = qed_iov_init_hw_for_vf(hwfn,
						    ptt, i, num_sbs / num);
			if (rc) {
				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
				qed_ptt_release(hwfn, ptt);
				goto err;
			}
		}

		qed_ptt_release(hwfn, ptt);
	}

	/* Enable SRIOV PCIe functions */
	rc = pci_enable_sriov(cdev->pdev, num);
	if (rc) {
		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
		goto err;
	}

	return num;

err:
	qed_sriov_disable(cdev, false);
	return rc;
}

static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
{
	if (!IS_QED_SRIOV(cdev)) {
		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs_param)
		return qed_sriov_enable(cdev, num_vfs_param);
	else
		return qed_sriov_disable(cdev, true);
}

static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF MAC; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced MAC, and schedule the IOV task */
		ether_addr_copy(vf_info->forced_mac, mac);
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF VLAN; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced vlan, and schedule the IOV task */
		vf_info->forced_vlan = vid;
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_get_vf_config(struct qed_dev *cdev,
			     int vf_id, struct ifla_vf_info *ivi)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_public_vf_info *vf_info;
	struct qed_mcp_link_state link;
	u32 tx_rate;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);

	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);

	/* Fill information about VF */
	ivi->vf = vf_id;

	if (is_valid_ether_addr(vf_info->forced_mac))
		ether_addr_copy(ivi->mac, vf_info->forced_mac);
	else
		ether_addr_copy(ivi->mac, vf_info->mac);

	ivi->vlan = vf_info->forced_vlan;
	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
	ivi->linkstate = vf_info->link_state;
	tx_rate = vf_info->tx_rate;
	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);

	return 0;
}

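/* Propagate the PF's current link state into every VF bulletin board,
 * adjusted per VF according to its administratively configured link state
 * and tx rate.
 */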
void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	int i;

	if (!hwfn->pf_iov_info)
		return;

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
		if (!vf_info)
			continue;

		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
		memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
		       sizeof(caps));

		/* Modify link according to the VF's configured link state */
		switch (vf_info->link_state) {
		case IFLA_VF_LINK_STATE_DISABLE:
			link.link_up = false;
			break;
		case IFLA_VF_LINK_STATE_ENABLE:
			link.link_up = true;
			/* Set speed according to maximum supported by HW.
			 * that is 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (hwfn->cdev->num_hwfns > 1) ?
				     100000 : 40000;
		default:
			/* In auto mode pass PF link image to VF */
			break;
		}

		if (link.link_up && vf_info->tx_rate) {
			struct qed_ptt *ptt;
			int rate;

			rate = min_t(int, vf_info->tx_rate, link.speed);

			ptt = qed_ptt_acquire(hwfn);
			if (!ptt) {
				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
				return;
			}

			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
				vf_info->tx_rate = rate;
				link.speed = rate;
			}

			qed_ptt_release(hwfn, ptt);
		}

		qed_iov_set_link(hwfn, i, &params, &link, &caps);
	}

	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}

static int qed_set_vf_link_state(struct qed_dev *cdev,
				 int vf_id, int link_state)
{
	int i;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	/* Handle configuration of link state */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
		if (!vf)
			continue;

		if (vf->link_state == link_state)
			continue;

		vf->link_state = link_state;
		qed_inform_vf_link_state(&cdev->hwfns[i]);
	}

	return 0;
}

static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
{
	int i, rc = -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
		if (rc)
			break;
	}

	return rc;
}

static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set tx rate\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);

		vf->tx_rate = rate;

		qed_inform_vf_link_state(p_hwfn);
	}

	return 0;
}

static int qed_set_vf_rate(struct qed_dev *cdev,
			   int vfid, u32 min_rate, u32 max_rate)
{
	int rc_min = 0, rc_max = 0;

	if (max_rate)
		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);

	if (min_rate)
		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);

	if (rc_max | rc_min)
		return -EINVAL;

	return 0;
}

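/* Workqueue-context handlers. qed_handle_vf_msg() drains the pending
 * events bitmap and, for each VF with an outstanding request, DMAs the
 * message into the PF-side buffer and runs the mailbox dispatcher.
 */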
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
	u64 events[QED_VF_ARRAY_LENGTH];
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Can't acquire PTT; re-scheduling\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
		return;
	}

	qed_iov_pf_get_and_clear_pending_events(hwfn, events);

	DP_VERBOSE(hwfn, QED_MSG_IOV,
		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
		   events[0], events[1], events[2]);

	qed_for_each_vf(hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
			continue;

		qed_iov_process_mbx_req(hwfn, ptt, i);
	}

	qed_ptt_release(hwfn, ptt);
}

static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
{
	int i;

	qed_for_each_vf(hwfn, i) {
		struct qed_public_vf_info *info;
		bool update = false;
		u8 *mac;

		info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (!info)
			continue;

		/* Update data on bulletin board */
		mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
		if (is_valid_ether_addr(info->forced_mac) &&
		    (!mac || !ether_addr_equal(mac, info->forced_mac))) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);

			/* Update bulletin board with forced MAC */
			qed_iov_bulletin_set_forced_mac(hwfn,
							info->forced_mac, i);
			update = true;
		}

		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
		    info->forced_vlan) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
				   info->forced_vlan,
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
			qed_iov_bulletin_set_forced_vlan(hwfn,
							 info->forced_vlan, i);
			update = true;
		}

		if (update)
			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}

static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
{
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		return;
	}

	qed_for_each_vf(hwfn, i)
	    qed_iov_post_vf_bulletin(hwfn, i, ptt);

	qed_ptt_release(hwfn, ptt);
}

void qed_iov_pf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	int rc;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		if (!ptt) {
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
			return;
		}

		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
		if (rc)
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);

		qed_ptt_release(hwfn, ptt);
	}

	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
		qed_handle_vf_msg(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_pf_set_vf_unicast(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_bulletin_post(hwfn);
}

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].iov_wq)
			continue;

		if (schedule_first) {
			qed_schedule_iov(&cdev->hwfns[i],
					 QED_IOV_WQ_STOP_WQ_FLAG);
			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
		}

		flush_workqueue(cdev->hwfns[i].iov_wq);
		destroy_workqueue(cdev->hwfns[i].iov_wq);
	}
}

int qed_iov_wq_start(struct qed_dev *cdev)
{
	char name[NAME_SIZE];
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* PFs need a dedicated workqueue only if they support IOV.
		 * VFs always require one.
		 */
		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
			continue;

		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

		p_hwfn->iov_wq = create_singlethread_workqueue(name);
		if (!p_hwfn->iov_wq) {
			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
			return -ENOMEM;
		}

		if (IS_PF(cdev))
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
		else
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
	}

	return 0;
}

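/* Callback table exposed through qed_iov_if.h; the protocol driver uses
 * these to implement the standard ndo_set_vf_* operations (e.g.
 * "ip link set ... vf ..." configuration) on PF devices.
 */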
const struct qed_iov_hv_ops qed_iov_ops_pass = {
	.configure = &qed_sriov_configure,
	.set_mac = &qed_sriov_pf_set_mac,
	.set_vlan = &qed_sriov_pf_set_vlan,
	.get_config = &qed_get_vf_config,
	.set_link_state = &qed_set_vf_link_state,
	.set_spoof = &qed_spoof_configure,
	.set_rate = &qed_set_vf_rate,