/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 fp_minor;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return -EINVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR,
			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
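/* Added note: qed_sp_vf_stop() below follows the same slow-path pattern as
 * qed_sp_vf_start() above - take a CID from the SPQ, fill an init_data
 * descriptor, build the ramrod payload and post it, blocking until the
 * firmware completes it (QED_SPQ_MODE_EBLOCK).
 */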
static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
				  int rel_vf_id,
				  bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}
static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				  b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}
enum qed_iov_validate_q_mode {
	QED_IOV_VALIDATE_Q_NA,
	QED_IOV_VALIDATE_Q_ENABLE,
	QED_IOV_VALIDATE_Q_DISABLE,
};
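/* Added note: the three validation modes above mean - _NA: the caller does
 * not care about the queue's current state; _ENABLE: the queue must have an
 * active queue-cid (Rx or Tx, per the b_is_tx argument below); _DISABLE:
 * no cid may currently be allocated for the queue.
 */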
static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					u16 qid,
					enum qed_iov_validate_q_mode mode,
					bool b_is_tx)
{
	if (mode == QED_IOV_VALIDATE_Q_NA)
		return true;

	if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) ||
	    (!b_is_tx && p_vf->vf_queues[qid].p_rx_cid))
		return mode == QED_IOV_VALIDATE_Q_ENABLE;

	/* In case we haven't found any valid cid, then it's disabled */
	return mode == QED_IOV_VALIDATE_Q_DISABLE;
}
static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 rx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}
static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 tx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}
static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *p_vf, u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}
static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						false))
			return true;

	return false;
}
static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						true))
			return true;

	return false;
}
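/* Added note: the bulletin board posted below is a one-directional PF->VF
 * channel. The PF bumps the version, computes a CRC over everything past
 * the crc field itself, and DMAEs the whole structure into the VF-provided
 * buffer; the VF side compares version/CRC to detect a new post.
 */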
static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
				    int vfid, struct qed_ptt *p_ptt)
{
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return -EINVAL;

	if (!p_vf->vf_bulletin)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
}
static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}
static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_igu_block *p_sb;
	u16 sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
		return;
	}

	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
		    !(p_sb->status & QED_IGU_STATUS_PF)) {
			val = qed_rd(p_hwfn, p_ptt,
				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}
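/* Added note: the VF database below is carved out of three coherent DMA
 * allocations made in qed_iov_allocate_vfdb() - an array of request
 * mailboxes, an array of reply mailboxes and an array of bulletin boards;
 * each VF gets the idx'th slot of each. Note also how the opaque fid is
 * composed: the PF's low byte is kept and the absolute VF id is placed in
 * the high byte.
 */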
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;

		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	}
}
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}
static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}
int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov)
		return -ENOMEM;

	p_hwfn->pf_iov_info = p_sriov;

	return qed_iov_allocate_vfdb(p_hwfn);
}
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}
void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}
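/* Added note with illustrative arithmetic for the first_vf_in_pf
 * computation in qed_iov_hw_info() below (the numbers are hypothetical):
 * with ARI, a PF with abs_pf_id 2 that publishes VF offset 16 gets
 * first = 16 + 2 - 16 = 2, i.e. VFs are numbered per-engine right after
 * the 16 PFs. Without ARI the offset lands at or above (256 - pf_id), so
 * the "- 256" branch applies instead.
 */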
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info)
		return -ENOMEM;

	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine VFs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on next device.
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by the latter
	 * to differentiate between the two.
	 */
	if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		cdev->p_iov_info->first_vf_in_pf = first;

		if (QED_PATH_ID(p_hwfn))
			cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		cdev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}
bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
			      int vfid, bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}
static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}
static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}
static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}
static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}
static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}
/**
 * @brief qed_iov_config_perm_table - configure the permission
 *      zone table.
 *      In E4, queue zone permission table size is 320x9. There
 *      are 320 VF queues for single engine device (256 for dual
 *      engine device), and each entry has the following format:
 *      {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}
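/* Added note for qed_iov_config_perm_table() above: each queue-zone entry
 * is {valid, vf_id}, so enabling, e.g., absolute VF id 5 writes
 * 0x105 (BIT(8) | 5) into the zone's slot, while disable clears it to 0.
 */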
static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
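/* Added note: qed_iov_alloc_vf_igu_sbs() below walks the IGU CAM - every
 * block still marked QED_IGU_STATUS_FREE is claimed for the VF, its
 * mapping line is rewritten with the VF's function number and vector
 * index, and a matching CAU status-block entry is DMAed to hardware. The
 * PF's free_blks pool is debited accordingly, which is what later bounds
 * the VF's queue count.
 */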
static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *igu_blocks;
	int qid = 0, igu_id = 0;
	u32 val = 0;

	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	while ((qid < num_rx_queues) &&
	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
			       val);

			/* Configure igu sb in CAU which were marked valid */
			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_hwfn->rel_pf_id,
					      vf->abs_vf_id, 1);
			qed_dmae_host2grc(p_hwfn, p_ptt,
					  (u64)(uintptr_t)&sb_entry,
					  CAU_REG_SB_VAR_MEMORY +
					  igu_id * sizeof(u64), 2, 0);
			qid++;
		}
		igu_id++;
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}
static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->igu_map.igu_blocks[igu_id].status |=
		    QED_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;
	}

	vf->num_sbs = 0;
}
static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_iov_vf_init_params *p_params)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params link_params;
	struct qed_mcp_link_state link_state;
	u8 num_of_vf_avaiable_chains = 0;
	struct qed_vf_info *vf = NULL;
	u16 qid, num_irqs;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return -EINVAL;
	}

	/* Perform sanity checking on the requested queue_id */
	for (i = 0; i < p_params->num_queues; i++) {
		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
		u16 max_vf_qzone = min_vf_qzone +
				   FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

		qid = p_params->req_rx_queue[i];
		if (qid < min_vf_qzone || qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
				  qid,
				  p_params->rel_vf_id,
				  min_vf_qzone, max_vf_qzone);
			return -EINVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
				  qid, p_params->rel_vf_id, max_vf_qzone);
			return -EINVAL;
		}

		/* If client *really* wants, Tx qid can be shared with PF */
		if (qid < min_vf_qzone)
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
				   p_params->rel_vf_id, qid, i);
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							     p_ptt,
							     vf, num_irqs);
	if (!num_of_vf_avaiable_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_avaiable_chains;
	vf->num_txqs = num_of_vf_avaiable_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct qed_vf_q_info *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		/* CIDs are per-VF, so no problem having them 0-based. */
		p_queue->fw_cid = i;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n",
			   vf->relative_vf_id,
			   i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid,
			   p_queue->fw_tx_qid, p_queue->fw_cid);
	}

	/* Update the link configuration in bulletin */
	memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
	       sizeof(link_params));
	memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
	memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
	       sizeof(link_caps));

	qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
			 &link_params, &link_state, &link_caps);

	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}
static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	memset(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}
static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}
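/* Added note: mailbox messages are flat TLV lists - each entry starts with
 * a struct channel_tlv {type, length} header, and a list is terminated by
 * a CHANNEL_TLV_LIST_END entry. qed_add_tlv() below appends one entry and
 * leaves *offset pointing right past it, ready for the next TLV.
 */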
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}
/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}
*p_hwfn
,
1176 struct qed_ptt
*p_ptt
,
1177 struct qed_vf_info
*p_vf
,
1178 u16 length
, u8 status
)
1180 struct qed_iov_vf_mbx
*mbx
= &p_vf
->vf_mbx
;
1181 struct qed_dmae_params params
;
1184 mbx
->reply_virt
->default_resp
.hdr
.status
= status
;
1186 qed_dp_tlv_list(p_hwfn
, mbx
->reply_virt
);
1188 eng_vf_id
= p_vf
->abs_vf_id
;
1190 memset(¶ms
, 0, sizeof(struct qed_dmae_params
));
1191 params
.flags
= QED_DMAE_FLAG_VF_DST
;
1192 params
.dst_vfid
= eng_vf_id
;
1194 qed_dmae_host2host(p_hwfn
, p_ptt
, mbx
->reply_phys
+ sizeof(u64
),
1195 mbx
->req_virt
->first_tlv
.reply_address
+
1197 (sizeof(union pfvf_tlvs
) - sizeof(u64
)) / 4,
1200 /* Once PF copies the rc to the VF, the latter can continue
1201 * and send an additional message. So we have to make sure the
1202 * channel would be re-set to ready prior to that.
1205 GTT_BAR0_MAP_REG_USDM_RAM
+
1206 USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id
), 1);
1208 qed_dmae_host2host(p_hwfn
, p_ptt
, mbx
->reply_phys
,
1209 mbx
->req_virt
->first_tlv
.reply_address
,
1210 sizeof(u64
) / 4, ¶ms
);
static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}
static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u8 status,
					    u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & BIT(i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & BIT(i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}
static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}
static struct
qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}
static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
	if (!vf_info)
		return;

	/* Clear the VF mac */
	eth_zero_addr(vf_info->mac);

	vf_info->rx_accept_mode = 0;
	vf_info->tx_accept_mode = 0;
}
static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
		struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i];

		if (p_queue->p_rx_cid) {
			qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
			p_queue->p_rx_cid = NULL;
		}

		if (p_queue->p_tx_cid) {
			qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
			p_queue->p_tx_cid = NULL;
		}
	}

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}
static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *p_vf,
				      struct vf_pf_resc_request *p_req,
				      struct pf_vf_resc *p_resp)
{
	int i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				(u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
	}

	/* Filter related information */
	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
					p_req->num_mac_filters);
	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
					 p_req->num_vlan_filters);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs,
			   p_resp->num_rxqs,
			   p_req->num_txqs,
			   p_resp->num_txqs,
			   p_req->num_sbs,
			   p_resp->num_sbs,
			   p_req->num_mac_filters,
			   p_resp->num_mac_filters,
			   p_req->num_vlan_filters,
			   p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}
static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
					 struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  offsetof(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  offsetof(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  offsetof(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}
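/* Added note: qed_iov_vf_mbx_acquire() below is the version-negotiation
 * hub - the PF advertises its fastpath HSI up front (so a legacy PF is
 * recognizable by its absence), downgrades pre-HSI VFs to the
 * NO_PKT_LEN_TUNN level, rejects old VFs on 100g devices, and only then
 * sizes resources and starts the VF in firmware.
 */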
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	int rc;

	memset(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		goto out;
	}

	/* Store the acquire message */
	memcpy(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this information.
	 */
	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
					 req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						  &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = qed_sp_vf_start(p_hwfn, vf);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);
	vf->state = VF_ACQUIRED;

	/* Prepare Response */
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}
static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
{
	struct qed_sp_vport_update_params params;
	int rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return 0;
	}

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (!rc) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}
static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
{
	struct qed_filter_ucast filter;
	int rc = 0;
	int i;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}
static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;

	if ((events & BIT(VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}
static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;
	struct qed_filter_ucast filter;

	if (!p_vf->vport_instance)
		return -EINVAL;

	if (events & BIT(MAC_ADDR_FORCED)) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
	}

	if (events & BIT(VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
					      QED_FILTER_FLUSH;

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 &vport_update,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			struct qed_queue_cid *p_cid;

			p_cid = p_vf->vf_queues[i].p_rx_cid;
			if (!p_cid)
				continue;

			rc = qed_sp_eth_rx_queues_update(p_hwfn,
							 (void **)&p_cid,
							 1, 0, 1,
							 QED_SPQ_MODE_EBLOCK,
							 NULL);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed to send Rx update for queue[0x%04x]\n",
					  p_cid->rel.queue_id);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}
static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_sp_vport_start_params params = { 0 };
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	int rc;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		qed_int_cau_conf_sb(p_hwfn, p_ptt,
				    start->sb_addr[sb_id],
				    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
	}

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;
	params.check_mac = true;

	rc = qed_sp_eth_vport_start(p_hwfn, &params);
	if (rc) {
		DP_ERR(p_hwfn,
		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			     sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	u8 status = PFVF_STATUS_SUCCESS;
	int rc;

	vf->vport_instance--;
	vf->spoof_chk = false;

	if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
	    (qed_iov_validate_active_txq(p_hwfn, vf))) {
		vf->b_malicious = true;
		DP_NOTICE(p_hwfn,
			  "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
			  vf->abs_vf_id);
		status = PFVF_STATUS_MALICIOUS;
		goto out;
	}

	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc) {
		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
		       rc);
		status = PFVF_STATUS_FAILURE;
	}

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			     sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *vf,
					  u8 status, bool b_legacy)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
			    length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		req = &mbx->req_virt->start_rxq;
		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
				offsetof(struct mstorm_vf_zone,
					 non_trigger.eth_rx_queue_producers) +
				sizeof(struct eth_rx_prod_data) * req->rx_qid;
	}

	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	struct qed_queue_start_common_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	struct qed_vf_q_info *p_queue;
	struct vfpf_start_rxq_tlv *req;
	bool b_legacy_vf = false;
	int rc;

	req = &mbx->req_virt->start_rxq;

	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
				  QED_IOV_VALIDATE_Q_DISABLE) ||
	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	/* Acquire a new queue-cid */
	p_queue = &vf->vf_queues[req->rx_qid];

	memset(&params, 0, sizeof(params));
	params.queue_id = p_queue->fw_rx_qid;
	params.vport_id = vf->vport_id;
	params.stats_id = vf->abs_vf_id + 0x10;
	params.sb = req->hw_sb;
	params.sb_idx = req->sb_index;

	p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn,
						  vf->opaque_fid,
						  p_queue->fw_cid,
						  req->rx_qid, &params);
	if (!p_queue->p_rx_cid)
		goto out;

	/* Legacy VFs have their Producers in a different location, which they
	 * calculate on their own and clean the producer prior to this.
	 */
	if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		b_legacy_vf = true;
	} else {
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
		       0);
	}
	p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;

	rc = qed_eth_rxq_start_ramrod(p_hwfn,
				      p_queue->p_rx_cid,
				      req->bd_max_bytes,
				      req->rxq_addr,
				      req->cqe_pbl_addr, req->cqe_pbl_size);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
		p_queue->p_rx_cid = NULL;
	} else {
		status = PFVF_STATUS_SUCCESS;
		vf->num_active_rxqs++;
	}

out:
	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
}
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *p_vf, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	bool b_legacy = false;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		b_legacy = true;

	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
			    length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		u16 qid = mbx->req_virt->start_txq.tx_qid;

		p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
					       DQ_DEMS_LEGACY);
	}

	qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	struct qed_queue_start_common_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	union qed_qm_pq_params pq_params;
	struct vfpf_start_txq_tlv *req;
	struct qed_vf_q_info *p_queue;
	int rc;
	u16 pq;

	/* Prepare the parameters which would choose the right PQ */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.eth.is_vf = 1;
	pq_params.eth.vf_id = vf->relative_vf_id;

	memset(&params, 0, sizeof(params));
	req = &mbx->req_virt->start_txq;

	if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
				  QED_IOV_VALIDATE_Q_DISABLE) ||
	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	/* Acquire a new queue-cid */
	p_queue = &vf->vf_queues[req->tx_qid];

	params.queue_id = p_queue->fw_tx_qid;
	params.vport_id = vf->vport_id;
	params.stats_id = vf->abs_vf_id + 0x10;
	params.sb = req->hw_sb;
	params.sb_idx = req->sb_index;

	p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn,
						  vf->opaque_fid,
						  p_queue->fw_cid,
						  req->tx_qid, &params);
	if (!p_queue->p_tx_cid)
		goto out;

	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
	rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
				      req->pbl_addr, req->pbl_size, pq);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
		p_queue->p_tx_cid = NULL;
	} else {
		status = PFVF_STATUS_SUCCESS;
	}

out:
	qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
}

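/* Per-queue stop helpers: validate that the queue is currently enabled,
 * issue the queue-stop ramrod, and clear the PF's queue-cid handle for it.
 */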
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				u16 rxq_id, bool cqe_completion)
{
	struct qed_vf_q_info *p_queue;
	int rc = 0;

	if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id,
				  QED_IOV_VALIDATE_Q_ENABLE)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] Tried Closing Rx 0x%04x which is inactive\n",
			   vf->relative_vf_id, rxq_id);
		return -EINVAL;
	}

	p_queue = &vf->vf_queues[rxq_id];

	rc = qed_eth_rx_queue_stop(p_hwfn,
				   p_queue->p_rx_cid,
				   false, cqe_completion);
	if (rc)
		return rc;

	p_queue->p_rx_cid = NULL;
	vf->num_active_rxqs--;

	return 0;
}

static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf, u16 txq_id)
{
	struct qed_vf_q_info *p_queue;
	int rc = 0;

	if (!qed_iov_validate_txq(p_hwfn, vf, txq_id,
				  QED_IOV_VALIDATE_Q_ENABLE))
		return -EINVAL;

	p_queue = &vf->vf_queues[txq_id];

	rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
	if (rc)
		return rc;

	p_queue->p_tx_cid = NULL;

	return 0;
}

static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_FAILURE;
	struct vfpf_stop_rxqs_tlv *req;
	int rc;

	/* There has never been an official driver that used this interface
	 * for stopping multiple queues, and it is now considered deprecated.
	 * Validate this isn't used here.
	 */
	req = &mbx->req_virt->stop_rxqs;
	if (req->num_rxqs != 1) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Odd; VF[%d] tried stopping multiple Rx queues\n",
			   vf->relative_vf_id);
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
				  req->cqe_completion);
	if (!rc)
		status = PFVF_STATUS_SUCCESS;

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
			     length, status);
}

static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_FAILURE;
	struct vfpf_stop_txqs_tlv *req;
	int rc;

	/* There has never been an official driver that used this interface
	 * for stopping multiple queues, and it is now considered deprecated.
	 * Validate this isn't used here.
	 */
	req = &mbx->req_virt->stop_txqs;
	if (req->num_txqs != 1) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Odd; VF[%d] tried stopping multiple Tx queues\n",
			   vf->relative_vf_id);
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid);
	if (!rc)
		status = PFVF_STATUS_SUCCESS;

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
			     length, status);
}

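/* Handle an UPDATE_RXQ request: every referenced Rx queue must already be
 * enabled before its queue-cid is handed to the update ramrod.
 */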
static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_rxq_tlv *req;
	u8 status = PFVF_STATUS_FAILURE;
	u8 complete_event_flg;
	u8 complete_cqe_flg;
	u16 qid;
	int rc;
	u8 i;

	req = &mbx->req_virt->update_rxq;
	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);

	/* Validate inputs */
	for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++)
		if (!qed_iov_validate_rxq(p_hwfn, vf, i,
					  QED_IOV_VALIDATE_Q_ENABLE)) {
			DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
				vf->relative_vf_id, req->rx_qid, req->num_rxqs);
			goto out;
		}

	/* Prepare the handlers */
	for (i = 0; i < req->num_rxqs; i++) {
		qid = req->rx_qid + i;
		handlers[i] = vf->vf_queues[qid].p_rx_cid;
	}

	rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
					 req->num_rxqs,
					 complete_cqe_flg,
					 complete_event_flg,
					 QED_SPQ_MODE_EBLOCK, NULL);
	if (rc)
		goto out;

	status = PFVF_STATUS_SUCCESS;

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
			     length, status);
}

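/* Walk the request buffer's TLV chain looking for a specific extended TLV
 * type; bail out on a zero-length TLV or a chain overrunning
 * TLV_BUFFER_SIZE.
 */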
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type)
{
	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
	int len = 0;

	do {
		if (!p_tlv->length) {
			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
			return NULL;
		}

		if (p_tlv->type == req_type) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Extended tlv type %d, length %d found\n",
				   p_tlv->type, p_tlv->length);
			return p_tlv;
		}

		len += p_tlv->length;
		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
			DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
			return NULL;
		}
	} while (p_tlv->type != CHANNEL_TLV_LIST_END);

	return NULL;
}

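/* The qed_iov_vp_update_*() helpers below each search the VF request for
 * one extended vport-update TLV, copy its fields into the
 * qed_sp_vport_update_params structure, and mark the TLV as seen in
 * tlvs_mask. Absent TLVs are simply skipped.
 */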
static void
qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_activate_tlv *p_act_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;

	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_act_tlv)
		return;

	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
}

static void
qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
			     struct qed_sp_vport_update_params *p_data,
			     struct qed_vf_info *p_vf,
			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;

	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
		     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_vlan_tlv)
		return;

	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;

	/* Ignore the VF request if we're forcing a vlan */
	if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
		p_data->update_inner_vlan_removal_flg = 1;
		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
	}

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
}

static void
qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;

	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
			  qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						   tlv);
	if (!p_tx_switch_tlv)
		return;

	p_data->update_tx_switching_flg = 1;
	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
}

static void
qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;

	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
		      qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_mcast_tlv)
		return;

	p_data->update_approx_mcast_flg = 1;
	memcpy(p_data->bins, p_mcast_tlv->bins,
	       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}

static void
qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_update_params *p_data,
			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;

	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
		       qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_accept_tlv)
		return;

	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
}

static void
qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;

	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
			    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						     tlv);
	if (!p_accept_any_vlan)
		return;

	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
	p_data->update_accept_any_vlan_flg =
		    p_accept_any_vlan->update_accept_any_vlan_flg;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}

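/* Unlike the simpler vport-update TLVs, RSS needs validation - the
 * indirection table may only reference enabled Rx queues. A rejected RSS
 * TLV is reflected in tlvs_mask but not in tlvs_accepted, which lets the
 * PF tell the VF the request was seen but refused.
 */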
static void
qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
			    struct qed_vf_info *vf,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_rss_params *p_rss,
			    struct qed_iov_vf_mbx *p_mbx,
			    u16 *tlvs_mask, u16 *tlvs_accepted)
{
	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
	bool b_reject = false;
	u16 table_size;
	u16 i, q_idx;

	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_rss_tlv) {
		p_data->rss_params = NULL;
		return;
	}

	memset(p_rss, 0, sizeof(struct qed_rss_params));

	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
				      VFPF_UPDATE_RSS_CONFIG_FLAG);
	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
					    VFPF_UPDATE_RSS_CAPS_FLAG);
	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
				   VFPF_UPDATE_RSS_KEY_FLAG);

	p_rss->rss_enable = p_rss_tlv->rss_enable;
	p_rss->rss_eng_id = vf->relative_vf_id + 1;
	p_rss->rss_caps = p_rss_tlv->rss_caps;
	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));

	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
			   (1 << p_rss_tlv->rss_table_size_log));

	for (i = 0; i < table_size; i++) {
		q_idx = p_rss_tlv->rss_ind_table[i];
		if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
					  QED_IOV_VALIDATE_Q_ENABLE)) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
				   vf->relative_vf_id, q_idx);
			b_reject = true;
			goto out;
		}

		p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
	}

	p_data->rss_params = p_rss;
out:
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
	if (!b_reject)
		*tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
}

static void
qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				struct qed_sp_vport_update_params *p_data,
				struct qed_sge_tpa_params *p_sge_tpa,
				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
			qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (!p_sge_tpa_tlv) {
		p_data->sge_tpa_params = NULL;
		return;
	}

	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
		VFPF_UPDATE_TPA_PARAM_FLAG);

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
}

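/* Filter the accept-mode flags requested by untrusted VFs: the unmatched
 * unicast/multicast bits are silently stripped, while the requested mode
 * is traced in the public VF info for later reconfiguration.
 */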
static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
				    u8 vfid,
				    struct qed_sp_vport_update_params *params,
				    u16 *tlvs)
{
	u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
	struct qed_filter_accept_flags *flags = &params->accept_flags;
	struct qed_public_vf_info *vf_info;

	/* Untrusted VFs can't even be trusted to know that fact.
	 * Simply indicate everything is configured fine, and trace
	 * configuration 'behind their back'.
	 */
	if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
		return 0;

	vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);

	if (flags->update_rx_mode_config) {
		vf_info->rx_accept_mode = flags->rx_accept_filter;
		if (!vf_info->is_trusted_configured)
			flags->rx_accept_filter &= ~mask;
	}

	if (flags->update_tx_mode_config) {
		vf_info->tx_accept_mode = flags->tx_accept_filter;
		if (!vf_info->is_trusted_configured)
			flags->tx_accept_filter &= ~mask;
	}

	return 0;
}

static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_rss_params *p_rss_params = NULL;
	struct qed_sp_vport_update_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct qed_sge_tpa_params sge_tpa_params;
	u16 tlvs_mask = 0, tlvs_accepted = 0;
	u8 status = PFVF_STATUS_SUCCESS;
	u16 length;
	int rc;

	/* Validate that the VF can issue such a request */
	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing vport update\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}
	p_rss_params = vzalloc(sizeof(*p_rss_params));
	if (p_rss_params == NULL) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	memset(&params, 0, sizeof(params));
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.rss_params = NULL;

	/* Search for extended tlvs list and update values
	 * from VF in struct qed_sp_vport_update_params.
	 */
	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
					&sge_tpa_params, mbx, &tlvs_mask);

	tlvs_accepted = tlvs_mask;

	/* Some of the extended TLVs need to be validated first; In that case,
	 * they can update the mask without updating the accepted [so that
	 * PF could communicate to VF it has rejected request].
	 */
	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
				    mbx, &tlvs_mask, &tlvs_accepted);

	if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
				     &params, &tlvs_accepted)) {
		tlvs_accepted = 0;
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	if (!tlvs_accepted) {
		if (tlvs_mask)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Upper-layer prevents VF vport configuration\n");
		else
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "No feature tlvs found for vport update\n");
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);

	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	vfree(p_rss_params);
	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
						  tlvs_mask, tlvs_accepted);
	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}

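/* Shadow-configuration helpers: the PF mirrors each VF's vlan/MAC filter
 * configuration so it can be re-applied after events such as an FLR, and
 * so that forced values can override VF requests.
 */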
static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
					 struct qed_vf_info *p_vf,
					 struct qed_filter_ucast *p_params)
{
	int i;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			if (p_vf->shadow_config.vlans[i].used &&
			    p_vf->shadow_config.vlans[i].vid ==
			    p_params->vlan) {
				p_vf->shadow_config.vlans[i].used = false;
				break;
			}
		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to remove a non-existing vlan\n",
				   p_vf->relative_vf_id);
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			p_vf->shadow_config.vlans[i].used = false;
	}

	/* In forced mode, we're willing to remove entries - but we don't add
	 * new ones.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
		return 0;

	if (p_params->opcode == QED_FILTER_ADD ||
	    p_params->opcode == QED_FILTER_REPLACE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
			if (p_vf->shadow_config.vlans[i].used)
				continue;

			p_vf->shadow_config.vlans[i].used = true;
			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
			break;
		}

		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to configure more than %d vlan filters\n",
				   p_vf->relative_vf_id,
				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
			return -EINVAL;
		}
	}

	return 0;
}

static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					struct qed_filter_ucast *p_params)
{
	int i;

	/* If we're in forced-mode, we don't allow any change */
	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
		return 0;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (ether_addr_equal(p_vf->shadow_config.macs[i],
					     p_params->mac)) {
				eth_zero_addr(p_vf->shadow_config.macs[i]);
				break;
			}
		}

		if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "MAC isn't configured\n");
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
			eth_zero_addr(p_vf->shadow_config.macs[i]);
	}

	/* List the new MAC address */
	if (p_params->opcode != QED_FILTER_ADD &&
	    p_params->opcode != QED_FILTER_REPLACE)
		return 0;

	for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
		if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
			ether_addr_copy(p_vf->shadow_config.macs[i],
					p_params->mac);
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Added MAC at %d entry in shadow\n", i);
			break;
		}
	}

	if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
		return -EINVAL;
	}

	return 0;
}

static int
qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 struct qed_filter_ucast *p_params)
{
	int rc = 0;

	if (p_params->type == QED_FILTER_MAC) {
		rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
		if (rc)
			return rc;
	}

	if (p_params->type == QED_FILTER_VLAN)
		rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);

	return rc;
}

static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
			     int vfid, struct qed_filter_ucast *params)
{
	struct qed_public_vf_info *vf;

	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
	if (!vf)
		return -EINVAL;

	/* No real decision to make; Store the configured MAC */
	if (params->type == QED_FILTER_MAC ||
	    params->type == QED_FILTER_MAC_VLAN)
		ether_addr_copy(vf->mac, params->mac);

	return 0;
}

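/* Handle a UCAST_FILTER request: update the shadow configuration and
 * reject add/replace requests that conflict with a forced MAC/vlan
 * published on the bulletin board.
 */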
static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_ucast_filter_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_filter_ucast params;
	int rc;

	/* Prepare the unicast filter params */
	memset(&params, 0, sizeof(struct qed_filter_ucast));
	req = &mbx->req_virt->ucast_filter;
	params.opcode = (enum qed_filter_opcode)req->opcode;
	params.type = (enum qed_filter_ucast_type)req->type;

	params.is_rx_filter = 1;
	params.is_tx_filter = 1;
	params.vport_to_remove_from = vf->vport_id;
	params.vport_to_add_to = vf->vport_id;
	memcpy(params.mac, req->mac, ETH_ALEN);
	params.vlan = req->vlan;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
		   vf->abs_vf_id, params.opcode, params.type,
		   params.is_rx_filter ? "RX" : "",
		   params.is_tx_filter ? "TX" : "",
		   params.vport_to_add_to,
		   params.mac[0], params.mac[1],
		   params.mac[2], params.mac[3],
		   params.mac[4], params.mac[5], params.vlan);

	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Update shadow copy of the VF configuration */
	if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Determine if the unicast filtering is acceptable by PF */
	if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
	    (params.type == QED_FILTER_VLAN ||
	     params.type == QED_FILTER_MAC_VLAN)) {
		/* Once VLAN is forced or PVID is set, do not allow
		 * to add/replace any further VLANs.
		 */
		if (params.opcode == QED_FILTER_ADD ||
		    params.opcode == QED_FILTER_REPLACE)
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
	    (params.type == QED_FILTER_MAC ||
	     params.type == QED_FILTER_MAC_VLAN)) {
		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
		    (params.opcode != QED_FILTER_ADD &&
		     params.opcode != QED_FILTER_REPLACE))
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
				     QED_SPQ_MODE_CB, NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	int i;

	/* Reset the VF's status blocks */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, false);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
			     sizeof(struct pfvf_def_resp_tlv),
			     PFVF_STATUS_SUCCESS);
}

static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;

	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
			     length, status);
}

static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *p_vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;
	int rc = 0;

	qed_iov_vf_cleanup(p_hwfn, p_vf);

	if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
		/* Stopping the VF */
		rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
				    p_vf->opaque_fid);

		if (rc) {
			DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
			       rc);
			status = PFVF_STATUS_FAILURE;
		}

		p_vf->state = VF_STOPPED;
	}

	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
			     length, status);
}

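/* FLR polling helpers: wait for the DORQ usage counter to drain and for
 * the PBF consumers to pass the producer values sampled at entry, before
 * final cleanup of an FLR-ed VF may proceed.
 */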
static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	int cnt;
	u32 val;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);

	for (cnt = 0; cnt < 50; cnt++) {
		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
		if (!val)
			break;
		msleep(20);
	}
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	if (cnt == 50) {
		DP_ERR(p_hwfn,
		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
		       p_vf->abs_vf_id, val);
		return -EBUSY;
	}

	return 0;
}

static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
	int i, cnt;

	/* Read initial consumers & producers */
	for (i = 0; i < MAX_NUM_VOQS; i++) {
		u32 prod;

		cons[i] = qed_rd(p_hwfn, p_ptt,
				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				 i * 0x40);
		prod = qed_rd(p_hwfn, p_ptt,
			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
			      i * 0x40);
		distance[i] = prod - cons[i];
	}

	/* Wait for consumers to pass the producers */
	i = 0;
	for (cnt = 0; cnt < 50; cnt++) {
		for (; i < MAX_NUM_VOQS; i++) {
			u32 tmp;

			tmp = qed_rd(p_hwfn, p_ptt,
				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				     i * 0x40);
			if (distance[i] > tmp - cons[i])
				break;
		}

		if (i == MAX_NUM_VOQS)
			break;

		msleep(20);
	}

	if (cnt == 50) {
		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
		       p_vf->abs_vf_id, i);
		return -EBUSY;
	}

	return 0;
}

static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	int rc;

	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	return 0;
}

static int
qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u16 rel_vf_id, u32 *ack_vfs)
{
	struct qed_vf_info *p_vf;
	int rc = 0;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!p_vf)
		return 0;

	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
	    (1ULL << (rel_vf_id % 64))) {
		u16 vfid = p_vf->abs_vf_id;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Handling FLR\n", vfid);

		qed_iov_vf_cleanup(p_hwfn, p_vf);

		/* If VF isn't active, no need for anything but SW */
		if (!p_vf->b_init)
			goto cleanup;

		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
		if (rc)
			goto cleanup;

		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
		if (rc) {
			DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
			return rc;
		}

		/* Workaround to make VF-PF channel ready, as FW
		 * doesn't do that as a part of FLR.
		 */
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_USDM_RAM +
		       USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);

		/* VF_STOPPED has to be set only after final cleanup
		 * but prior to re-enabling the VF.
		 */
		p_vf->state = VF_STOPPED;

		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
		if (rc) {
			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
			       vfid);
			return rc;
		}
cleanup:
		/* Mark VF for ack and clean pending state */
		if (p_vf->state == VF_RESET)
			p_vf->state = VF_STOPPED;
		ack_vfs[vfid / 32] |= BIT((vfid % 32));
		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
		p_vf->vf_mbx.b_pending_msg = false;
	}

	return rc;
}

int
qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	int rc = 0;
	u16 i;

	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	/* Since BRB <-> PRS interface can't be tested as part of the flr
	 * polling due to HW limitations, simply sleep a bit. And since
	 * there's no need to wait per-vf, do it before looping.
	 */
	msleep(100);

	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);

	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}

bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "[%08x,...,%08x]: %08x\n",
			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

	if (!p_hwfn->cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
		return false;
	}

	/* Mark VFs */
	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_vf_info *p_vf;
		u8 vfid;

		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
		if (!p_vf)
			continue;

		vfid = p_vf->abs_vf_id;
		if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
			u16 rel_vf_id = p_vf->relative_vf_id;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] [rel %d] got FLR-ed\n",
				   vfid, rel_vf_id);

			p_vf->state = VF_RESET;

			/* No need to lock here, since pending_flr should
			 * only change here and before ACKing MFw. Since
			 * MFW will not trigger an additional attention for
			 * VF flr until ACKs, we're safe.
			 */
			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
			found = true;
		}
	}

	return found;
}

static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *p_params,
			     struct qed_mcp_link_state *p_link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;

	if (p_params)
		__qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
	if (p_link)
		__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
	if (p_caps)
		__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
}

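/* Main dispatcher for VF->PF mailbox requests. Known TLVs from well-behaved
 * VFs are routed to their handlers; malicious VFs get a
 * PFVF_STATUS_MALICIOUS response, and unknown TLVs are answered only when
 * the reply address matches the one posted at acquisition.
 */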
static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt, int vfid)
{
	struct qed_iov_vf_mbx *mbx;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* qed_iov_process_mbx_request */
	if (!mbx->b_pending_msg) {
		DP_NOTICE(p_hwfn,
			  "VF[%02x]: Trying to process mailbox message when none is pending\n",
			  p_vf->abs_vf_id);
		return;
	}
	mbx->b_pending_msg = false;

	mbx->first_tlv = mbx->req_virt->first_tlv;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%02x]: Processing mailbox message [type %04x]\n",
		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

	/* check if tlv type is known */
	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
	    !p_vf->b_malicious) {
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
			   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

		qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
				     mbx->first_tlv.tl.type,
				     sizeof(struct pfvf_def_resp_tlv),
				     PFVF_STATUS_MALICIOUS);
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		DP_NOTICE(p_hwfn,
			  "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
			  p_vf->abs_vf_id,
			  mbx->first_tlv.tl.type,
			  mbx->first_tlv.tl.length,
			  mbx->first_tlv.padding, mbx->first_tlv.reply_address);

		/* Try replying in case reply address matches the acquisition's
		 * posted address.
		 */
		if (p_vf->acquire.first_tlv.reply_address &&
		    (mbx->first_tlv.reply_address ==
		     p_vf->acquire.first_tlv.reply_address)) {
			qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
					     mbx->first_tlv.tl.type,
					     sizeof(struct pfvf_def_resp_tlv),
					     PFVF_STATUS_NOT_SUPPORTED);
		} else {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%02x]: Can't respond to TLV - no valid reply address\n",
				   p_vf->abs_vf_id);
		}
	}
}

void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
{
	int i;

	memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);

	qed_for_each_vf(p_hwfn, i) {
		struct qed_vf_info *p_vf;

		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
		if (p_vf->vf_mbx.b_pending_msg)
			events[i / 64] |= 1ULL << (i % 64);
	}
}

static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
						       u16 abs_vfid)
{
	u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;

	if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
			   abs_vfid);
		return NULL;
	}

	return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
}

static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
			      u16 abs_vfid, struct regpair *vf_msg)
{
	struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
								abs_vfid);

	if (!p_vf)
		return 0;

	/* List the physical address of the request so that handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	/* Mark the event and schedule the workqueue */
	p_vf->vf_mbx.b_pending_msg = true;
	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);

	return 0;
}

static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
				     struct malicious_vf_eqe_data *p_data)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);

	if (!p_vf)
		return;

	if (!p_vf->b_malicious) {
		DP_NOTICE(p_hwfn,
			  "VF [%d] - Malicious behavior [%02x]\n",
			  p_vf->abs_vf_id, p_data->err_id);

		p_vf->b_malicious = true;
	} else {
		DP_INFO(p_hwfn,
			"VF [%d] - Malicious behavior [%02x]\n",
			p_vf->abs_vf_id, p_data->err_id);
	}
}

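/* Event-queue callback for SR-IOV related async events - VF->PF channel
 * messages and malicious-VF indications.
 */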
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			u8 opcode, __le16 echo, union event_ring_data *data)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
					  &data->vf_pf_channel.msg_addr);
	case COMMON_EVENT_MALICIOUS_VF:
		qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
		return 0;
	default:
		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return -EINVAL;
	}
}

qed_iov_get_next_active_vf(struct qed_hwfn
*p_hwfn
, u16 rel_vf_id
)
3421 struct qed_hw_sriov_info
*p_iov
= p_hwfn
->cdev
->p_iov_info
;
3427 for (i
= rel_vf_id
; i
< p_iov
->total_vfs
; i
++)
3428 if (qed_iov_is_valid_vfid(p_hwfn
, rel_vf_id
, true, false))
static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
			       int vfid)
{
	struct qed_dmae_params params;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return -EINVAL;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	if (qed_dmae_host2host(p_hwfn, ptt,
			       vf_info->vf_mbx.pending_req,
			       vf_info->vf_mbx.req_phys,
			       sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);

		return -EIO;
	}

	return 0;
}

static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
					    u8 *mac, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can't set forced MAC to malicious VF [%d]\n", vfid);
		return;
	}

	feature = 1 << MAC_ADDR_FORCED;
	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;
	/* Forced MAC will disable MAC_ADDR */
	vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
					     u16 pvid, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced vlan, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can't set forced vlan to malicious VF [%d]\n", vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return false;

	return p_vf_info->state == VF_STOPPED;
}

static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}

static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
{
	struct qed_vf_info *vf;
	int rc = -EINVAL;

	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf)
		goto out;

	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = 0;
		goto out;
	}

	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}

static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
					   u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
		return NULL;

	return p_vf->bulletin.p_virt->mac;
}

static u16
qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}

static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, int vfid, int val)
{
	struct qed_vf_info *vf;
	u8 abs_vp_id = 0;
	int rc;

	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return -EINVAL;

	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc)
		return rc;

	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}

static int
qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
	struct qed_vf_info *vf;
	u8 vport_id;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set min rate\n");
			return -EINVAL;
		}
	}

	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
	vport_id = vf->vport_id;

	return qed_configure_vport_wfq(cdev, vport_id, rate);
}

static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_wfq_data *vf_vp_wfq;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}

/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
	smp_mb__before_atomic();
	set_bit(flag, &hwfn->iov_task_flags);
	smp_mb__after_atomic();
	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}

void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		queue_delayed_work(cdev->hwfns[i].iov_wq,
				   &cdev->hwfns[i].iov_task, 0);
}

int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	int i, j;

	for_each_hwfn(cdev, i)
		if (cdev->hwfns[i].iov_wq)
			flush_workqueue(cdev->hwfns[i].iov_wq);

	/* Mark VFs for disablement */
	qed_iov_set_vfs_to_disable(cdev, true);

	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
		pci_disable_sriov(cdev->pdev);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		/* Failure to acquire the ptt in 100g creates an odd error
		 * where the first engine has already released IOV.
		 */
		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			return -EBUSY;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		qed_clean_wfq_db(hwfn, ptt);

		qed_for_each_vf(hwfn, j) {
			int k;

			if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			/* Wait until VF is disabled before releasing */
			for (k = 0; k < 100; k++) {
				if (!qed_iov_is_vf_stopped(hwfn, j))
					msleep(20);
				else
					break;
			}

			if (k < 100)
				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
							  ptt, j);
			else
				DP_ERR(hwfn,
				       "Timeout waiting for VF's FLR to end\n");
		}

		qed_ptt_release(hwfn, ptt);
	}

	qed_iov_set_vfs_to_disable(cdev, false);

	return 0;
}

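/* Queue-id assignment for a VF being enabled: queues are carved out
 * sequentially after the QED_PF_L2_QUE queues owned by the PF.
 */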
static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
					u16 vfid,
					struct qed_iov_vf_init_params *params)
{
	u16 base, i;

	/* Since we have an equal resource distribution per-VF, and we assume
	 * PF has acquired the QED_PF_L2_QUE first queues, we start setting
	 * sequentially from there.
	 */
	base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;

	params->rel_vf_id = vfid;
	for (i = 0; i < params->num_queues; i++) {
		params->req_rx_queue[i] = base + i;
		params->req_tx_queue[i] = base + i;
	}
}

static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
	struct qed_iov_vf_init_params params;
	int i, j, rc = 0;

	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
		DP_NOTICE(cdev, "Can start at most %d VFs\n",
			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
		return -EINVAL;
	}

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct qed_hwfn *hwfn = &cdev->hwfns[j];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
					  FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
					  16);

		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			rc = -EBUSY;
			goto err;
		}

		for (i = 0; i < num; i++) {
			if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qed_sriov_enable_qid_config(hwfn, i, &params);
			rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
			if (rc) {
				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
				qed_ptt_release(hwfn, ptt);
				goto err;
			}
		}

		qed_ptt_release(hwfn, ptt);
	}

	/* Enable SRIOV PCIe functions */
	rc = pci_enable_sriov(cdev->pdev, num);
	if (rc) {
		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
		goto err;
	}

	return num;

err:
	qed_sriov_disable(cdev, false);
	return rc;
}

static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
{
	if (!IS_QED_SRIOV(cdev)) {
		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs_param)
		return qed_sriov_enable(cdev, num_vfs_param);
	else
		return qed_sriov_disable(cdev, true);
}

static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF MAC; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced MAC, and schedule the IOV task */
		ether_addr_copy(vf_info->forced_mac, mac);
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF vlan; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] vlan (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced vlan, and schedule the IOV task */
		vf_info->forced_vlan = vid;
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_get_vf_config(struct qed_dev *cdev,
			     int vf_id, struct ifla_vf_info *ivi)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_public_vf_info *vf_info;
	struct qed_mcp_link_state link;
	u32 tx_rate;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);

	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);

	/* Fill information about VF */
	ivi->vf = vf_id;

	if (is_valid_ether_addr(vf_info->forced_mac))
		ether_addr_copy(ivi->mac, vf_info->forced_mac);
	else
		ether_addr_copy(ivi->mac, vf_info->mac);

	ivi->vlan = vf_info->forced_vlan;
	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
	ivi->linkstate = vf_info->link_state;
	tx_rate = vf_info->tx_rate;
	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);

	return 0;
}

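/* Propagate the PF's link state to every VF bulletin, adjusted per VF for
 * its forced link state and configured Tx rate.
 */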
void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	int i;

	if (!hwfn->pf_iov_info)
		return;

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
		if (!vf_info)
			continue;

		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
		memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
		       sizeof(caps));

		/* Modify link according to the VF's configured link state */
		switch (vf_info->link_state) {
		case IFLA_VF_LINK_STATE_DISABLE:
			link.link_up = false;
			break;
		case IFLA_VF_LINK_STATE_ENABLE:
			link.link_up = true;
			/* Set speed according to maximum supported by HW.
			 * that is 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (hwfn->cdev->num_hwfns > 1) ?
				     100000 : 40000;
		default:
			/* In auto mode pass PF link image to VF */
			break;
		}

		if (link.link_up && vf_info->tx_rate) {
			struct qed_ptt *ptt;
			int rate;

			rate = min_t(int, vf_info->tx_rate, link.speed);

			ptt = qed_ptt_acquire(hwfn);
			if (!ptt) {
				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
				return;
			}

			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
				vf_info->tx_rate = rate;
				link.speed = rate;
			}

			qed_ptt_release(hwfn, ptt);
		}

		qed_iov_set_link(hwfn, i, &params, &link, &caps);
	}

	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}

static int qed_set_vf_link_state(struct qed_dev *cdev,
				 int vf_id, int link_state)
{
	int i;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	/* Handle configuration of link state */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
		if (!vf)
			continue;

		if (vf->link_state == link_state)
			continue;

		vf->link_state = link_state;
		qed_inform_vf_link_state(&cdev->hwfns[i]);
	}

	return 0;
}

static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
{
	int i, rc = -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
		if (rc)
			break;
	}

	return rc;
}

static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set tx rate\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);

		vf->tx_rate = rate;

		qed_inform_vf_link_state(p_hwfn);
	}

	return 0;
}

static int qed_set_vf_rate(struct qed_dev *cdev,
			   int vfid, u32 min_rate, u32 max_rate)
{
	int rc_min = 0, rc_max = 0;

	if (max_rate)
		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);

	if (min_rate)
		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);

	if (rc_max | rc_min)
		return -EINVAL;

	return 0;
}

static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
			DP_NOTICE(hwfn,
				  "SR-IOV sanity check failed, can't set trust\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(hwfn, vfid, true);

		if (vf->is_trusted_request == trust)
			return 0;
		vf->is_trusted_request = trust;

		qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
	}

	return 0;
}

static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
	u64 events[QED_VF_ARRAY_LENGTH];
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Can't acquire PTT; re-scheduling\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
		return;
	}

	qed_iov_pf_get_pending_events(hwfn, events);

	DP_VERBOSE(hwfn, QED_MSG_IOV,
		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
		   events[0], events[1], events[2]);

	qed_for_each_vf(hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
			continue;

		qed_iov_process_mbx_req(hwfn, ptt, i);
	}

	qed_ptt_release(hwfn, ptt);
}

static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
{
	int i;

	qed_for_each_vf(hwfn, i) {
		struct qed_public_vf_info *info;
		bool update = false;
		u8 *mac;

		info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (!info)
			continue;

		/* Update data on bulletin board */
		mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
		if (is_valid_ether_addr(info->forced_mac) &&
		    (!mac || !ether_addr_equal(mac, info->forced_mac))) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);

			/* Update bulletin board with forced MAC */
			qed_iov_bulletin_set_forced_mac(hwfn,
							info->forced_mac, i);
			update = true;
		}

		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
		    info->forced_vlan) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
				   info->forced_vlan,
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
			qed_iov_bulletin_set_forced_vlan(hwfn,
							 info->forced_vlan, i);
			update = true;
		}

		if (update)
			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}

static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
{
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		return;
	}

	qed_for_each_vf(hwfn, i)
		qed_iov_post_vf_bulletin(hwfn, i, ptt);

	qed_ptt_release(hwfn, ptt);
}

*hwfn
)
4235 struct qed_sp_vport_update_params params
;
4236 struct qed_filter_accept_flags
*flags
;
4237 struct qed_public_vf_info
*vf_info
;
4238 struct qed_vf_info
*vf
;
4242 mask
= QED_ACCEPT_UCAST_UNMATCHED
| QED_ACCEPT_MCAST_UNMATCHED
;
4243 flags
= ¶ms
.accept_flags
;
4245 qed_for_each_vf(hwfn
, i
) {
4246 /* Need to make sure current requested configuration didn't
4247 * flip so that we'll end up configuring something that's not
4250 vf_info
= qed_iov_get_public_vf_info(hwfn
, i
, true);
4251 if (vf_info
->is_trusted_configured
==
4252 vf_info
->is_trusted_request
)
4254 vf_info
->is_trusted_configured
= vf_info
->is_trusted_request
;
4256 /* Validate that the VF has a configured vport */
4257 vf
= qed_iov_get_vf_info(hwfn
, i
, true);
4258 if (!vf
->vport_instance
)
4261 memset(¶ms
, 0, sizeof(params
));
4262 params
.opaque_fid
= vf
->opaque_fid
;
4263 params
.vport_id
= vf
->vport_id
;
4265 if (vf_info
->rx_accept_mode
& mask
) {
4266 flags
->update_rx_mode_config
= 1;
4267 flags
->rx_accept_filter
= vf_info
->rx_accept_mode
;
4270 if (vf_info
->tx_accept_mode
& mask
) {
4271 flags
->update_tx_mode_config
= 1;
4272 flags
->tx_accept_filter
= vf_info
->tx_accept_mode
;
4275 /* Remove if needed; Otherwise this would set the mask */
4276 if (!vf_info
->is_trusted_configured
) {
4277 flags
->rx_accept_filter
&= ~mask
;
4278 flags
->tx_accept_filter
&= ~mask
;
4281 if (flags
->update_rx_mode_config
||
4282 flags
->update_tx_mode_config
)
4283 qed_sp_vport_update(hwfn
, ¶ms
,
4284 QED_SPQ_MODE_EBLOCK
, NULL
);
static void qed_iov_pf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	int rc;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		if (!ptt) {
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
			return;
		}

		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
		if (rc)
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);

		qed_ptt_release(hwfn, ptt);
	}

	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
		qed_handle_vf_msg(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_pf_set_vf_unicast(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_bulletin_post(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
		qed_iov_handle_trust_change(hwfn);
}

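/* IOV workqueue setup/teardown. Each hwfn that supports IOV gets a
 * dedicated single-threaded workqueue running qed_iov_pf_task() (or
 * qed_iov_vf_task() on a VF).
 */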
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].iov_wq)
			continue;

		if (schedule_first) {
			qed_schedule_iov(&cdev->hwfns[i],
					 QED_IOV_WQ_STOP_WQ_FLAG);
			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
		}

		flush_workqueue(cdev->hwfns[i].iov_wq);
		destroy_workqueue(cdev->hwfns[i].iov_wq);
	}
}

int qed_iov_wq_start(struct qed_dev *cdev)
{
	char name[NAME_SIZE];
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* PFs needs a dedicated workqueue only if they support IOV.
		 * VFs always require one.
		 */
		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
			continue;

		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

		p_hwfn->iov_wq = create_singlethread_workqueue(name);
		if (!p_hwfn->iov_wq) {
			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
			return -ENOMEM;
		}

		if (IS_PF(p_hwfn->cdev))
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
		else
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
	}

	return 0;
}

const struct qed_iov_hv_ops qed_iov_ops_pass = {
	.configure = &qed_sriov_configure,
	.set_mac = &qed_sriov_pf_set_mac,
	.set_vlan = &qed_sriov_pf_set_vlan,
	.get_config = &qed_get_vf_config,
	.set_link_state = &qed_set_vf_link_state,
	.set_spoof = &qed_spoof_configure,
	.set_rate = &qed_set_vf_rate,
	.set_trust = &qed_set_vf_trust,