/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 */
#include "ecore_sriov.h"
#include "ecore_status.h"
#include "ecore_hw_defs.h"
#include "ecore_int.h"
#include "ecore_hsi_eth.h"
#include "ecore_vfpf_if.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_mcp.h"
#include "ecore_cxt.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
						  u8 opcode,
						  __le16 echo,
						  union event_ring_data *data,
						  u8 fw_return_code);
const char *ecore_channel_tlvs_string[] = {
	"CHANNEL_TLV_NONE", /* ends tlv sequence */
	"CHANNEL_TLV_ACQUIRE",
	"CHANNEL_TLV_VPORT_START",
	"CHANNEL_TLV_VPORT_UPDATE",
	"CHANNEL_TLV_VPORT_TEARDOWN",
	"CHANNEL_TLV_START_RXQ",
	"CHANNEL_TLV_START_TXQ",
	"CHANNEL_TLV_STOP_RXQ",
	"CHANNEL_TLV_STOP_TXQ",
	"CHANNEL_TLV_UPDATE_RXQ",
	"CHANNEL_TLV_INT_CLEANUP",
	"CHANNEL_TLV_RELEASE",
	"CHANNEL_TLV_LIST_END",
	"CHANNEL_TLV_UCAST_FILTER",
	"CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
	"CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
	"CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
	"CHANNEL_TLV_VPORT_UPDATE_MCAST",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
	"CHANNEL_TLV_VPORT_UPDATE_RSS",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
	"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
	"CHANNEL_TLV_UPDATE_TUNN_PARAM",
	"CHANNEL_TLV_COALESCE_UPDATE",
	"CHANNEL_TLV_COALESCE_READ",
	"CHANNEL_TLV_BULLETIN_UPDATE_MAC",
	"CHANNEL_TLV_UPDATE_MTU",
};
static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
{
	u8 legacy = 0;

	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;

	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		legacy |= ECORE_QCID_LEGACY_VF_CID;

	return legacy;
}
static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 fp_minor;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case ECORE_PCI_ETH_ROCE:
	case ECORE_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return ECORE_INVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is"
			   " slightly newer than PF's %02x.%02x; Configuring"
			   " PF's version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR, fp_minor,
			   ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
					     u32 concrete_vfid,
					     u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_STOP,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
			     bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}
struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
					    u16 relative_vf_id,
					    bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return OSAL_NULL;
	}

	if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				    b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}
static struct ecore_queue_cid *
ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
{
	u32 i;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		if (p_queue->cids[i].p_cid &&
		    !p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;
	}

	return OSAL_NULL;
}
enum ecore_iov_validate_q_mode {
	ECORE_IOV_VALIDATE_Q_NA,
	ECORE_IOV_VALIDATE_Q_ENABLE,
	ECORE_IOV_VALIDATE_Q_DISABLE,
};
static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
					  u16 qid,
					  enum ecore_iov_validate_q_mode mode,
					  bool b_is_tx)
{
	u32 i;

	if (mode == ECORE_IOV_VALIDATE_Q_NA)
		return true;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		struct ecore_vf_queue_cid *p_qcid;

		p_qcid = &p_vf->vf_queues[qid].cids[i];

		if (p_qcid->p_cid == OSAL_NULL)
			continue;

		if (p_qcid->b_is_tx != b_is_tx)
			continue;

		/* Found. It's enabled. */
		return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
	}

	/* In case we haven't found any valid cid, then it's disabled */
	return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
}
static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 rx_qid,
				   enum ecore_iov_validate_q_mode mode)
{
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
		return false;
	}

	return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
}
static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 tx_qid,
				   enum ecore_iov_validate_q_mode mode)
{
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
		return false;
	}

	return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
}
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
				  struct ecore_vf_info *p_vf,
				  u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[0%02x] - tried using sb_idx %04x which doesn't exist as"
		   " one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}
/* Is there at least 1 queue open? */
static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (ecore_iov_validate_queue_mode(p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,
						  false))
			return true;

	return false;
}

static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (ecore_iov_validate_queue_mode(p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,
						  true))
			return true;

	return false;
}
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
						int vfid,
						struct ecore_ptt *p_ptt)
{
	struct ecore_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct ecore_dmae_params params;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return ECORE_INVAL;

	/* TODO - check VF is in a state where it can accept message */
	if (!p_vf->vf_bulletin)
		return ECORE_INVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
				     p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				    p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				    &params);
}
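
/* Illustrative sketch (not part of the original driver): a VF-side consumer
 * could validate its copy of a posted bulletin by recomputing the CRC over
 * everything past the crc field and comparing versions, mirroring the
 * producer above. The helper name and parameters are hypothetical.
 */
#if 0	/* example only - not compiled */
static bool example_vf_bulletin_is_valid(struct ecore_bulletin_content *p_copy,
					 u32 bulletin_size, u32 last_version)
{
	int crc_size = sizeof(p_copy->crc);

	/* A stale or torn copy fails either the CRC or the version check */
	if (p_copy->crc != OSAL_CRC32(0, (u8 *)p_copy + crc_size,
				      bulletin_size - crc_size))
		return false;

	return p_copy->version > last_version;
}
#endif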
static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
{
	struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_INITIAL_VF,
				  &iov->initial_vfs);

	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		/* @@@TODO - in future we might want to add an OSAL here to
		 * allow each OS to decide on its own how to act.
		 */
		DP_VERBOSE(p_dev, ECORE_MSG_IOV,
			   "Number of VFs is already set to a non-zero value."
			   " Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev,
				   pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);

	OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
		   " ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
		   " stride %d, page size 0x%x\n",
		   iov->nres, iov->cap, iov->ctrl,
		   iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
		   iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
	    iov->total_vfs > NUM_OF_VFS(p_dev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(p_dev, false,
			  "IOV: Unexpected number of vfs set: %d;"
			  " setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return ECORE_SUCCESS;
}
static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct ecore_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "ecore_iov_setup_vfdb called without alloc mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
		vf->state = VF_STOPPED;

		vf->bulletin.phys = idx *
		    sizeof(struct ecore_bulletin_content) + bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct ecore_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		/* TODO - need to devise a better way of getting opaque */
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
		    (vf->abs_vf_id << 8);

		vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
	}
}
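
/* Worked example of the opaque_fid encoding above (illustrative numbers):
 * with a PF whose opaque_fid low byte is 0x04 and a VF with abs_vf_id 5, the
 * VF is assigned opaque_fid (0x04 & 0xff) | (5 << 8) = 0x0504 - the PF id in
 * the low byte, the absolute VF id in the high byte.
 */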
static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_msg_phys_addr,
					    p_iov_info->mbx_msg_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_reply_phys_addr,
					    p_iov_info->mbx_reply_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
	    num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->bulletins_phys,
					    p_iov_info->bulletins_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%lx phys], "
		   "Response mailbox [%p virt 0x%lx phys] Bulletins"
		   " [%p virt 0x%lx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (unsigned long)p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (unsigned long)p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins,
		   (unsigned long)p_iov_info->bulletins_phys);

	return ECORE_SUCCESS;
}
static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_msg_virt_addr,
				       p_iov_info->mbx_msg_phys_addr,
				       p_iov_info->mbx_msg_size);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_reply_virt_addr,
				       p_iov_info->mbx_reply_phys_addr,
				       p_iov_info->mbx_reply_size);

	if (p_iov_info->p_bulletins)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->p_bulletins,
				       p_iov_info->bulletins_phys,
				       p_iov_info->bulletins_size);
}
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return ECORE_SUCCESS;
	}

	p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_sriov'\n");
		return ECORE_NOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				    ecore_sriov_eqe_event);

	return ecore_iov_allocate_vfdb(p_hwfn);
}
void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	ecore_iov_setup_vfdb(p_hwfn);
}
void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		ecore_iov_free_vfdb(p_hwfn);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
	}
}

void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
{
	OSAL_FREE(p_dev, p_dev->p_iov_info);
}
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	int pos;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Learn the PCI configuration */
	pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
					   PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
		return ECORE_SUCCESS;
	}

	/* Allocate a new struct for IOV information */
	/* TODO - can change to VALLOC when its available */
	p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
					sizeof(*p_dev->p_iov_info));
	if (!p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, false,
			  "Can't support IOV due to lack of memory\n");
		return ECORE_NOMEM;
	}

	p_dev->p_iov_info->pos = pos;

	rc = ecore_iov_pci_cfg_info(p_dev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!p_dev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		OSAL_FREE(p_dev, p_dev->p_iov_info);
		return ECORE_SUCCESS;
	}

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine VFs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on next device,
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by the
	 * latter to differentiate between the two.
	 */

	if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		p_dev->p_iov_info->first_vf_in_pf = first;

		if (ECORE_PATH_ID(p_hwfn))
			p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		p_dev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   p_dev->p_iov_info->first_vf_in_pf);

	return ECORE_SUCCESS;
}
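
/* Worked example for the first-VF computation above (illustrative numbers):
 * with ARI, a PF with abs_pf_id 2 and an SR-IOV offset of 16 yields
 * first_vf_in_pf = 16 + 2 - 16 = 2. Without ARI, an offset of 254 on the same
 * PF falls in the >= (256 - abs_pf_id) range and yields 254 + 2 - 256 = 0.
 */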
static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
				       bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
{
	return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
}
void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
				 u16 rel_vf_id, u8 to_disable)
{
	struct ecore_vf_info *vf;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
				  u8 to_disable)
{
	u16 i;

	if (!IS_ECORE_SRIOV(p_dev))
		return;

	for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
		ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
}
/* @@@TBD Consider taking outside of ecore... */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
					  u16 vf_id,
					  void *ctx)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);

	if (vf != OSAL_NULL) {
		vf->ctx = ctx;
#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
	} else {
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}
static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u8 abs_vfid)
{
	ecore_wr(p_hwfn, p_ptt,
		 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
		 1 << (abs_vfid & 0x1f));
}
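
/* The arithmetic above selects one bit in a bank of 32-bit "was error"
 * registers: e.g. for abs_vfid 37 (illustrative), (37 >> 5) * 4 = 4 addresses
 * the second 32-bit register and 1 << (37 & 0x1f) = 1 << 5 clears VF 37's bit.
 */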
static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  vf->igu_sbs[i],
						  vf->opaque_fid, true);
}
static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}
static enum _ecore_status_t
ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				u8 abs_vf_id, u8 num_sbs)
{
	u8 current_max = 0;
	int i;

	/* If client overrides this, don't do anything */
	if (p_hwfn->p_dev->b_dont_override_vf_msix)
		return ECORE_SUCCESS;

	/* For AH onward, configuration is per-PF. Find maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!ECORE_IS_BB(p_hwfn->p_dev)) {
		ecore_for_each_vf(p_hwfn, i) {
			struct ecore_vf_info *p_vf;

			p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
			if (!p_vf)
				continue;

			current_max = OSAL_MAX_T(u8, current_max,
						 p_vf->num_sbs);
		}
	}

	if (num_sbs > current_max)
		return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
						abs_vf_id, num_sbs);

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

	if (vf->to_disable)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
		   ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
				     ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					     vf->abs_vf_id, vf->num_sbs);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		       p_hwfn->hw_info.hw_mode);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}
/**
 * @brief ecore_iov_config_perm_table - configure the permission zone table.
 *
 *	In E4, queue zone permission table size is 320x9. There
 *	are 320 VF queues for single engine device (256 for dual
 *	engine device), and each entry has the following format:
 *	{Valid, VF[7:0]}
 */
static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				  &qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		ecore_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}
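
/* Illustrative encoding of one permission-table entry written above: for a VF
 * with abs_vf_id 0x12, enable produces 0x12 | (1 << 8) = 0x112 - the valid
 * bit in bit 8 and the owning VF in bits [7:0]; disable writes all zeroes.
 */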
static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf,
				     u16 num_rx_queues)
{
	struct ecore_igu_block *p_block;
	struct cau_sb_entry sb_entry;
	int qid = 0;
	u32 val = 0;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		num_rx_queues =
		    (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = ecore_get_igu_free_sb(p_hwfn, false);
		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~ECORE_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		ecore_wr(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY +
			 sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure igu sb in CAU which were marked valid */
		ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
					p_hwfn->rel_pf_id,
					vf->abs_vf_id, 1);

		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    p_block->igu_sb_id * sizeof(u64), 2, 0);
	}

	vf->num_sbs = (u8)num_rx_queues;

	return vf->num_sbs;
}
/**
 * @brief The function invalidates all the VF entries;
 *        technically this isn't required, but added for
 *        cleanliness and ease of debugging in case a VF attempts to
 *        produce an interrupt after it has been taken down.
 */
static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_vf_info *vf)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = ecore_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		ecore_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
	}

	vf->num_sbs = 0;
}
void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *params,
			struct ecore_mcp_link_state *link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
enum _ecore_status_t
ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 struct ecore_iov_vf_init_params *p_params)
{
	struct ecore_mcp_link_capabilities link_caps;
	struct ecore_mcp_link_params link_params;
	struct ecore_mcp_link_state link_state;
	u8 num_of_vf_available_chains = 0;
	struct ecore_vf_info *vf = OSAL_NULL;
	u16 qid, num_irqs;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cids;
	u8 i;

	vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return ECORE_INVAL;
	}

	/* Perform sanity checking on the requested vport/rss */
	if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
			  p_params->rel_vf_id, p_params->vport_id);
		return ECORE_INVAL;
	}

	if ((p_params->num_queues > 1) &&
	    (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
			  p_params->rel_vf_id, p_params->rss_eng_id);
		return ECORE_INVAL;
	}

	/* TODO - remove this once we get confidence of change */
	if (!p_params->vport_id) {
		DP_NOTICE(p_hwfn, false,
			  "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
			  p_params->rel_vf_id);
	}
	if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
		DP_NOTICE(p_hwfn, false,
			  "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
			  p_params->rel_vf_id);
	}
	vf->vport_id = p_params->vport_id;
	vf->rss_eng_id = p_params->rss_eng_id;

	/* Since it's possible to relocate SBs, it's a bit difficult to check
	 * things here. Simply check whether the index falls in the range
	 * belonging to the PF.
	 */
	for (i = 0; i < p_params->num_queues; i++) {
		qid = p_params->req_rx_queue[i];
		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
			return ECORE_INVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Tx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
			return ECORE_INVAL;
		}
	}

	/* Limit number of queues according to number of CIDs */
	ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues"
		   " [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
								p_ptt,
								vf,
								num_irqs);
	if (num_of_vf_available_chains == 0) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return ECORE_NOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct ecore_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
	}

	/* Update the link configuration in bulletin. */
	OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
		    sizeof(link_params));
	OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
		    sizeof(link_state));
	OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(link_caps));
	ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
			   &link_params, &link_state, &link_caps);

	rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);

	if (rc == ECORE_SUCCESS) {
		vf->b_init = true;
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
			(1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs++;
	}

	return rc;
}
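
/* The active_vfs bookkeeping above packs one bit per VF into an array of
 * 64-bit words: e.g. relative_vf_id 70 (illustrative) lands in word
 * 70 / 64 = 1 at bit 70 % 64 = 6. The release path below clears the same bit.
 */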
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u16 rel_vf_id)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->bulletin.p_virt)
		OSAL_MEMSET(vf->bulletin.p_virt, 0,
			    sizeof(*vf->bulletin.p_virt));

	OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
	OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(caps));
	ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
					~(1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs--;
	}

	return ECORE_SUCCESS;
}
static bool ecore_iov_tlv_supported(u16 tlvtype)
{
	return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
}
static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					 struct ecore_vf_info *vf, u16 tlv)
{
	/* lock the channel */
	/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */

	/* record the locking op */
	/* vf->op_current = tlv; @@@TBD MichalK */

	/* log the lock */
	if (ecore_iov_tlv_supported(tlv))
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %s\n",
			   vf->abs_vf_id,
			   ecore_channel_tlvs_string[tlv]);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %04x\n",
			   vf->abs_vf_id, tlv);
}

static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					   struct ecore_vf_info *vf,
					   u16 expected_tlv)
{
	/* log the unlock */
	if (ecore_iov_tlv_supported(expected_tlv))
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %s\n",
			   vf->abs_vf_id,
			   ecore_channel_tlvs_string[expected_tlv]);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %04x\n",
			   vf->abs_vf_id, expected_tlv);

	/* record the locking op */
	/* vf->op_current = CHANNEL_TLV_NONE; */
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}
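
/* Illustrative sketch (not part of the original driver): building a minimal
 * TLV chain with ecore_add_tlv(). The caller-supplied buffer is hypothetical;
 * real callers use the mailbox request/reply buffers as elsewhere in this
 * file.
 */
#if 0	/* example only - not compiled */
static void example_build_tlv_chain(u8 *buffer)
{
	u8 *offset = buffer;
	struct channel_tlv *tlv;

	/* First TLV; ecore_add_tlv() returns a pointer to the new header */
	tlv = ecore_add_tlv(&offset, CHANNEL_TLV_ACQUIRE,
			    sizeof(struct channel_tlv));

	/* Terminator, so ecore_dp_tlv_list() knows where to stop */
	ecore_add_tlv(&offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
}
#endif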
/* list the types and lengths of the tlvs on the buffer */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		/* cast current tlv list entry to channel tlv header */
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		if (ecore_iov_tlv_supported(tlv->type))
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %s, length %d\n",
				   i, ecore_channel_tlvs_string[tlv->type],
				   tlv->length);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %d, length %d\n",
				   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
			return;
		}
		total_length += tlv->length;
		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}
static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct ecore_vf_info *p_vf,
#ifdef CONFIG_ECORE_SW_CHANNEL
				    u16 length,
#else
				    u16 OSAL_UNUSED length,
#endif
				    u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct ecore_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.response_size =
	    length + sizeof(struct channel_list_end_tlv);

	if (!p_vf->b_hw_channel)
		return;
#endif

	eng_vf_id = p_vf->abs_vf_id;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			     mbx->req_virt->first_tlv.reply_address +
			     sizeof(u64),
			     (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			     &params);

	/* Once PF copies the rc to the VF, the latter can continue
	 * and send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			     mbx->req_virt->first_tlv.reply_address,
			     sizeof(u64) / 4, &params);

	OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
}
static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
{
	switch (flag) {
	case ECORE_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case ECORE_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case ECORE_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case ECORE_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case ECORE_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}
static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf,
					      struct ecore_iov_vf_mbx *p_mbx,
					      u8 status, u16 tlvs_mask,
					      u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))
			continue;

		resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
				     size);

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - vport_update resp: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   ecore_iov_vport_to_tlv(i),
			   resp->hdr.status);

		total_len += size;
	}

	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	return total_len;
}
static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf_info,
				   u16 type, u16 length, u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	ecore_add_tlv(&mbx->offset, type, length);
	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}
struct ecore_public_vf_info *
ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
			     u16 relative_vf_id,
			     bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return OSAL_NULL;

	return &vf->p_vf_info;
}
static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf)
{
	u32 i, j;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
		struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)
				continue;

			ecore_eth_queue_cid_release(p_hwfn,
						    p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = OSAL_NULL;
		}
	}

	OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
}
/* Returns either 0, or log(size) */
static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

	if (val)
		return val + 11;
	return 0;
}
static void
ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *p_vf,
				   struct vf_pf_resc_request *p_req,
				   struct pf_vf_resc *p_resp)
{
	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
	u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
		     DB_ADDR_VF(0, DQ_DEMS_LEGACY);
	u32 bar_size;

	p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);

	/* If VF didn't bother asking for QIDs then don't bother limiting
	 * number of CIDs. The VF doesn't care about the number, and this
	 * has the likely result of causing an additional acquisition.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
	 * that would make sure doorbells for all CIDs fall within the bar.
	 * If it doesn't, make sure regview window is sufficient.
	 */
	if (p_vf->acquire.vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
		bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
		if (bar_size)
			bar_size = 1 << bar_size;

		if (ECORE_IS_CMT(p_hwfn->p_dev))
			bar_size /= 2;
	} else {
		bar_size = PXP_VF_BAR0_DQ_LENGTH;
	}

	if (bar_size / db_size < 256)
		p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
					      (u8)(bar_size / db_size));
}
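
/* Worked example for the CID clamp above (illustrative numbers): with a
 * per-CID doorbell stride of 4KB and a 512KB VF doorbell bar,
 * bar_size / db_size = 128, which is below 256, so num_cids is clamped to at
 * most 128 - every granted CID is then guaranteed a doorbell inside the bar.
 */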
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *p_vf,
					struct vf_pf_resc_request *p_req,
					struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		/* TODO - what's this sb_qid field? Is it deprecated?
		 * or is there an ecore_client that looks at this?
		 */
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				  (u16 *)&p_resp->hw_qid[i]);
	}

	/* Filter related information */
	p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
					     p_req->num_mac_filters);
	p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
					      p_req->num_vlan_filters);

	ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs, p_resp->num_rxqs,
			   p_req->num_txqs, p_resp->num_txqs,
			   p_req->num_sbs, p_resp->num_sbs,
			   p_req->num_mac_filters, p_resp->num_mac_filters,
			   p_req->num_vlan_filters, p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}
static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  OFFSETOF(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  OFFSETOF(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  OFFSETOF(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}
static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf)
{
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	enum _ecore_status_t rc;

	OSAL_MEMSET(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	/* TODO - not doing anything is bad since we'll assert, but this isn't
	 * necessarily the right behavior - perhaps we should have allowed some
	 * versatility here.
	 */
	if (vf->state != VF_FREE &&
	    vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			/* This legacy support would need to be removed once
			 * the major has changed.
			 */
			OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is"
				" incompatible with loaded FW's fastpath"
				" HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if (ECORE_IS_CMT(p_hwfn->p_dev) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support"
			" 100g\n",
			vf->abs_vf_id);
		goto out;
	}

#ifndef __EXTRACT__LINUX__
	if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}
#endif

	/* Store the acquire message */
	OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
	    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
	pfdev_info->db_size = 0;	/* @@@ TBD MichalK Vf Doorbells */
	pfdev_info->indices_per_sb = PIS_PER_SB_E4;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	 * that request it.
	 */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	/* Share the sizes of the bars with VF */
	resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
								 p_ptt);

	ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);

	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
		    ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this information.
	 */
	pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
					      req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
			      OSAL_NULL);

	pfdev_info->dev_type = p_hwfn->p_dev->type;
	pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						    &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = ecore_sp_vf_start(p_hwfn, vf);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
			  vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response, and post
	 * an initial image to the bulletin board.
	 */
	resp->bulletin_size = vf->bulletin.size;
	ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
		   " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
		   " n_vlans-%d\n",
		   vf->abs_vf_id, resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
		   (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
		   resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
		   resc->num_vlan_filters);

	vf->state = VF_ACQUIRED;

out:
	/* Prepare Response */
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			       sizeof(struct pfvf_acquire_resp_tlv),
			       vfpf_status);
}
static enum _ecore_status_t
__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
			 struct ecore_vf_info *p_vf, bool val)
{
	struct ecore_sp_vport_update_params params;
	enum _ecore_status_t rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return ECORE_SUCCESS;
	}

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
				   OSAL_NULL);
	if (rc == ECORE_SUCCESS) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}
static enum _ecore_status_t
ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf)
{
	struct ecore_filter_ucast filter;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	OSAL_MEMSET(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = ECORE_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = ECORE_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter, ECORE_SPQ_MODE_CB,
					       OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "Failed to configure VLAN [%04x]"
				  " to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}
static enum _ecore_status_t
ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
				     struct ecore_vf_info *p_vf, u64 events)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/*TODO - what about MACs? */

	if ((events & (1 << VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}
static enum _ecore_status_t
ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf,
				 u64 events)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_filter_ucast filter;

	if (!p_vf->vport_instance)
		return ECORE_INVAL;

	if ((events & (1 << MAC_ADDR_FORCED)) ||
	    p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_MAC;
		filter.opcode = ECORE_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);

		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
			p_vf->configured_features |=
				1 << VFPF_BULLETIN_MAC_ADDR;
		else
			p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
	}

	if (events & (1 << VLAN_ADDR_FORCED)) {
		struct ecore_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
		    ECORE_FILTER_FLUSH;

		/* Send the ramrod */
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ?
		    1 : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update,
					   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
			struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
			struct ecore_queue_cid *p_cid = OSAL_NULL;

			/* There can be at most 1 Rx queue on qzone. Find it */
			p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
			if (p_cid == OSAL_NULL)
				continue;

			rc = ecore_sp_eth_rx_queues_update(p_hwfn,
							   &p_cid,
							   1, 0, 1,
							   ECORE_SPQ_MODE_EBLOCK,
							   OSAL_NULL);
			if (rc) {
				DP_NOTICE(p_hwfn, true,
					  "Failed to send Rx update"
					  " for queue[0x%04x]\n",
					  p_cid->rel.queue_id);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}
static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
{
	struct ecore_sp_vport_start_params params = { 0 };
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct ecore_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	enum _ecore_status_t rc;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		ecore_int_cau_conf_sb(p_hwfn, p_ptt,
				      start->sb_addr[sb_id],
				      vf->igu_sbs[sb_id],
				      vf->abs_vf_id, 1);
	}

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false,
			  "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
		params.tx_switching = false;
	}

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;
	params.check_mac = true;

	rc = ecore_sp_eth_vport_start(p_hwfn, &params);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
		OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
					  vf->vport_id, vf->opaque_fid);
		__ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			       sizeof(struct pfvf_def_resp_tlv), status);
}
static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
{
	u8 status = PFVF_STATUS_SUCCESS;
	enum _ecore_status_t rc;

	OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
	vf->vport_instance--;
	vf->spoof_chk = false;

	if ((ecore_iov_validate_active_rxq(vf)) ||
	    (ecore_iov_validate_active_txq(vf))) {
		vf->b_malicious = true;
		DP_NOTICE(p_hwfn, false,
			  "VF [%02x] - considered malicious;"
			  " Unable to stop RX/TX queues\n",
			  vf->abs_vf_id);
		status = PFVF_STATUS_MALICIOUS;
		goto out;
	}

	rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	}

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));

out:
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			       sizeof(struct pfvf_def_resp_tlv), status);
}
static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    struct ecore_vf_info *vf,
					    u8 status, bool b_legacy)
{
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		req = &mbx->req_virt->start_rxq;
		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
				OFFSETOF(struct mstorm_vf_zone,
					 non_trigger.eth_rx_queue_producers) +
				sizeof(struct eth_rx_prod_data) * req->rx_qid;
	}

	ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
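
/* Illustration of the non-legacy Rx producer offset computed above: the VF
 * reads its Rx producers from BAR0, at the MSDM zone-B base plus the
 * per-queue slot inside the mstorm VF zone, conceptually:
 *
 *	offset = PXP_VF_BAR0_START_MSDM_ZONE_B
 *		 + OFFSETOF(struct mstorm_vf_zone,
 *			    non_trigger.eth_rx_queue_producers)
 *		 + sizeof(struct eth_rx_prod_data) * rx_qid;
 *
 * Legacy VFs (ETH_HSI_VER_NO_PKT_LEN_TUNN) compute a queue-zone based
 * producer address on their own instead, which is why b_legacy skips this
 * field entirely.
 */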
static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
			       struct ecore_vf_info *p_vf, bool b_is_tx)
{
	struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
	struct vfpf_qid_tlv *p_qid_tlv;

	/* Search for the qid TLV only if the VF indicated it's going to
	 * provide one.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
		if (b_is_tx)
			return ECORE_IOV_LEGACY_QID_TX;
		else
			return ECORE_IOV_LEGACY_QID_RX;
	}

	p_qid_tlv = (struct vfpf_qid_tlv *)
		    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
					       CHANNEL_TLV_QID);
	if (p_qid_tlv == OSAL_NULL) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%2x]: Failed to provide qid\n",
			   p_vf->relative_vf_id);

		return ECORE_IOV_QID_INVALID;
	}

	if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%02x]: Provided qid out-of-bounds %02x\n",
			   p_vf->relative_vf_id, p_qid_tlv->qid);
		return ECORE_IOV_QID_INVALID;
	}

	return p_qid_tlv->qid;
}
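
/* Summary of the qid selection above, for readers of the mailbox flow:
 * - VFs without VFPF_ACQUIRE_CAP_QUEUE_QIDS get a fixed legacy index
 *   (ECORE_IOV_LEGACY_QID_RX or ECORE_IOV_LEGACY_QID_TX), so the Rx and Tx
 *   cids of a queue-zone never collide.
 * - Capable VFs must attach a CHANNEL_TLV_QID TLV carrying a qid below
 *   MAX_QUEUES_PER_QZONE; anything else yields ECORE_IOV_QID_INVALID and
 *   the caller fails the request.
 */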
static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
{
	struct ecore_queue_start_common_params params;
	struct ecore_queue_cid_vf_params vf_params;
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	u8 qid_usage_idx, vf_legacy = 0;
	struct ecore_vf_queue *p_queue;
	struct vfpf_start_rxq_tlv *req;
	struct ecore_queue_cid *p_cid;
	struct ecore_sb_info sb_dummy;
	enum _ecore_status_t rc;

	req = &mbx->req_virt->start_rxq;

	if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
				    ECORE_IOV_VALIDATE_Q_DISABLE) ||
	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
		goto out;

	p_queue = &vf->vf_queues[req->rx_qid];
	if (p_queue->cids[qid_usage_idx].p_cid)
		goto out;

	vf_legacy = ecore_vf_calculate_legacy(vf);

	/* Acquire a new queue-cid */
	OSAL_MEMSET(&params, 0, sizeof(params));
	params.queue_id = (u8)p_queue->fw_rx_qid;
	params.vport_id = vf->vport_id;
	params.stats_id = vf->abs_vf_id + 0x10;

	/* Since IGU index is passed via sb_info, construct a dummy one */
	OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
	sb_dummy.igu_sb_id = req->hw_sb;
	params.p_sb = &sb_dummy;
	params.sb_idx = req->sb_index;

	OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
	vf_params.vfid = vf->relative_vf_id;
	vf_params.vf_qid = (u8)req->rx_qid;
	vf_params.vf_legacy = vf_legacy;
	vf_params.qid_usage_idx = qid_usage_idx;

	p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
				       &params, true, &vf_params);
	if (p_cid == OSAL_NULL)
		goto out;

	/* Legacy VFs have their Producers in a different location, which they
	 * calculate on their own and clean the producer prior to this.
	 */
	if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
		       0);

	rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
					req->bd_max_bytes,
					req->rxq_addr,
					req->cqe_pbl_addr,
					req->cqe_pbl_size);
	if (rc != ECORE_SUCCESS) {
		status = PFVF_STATUS_FAILURE;
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	} else {
		p_queue->cids[qid_usage_idx].p_cid = p_cid;
		p_queue->cids[qid_usage_idx].b_is_tx = false;
		status = PFVF_STATUS_SUCCESS;
		vf->num_active_rxqs++;
	}

out:
	ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
					!!(vf_legacy &
					   ECORE_QCID_LEGACY_VF_RX_PROD));
}
static void
ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
				 struct ecore_tunnel_info *p_tun,
				 u16 tunn_feature_mask)
{
	p_resp->tunn_feature_mask = tunn_feature_mask;
	p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
	p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
	p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
	p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
	p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
	p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
	p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
	p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
	p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
	p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
	p_resp->geneve_udp_port = p_tun->geneve_port.port;
	p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
}
static void
__ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
				struct ecore_tunn_update_type *p_tun,
				enum ecore_tunn_mode mask, u8 tun_cls)
{
	if (p_req->tun_mode_update_mask & (1 << mask)) {
		p_tun->b_update_mode = true;

		if (p_req->tunn_mode & (1 << mask))
			p_tun->b_mode_enabled = true;
	}

	p_tun->tun_cls = tun_cls;
}
static void
ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
			      struct ecore_tunn_update_type *p_tun,
			      struct ecore_tunn_update_udp_port *p_port,
			      enum ecore_tunn_mode mask,
			      u8 tun_cls, u8 update_port, u16 port)
{
	if (update_port) {
		p_port->b_update_port = true;
		p_port->port = port;
	}

	__ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
}
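
/* Example of how a VF request maps through the helper above: if the VF set
 * bit ECORE_MODE_VXLAN_TUNN in both tun_mode_update_mask and tunn_mode, plus
 * update_vxlan_port = 1 with port = 4789, the PF ends up with
 * tunn.vxlan.b_update_mode = true, tunn.vxlan.b_mode_enabled = true and
 * tunn.vxlan_port = { .b_update_port = true, .port = 4789 }.
 * (Values are illustrative; 4789 is merely the conventional VXLAN port.)
 */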
static bool
ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
{
	bool b_update_requested = false;

	if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
	    p_req->update_geneve_port || p_req->update_vxlan_port)
		b_update_requested = true;

	return b_update_requested;
}
static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt,
					       struct ecore_vf_info *p_vf)
{
	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_update_tunn_param_tlv *p_resp;
	struct vfpf_update_tunn_param_tlv *p_req;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u8 status = PFVF_STATUS_SUCCESS;
	bool b_update_required = false;
	struct ecore_tunnel_info tunn;
	u16 tunn_feature_mask = 0;
	int i;

	mbx->offset = (u8 *)mbx->reply_virt;

	OSAL_MEM_ZERO(&tunn, sizeof(tunn));
	p_req = &mbx->req_virt->tunn_param_update;

	if (!ecore_iov_pf_validate_tunn_param(p_req)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No tunnel update requested by VF\n");
		status = PFVF_STATUS_FAILURE;
		goto send_resp;
	}

	tunn.b_update_rx_cls = p_req->update_tun_cls;
	tunn.b_update_tx_cls = p_req->update_tun_cls;

	ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
				      ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
				      p_req->update_vxlan_port,
				      p_req->vxlan_port);
	ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
				      ECORE_MODE_L2GENEVE_TUNN,
				      p_req->l2geneve_clss,
				      p_req->update_geneve_port,
				      p_req->geneve_port);
	__ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
					ECORE_MODE_IPGENEVE_TUNN,
					p_req->ipgeneve_clss);
	__ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
					ECORE_MODE_L2GRE_TUNN,
					p_req->l2gre_clss);
	__ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
					ECORE_MODE_IPGRE_TUNN,
					p_req->ipgre_clss);

	/* If PF modifies VF's req then it should
	 * still return an error in case of partial configuration
	 * or modified configuration as opposed to requested one.
	 */
	rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
						 &b_update_required, &tunn);

	if (rc != ECORE_SUCCESS)
		status = PFVF_STATUS_FAILURE;

	/* Does the ECORE client want to update anything? */
	if (b_update_required) {
		u16 geneve_port;

		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
						 ECORE_SPQ_MODE_EBLOCK,
						 OSAL_NULL);
		if (rc != ECORE_SUCCESS)
			status = PFVF_STATUS_FAILURE;

		geneve_port = p_tun->geneve_port.port;
		ecore_for_each_vf(p_hwfn, i) {
			ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
							 p_tun->vxlan_port.port,
							 geneve_port);
		}
	}

send_resp:
	p_resp = ecore_add_tlv(&mbx->offset,
			       CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));

	ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}
static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    struct ecore_vf_info *p_vf,
					    u32 cid, u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	bool b_legacy = false;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		b_legacy = true;

	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
		p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);

	ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
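
/* For non-legacy VFs the reply above carries the Tx doorbell offset within
 * the VF doorbell BAR, derived from the queue's cid via
 * DB_ADDR_VF(cid, DQ_DEMS_LEGACY); legacy clients derive the doorbell
 * address on their own, so they only receive a default-sized response.
 */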
static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
{
	struct ecore_queue_start_common_params params;
	struct ecore_queue_cid_vf_params vf_params;
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	struct ecore_vf_queue *p_queue;
	struct vfpf_start_txq_tlv *req;
	struct ecore_queue_cid *p_cid;
	struct ecore_sb_info sb_dummy;
	u8 qid_usage_idx, vf_legacy;
	u32 cid = 0;
	enum _ecore_status_t rc;
	u16 pq;

	OSAL_MEMSET(&params, 0, sizeof(params));
	req = &mbx->req_virt->start_txq;

	if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
				    ECORE_IOV_VALIDATE_Q_NA) ||
	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
		goto out;

	p_queue = &vf->vf_queues[req->tx_qid];
	if (p_queue->cids[qid_usage_idx].p_cid)
		goto out;

	vf_legacy = ecore_vf_calculate_legacy(vf);

	/* Acquire a new queue-cid */
	params.queue_id = p_queue->fw_tx_qid;
	params.vport_id = vf->vport_id;
	params.stats_id = vf->abs_vf_id + 0x10;

	/* Since IGU index is passed via sb_info, construct a dummy one */
	OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
	sb_dummy.igu_sb_id = req->hw_sb;
	params.p_sb = &sb_dummy;
	params.sb_idx = req->sb_index;

	OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
	vf_params.vfid = vf->relative_vf_id;
	vf_params.vf_qid = (u8)req->tx_qid;
	vf_params.vf_legacy = vf_legacy;
	vf_params.qid_usage_idx = qid_usage_idx;

	p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
				       &params, false, &vf_params);
	if (p_cid == OSAL_NULL)
		goto out;

	pq = ecore_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
	rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
					req->pbl_addr, req->pbl_size, pq);
	if (rc != ECORE_SUCCESS) {
		status = PFVF_STATUS_FAILURE;
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	} else {
		status = PFVF_STATUS_SUCCESS;
		p_queue->cids[qid_usage_idx].p_cid = p_cid;
		p_queue->cids[qid_usage_idx].b_is_tx = true;
		cid = p_cid->cid;
	}

out:
	ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
					cid, status);
}
static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
						   struct ecore_vf_info *vf,
						   u16 rxq_id,
						   u8 qid_usage_idx,
						   bool cqe_completion)
{
	struct ecore_vf_queue *p_queue;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
				    ECORE_IOV_VALIDATE_Q_NA)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
			   vf->relative_vf_id, rxq_id, qid_usage_idx);
		return ECORE_INVAL;
	}

	p_queue = &vf->vf_queues[rxq_id];

	/* We've validated the index and the existence of the active RXQ -
	 * now we need to make sure that it's using the correct qid.
	 */
	if (!p_queue->cids[qid_usage_idx].p_cid ||
	    p_queue->cids[qid_usage_idx].b_is_tx) {
		struct ecore_queue_cid *p_cid;

		p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
			   vf->relative_vf_id, rxq_id, qid_usage_idx,
			   rxq_id, p_cid->qid_usage_idx);
		return ECORE_INVAL;
	}

	/* Now that we know we have a valid Rx-queue - close it */
	rc = ecore_eth_rx_queue_stop(p_hwfn,
				     p_queue->cids[qid_usage_idx].p_cid,
				     false, cqe_completion);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
	vf->num_active_rxqs--;

	return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
						   struct ecore_vf_info *vf,
						   u16 txq_id,
						   u8 qid_usage_idx)
{
	struct ecore_vf_queue *p_queue;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
				    ECORE_IOV_VALIDATE_Q_NA))
		return ECORE_INVAL;

	p_queue = &vf->vf_queues[txq_id];
	if (!p_queue->cids[qid_usage_idx].p_cid ||
	    !p_queue->cids[qid_usage_idx].b_is_tx)
		return ECORE_INVAL;

	rc = ecore_eth_tx_queue_stop(p_hwfn,
				     p_queue->cids[qid_usage_idx].p_cid);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
	return ECORE_SUCCESS;
}
static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_FAILURE;
	struct vfpf_stop_rxqs_tlv *req;
	u8 qid_usage_idx;
	enum _ecore_status_t rc;

	/* Starting with CHANNEL_TLV_QID, it's assumed the 'num_rxqs'
	 * would be one. Since no older ecore passed multiple queues
	 * using this API, sanitize on the value.
	 */
	req = &mbx->req_virt->stop_rxqs;
	if (req->num_rxqs != 1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Odd; VF[%d] tried stopping multiple Rx queues\n",
			   vf->relative_vf_id);
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	/* Find which qid-index is associated with the queue */
	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
		goto out;

	rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
				    qid_usage_idx, req->cqe_completion);
	if (rc == ECORE_SUCCESS)
		status = PFVF_STATUS_SUCCESS;

out:
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
			       length, status);
}
static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_FAILURE;
	struct vfpf_stop_txqs_tlv *req;
	u8 qid_usage_idx;
	enum _ecore_status_t rc;

	/* Starting with CHANNEL_TLV_QID, it's assumed the 'num_txqs'
	 * would be one. Since no older ecore passed multiple queues
	 * using this API, sanitize on the value.
	 */
	req = &mbx->req_virt->stop_txqs;
	if (req->num_txqs != 1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Odd; VF[%d] tried stopping multiple Tx queues\n",
			   vf->relative_vf_id);
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	/* Find which qid-index is associated with the queue */
	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
		goto out;

	rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
				    qid_usage_idx);
	if (rc == ECORE_SUCCESS)
		status = PFVF_STATUS_SUCCESS;

out:
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
			       length, status);
}
static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
{
	struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_rxq_tlv *req;
	u8 status = PFVF_STATUS_FAILURE;
	u8 complete_event_flg;
	u8 complete_cqe_flg;
	u8 qid_usage_idx;
	enum _ecore_status_t rc;
	u16 i;

	req = &mbx->req_virt->update_rxq;
	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);

	qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == ECORE_IOV_QID_INVALID)
		goto out;

	/* Starting with the addition of CHANNEL_TLV_QID, this API started
	 * expecting a single queue at a time. Validate this.
	 */
	if ((vf->acquire.vfdev_info.capabilities &
	     VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
	    req->num_rxqs != 1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] supports QIDs but sends multiple queues\n",
			   vf->relative_vf_id);
		goto out;
	}

	/* Validate inputs - for the legacy case this is still true since
	 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
	 */
	for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
		if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
					    ECORE_IOV_VALIDATE_Q_NA) ||
		    !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
		    vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
				   vf->relative_vf_id, req->rx_qid,
				   req->num_rxqs);
			goto out;
		}
	}

	for (i = 0; i < req->num_rxqs; i++) {
		u16 qid = req->rx_qid + i;

		handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
	}

	rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
					   req->num_rxqs,
					   complete_cqe_flg,
					   complete_event_flg,
					   ECORE_SPQ_MODE_EBLOCK,
					   OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto out;

	status = PFVF_STATUS_SUCCESS;
out:
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
			       length, status);
}
static enum _ecore_status_t
ecore_iov_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   struct ecore_vf_info *p_vf)
{
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct ecore_sp_vport_update_params params;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct vfpf_update_mtu_tlv *p_req;
	u8 status = PFVF_STATUS_SUCCESS;

	/* Validate the VF can send such a request */
	if (!p_vf->vport_instance) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing MTU update\n",
			   p_vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto send_status;
	}

	p_req = &mbx->req_virt->update_mtu;

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.mtu = p_req->mtu;
	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
				   OSAL_NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

send_status:
	ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
			       CHANNEL_TLV_UPDATE_MTU,
			       sizeof(struct pfvf_def_resp_tlv),
			       status);
	return rc;
}
void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
				 void *p_tlvs_list, u16 req_type)
{
	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
	int len = 0;

	do {
		if (!p_tlv->length) {
			DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
			return OSAL_NULL;
		}

		if (p_tlv->type == req_type) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "Extended tlv type %s, length %d found\n",
				   ecore_channel_tlvs_string[p_tlv->type],
				   p_tlv->length);
			return p_tlv;
		}

		len += p_tlv->length;
		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
			DP_NOTICE(p_hwfn, true,
				  "TLVs have overrun the buffer size\n");
			return OSAL_NULL;
		}
	} while (p_tlv->type != CHANNEL_TLV_LIST_END);

	return OSAL_NULL;
}
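
/* The request buffer walked above is a packed chain of TLVs:
 *
 *	[ first_tlv | tlv | tlv | ... | CHANNEL_TLV_LIST_END ]
 *
 * Each entry starts with a struct channel_tlv { u16 type; u16 length; }
 * header followed by its payload; 'length' covers header plus payload, so
 * advancing by p_tlv->length lands on the next header. The TLV_BUFFER_SIZE
 * check guards against a malformed chain walking past the mailbox buffer.
 */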
static void
ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
			      struct ecore_sp_vport_update_params *p_data,
			      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_activate_tlv *p_act_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;

	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
		    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_act_tlv)
		return;

	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
}
static void
ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
			       struct ecore_sp_vport_update_params *p_data,
			       struct ecore_vf_info *p_vf,
			       struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;

	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
		     ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_vlan_tlv)
		return;

	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;

	/* Ignore the VF request if we're forcing a vlan */
	if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
		p_data->update_inner_vlan_removal_flg = 1;
		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
	}

	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
}
static void
ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
			      struct ecore_sp_vport_update_params *p_data,
			      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;

	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
			  ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						     tlv);
	if (!p_tx_switch_tlv)
		return;

	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false,
			  "FPGA: Ignore tx-switching configuration originating"
			  " from VFs\n");
		return;
	}

	p_data->update_tx_switching_flg = 1;
	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
}
static void
ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data,
				    struct ecore_iov_vf_mbx *p_mbx,
				    u16 *tlvs_mask)
{
	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;

	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
		      ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_mcast_tlv)
		return;

	p_data->update_approx_mcast_flg = 1;
	OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
		    sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
}
static void
ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
				struct ecore_sp_vport_update_params *p_data,
				struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;

	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
		       ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_accept_tlv)
		return;

	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
}
static void
ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data,
				    struct ecore_iov_vf_mbx *p_mbx,
				    u16 *tlvs_mask)
{
	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;

	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
			    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						       tlv);
	if (!p_accept_any_vlan)
		return;

	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
	p_data->update_accept_any_vlan_flg =
			p_accept_any_vlan->update_accept_any_vlan_flg;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}
static void
ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
			      struct ecore_vf_info *vf,
			      struct ecore_sp_vport_update_params *p_data,
			      struct ecore_rss_params *p_rss,
			      struct ecore_iov_vf_mbx *p_mbx,
			      u16 *tlvs_mask, u16 *tlvs_accepted)
{
	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
	bool b_reject = false;
	u16 table_size;
	u16 i, q_idx;

	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
		    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_rss_tlv) {
		p_data->rss_params = OSAL_NULL;
		return;
	}

	OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));

	p_rss->update_rss_config =
	    !!(p_rss_tlv->update_rss_flags &
	       VFPF_UPDATE_RSS_CONFIG_FLAG);
	p_rss->update_rss_capabilities =
	    !!(p_rss_tlv->update_rss_flags &
	       VFPF_UPDATE_RSS_CAPS_FLAG);
	p_rss->update_rss_ind_table =
	    !!(p_rss_tlv->update_rss_flags &
	       VFPF_UPDATE_RSS_IND_TABLE_FLAG);
	p_rss->update_rss_key =
	    !!(p_rss_tlv->update_rss_flags &
	       VFPF_UPDATE_RSS_KEY_FLAG);

	p_rss->rss_enable = p_rss_tlv->rss_enable;
	p_rss->rss_eng_id = vf->rss_eng_id;
	p_rss->rss_caps = p_rss_tlv->rss_caps;
	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
	OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
		    sizeof(p_rss->rss_key));

	table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
				(1 << p_rss_tlv->rss_table_size_log));

	for (i = 0; i < table_size; i++) {
		struct ecore_queue_cid *p_cid;

		q_idx = p_rss_tlv->rss_ind_table[i];
		if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
					    ECORE_IOV_VALIDATE_Q_ENABLE)) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
				   vf->relative_vf_id, q_idx);
			b_reject = true;
			goto out;
		}

		p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
		p_rss->rss_ind_table[i] = p_cid;
	}

	p_data->rss_params = p_rss;
out:
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
	if (!b_reject)
		*tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
}
static void
ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
				  struct ecore_sp_vport_update_params *p_data,
				  struct ecore_sge_tpa_params *p_sge_tpa,
				  struct ecore_iov_vf_mbx *p_mbx,
				  u16 *tlvs_mask)
{
	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
			ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						   tlv);

	if (!p_sge_tpa_tlv) {
		p_data->sge_tpa_params = OSAL_NULL;
		return;
	}

	OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
	       VFPF_UPDATE_TPA_PARAM_FLAG);

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
}
static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct ecore_vf_info *vf)
{
	struct ecore_rss_params *p_rss_params = OSAL_NULL;
	struct ecore_sp_vport_update_params params;
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct ecore_sge_tpa_params sge_tpa_params;
	u16 tlvs_mask = 0, tlvs_accepted = 0;
	u8 status = PFVF_STATUS_SUCCESS;
	u16 length;
	enum _ecore_status_t rc;

	/* Validate the VF can send such a request */
	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No VPORT instance available for VF[%d],"
			   " failing vport update\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
	if (p_rss_params == OSAL_NULL) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.rss_params = OSAL_NULL;

	/* Search for extended tlvs list and update values
	 * from VF in struct ecore_sp_vport_update_params.
	 */
	ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
	ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
					  &sge_tpa_params, mbx, &tlvs_mask);

	tlvs_accepted = tlvs_mask;

	/* Some of the extended TLVs need to be validated first; In that case,
	 * they can update the mask without updating the accepted [so that
	 * PF could communicate to VF it has rejected request].
	 */
	ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
				      mbx, &tlvs_mask, &tlvs_accepted);

	/* Just log a message if there is no single extended tlv in buffer.
	 * When all features of vport update ramrod would be requested by VF
	 * as extended TLVs in buffer then an error can be returned in response
	 * if there is no extended TLV present in buffer.
	 */
	if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
				     &params, &tlvs_accepted) !=
	    ECORE_SUCCESS) {
		tlvs_accepted = 0;
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	if (!tlvs_accepted) {
		if (tlvs_mask)
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "Upper-layer prevents said VF"
				   " configuration\n");
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "No feature tlvs found for vport update\n");
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
				   OSAL_NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
	length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
						    tlvs_mask, tlvs_accepted);
	ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
static enum _ecore_status_t
ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
				struct ecore_vf_info *p_vf,
				struct ecore_filter_ucast *p_params)
{
	int i;

	/* First remove entries and then add new ones */
	if (p_params->opcode == ECORE_FILTER_REMOVE) {
		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			if (p_vf->shadow_config.vlans[i].used &&
			    p_vf->shadow_config.vlans[i].vid ==
			    p_params->vlan) {
				p_vf->shadow_config.vlans[i].used = false;
				break;
			}
		if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%d] - Tries to remove a non-existing"
				   " vlan\n",
				   p_vf->relative_vf_id);
			return ECORE_INVAL;
		}
	} else if (p_params->opcode == ECORE_FILTER_REPLACE ||
		   p_params->opcode == ECORE_FILTER_FLUSH) {
		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			p_vf->shadow_config.vlans[i].used = false;
	}

	/* In forced mode, we're willing to remove entries - but we don't add
	 * new ones.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
		return ECORE_SUCCESS;

	if (p_params->opcode == ECORE_FILTER_ADD ||
	    p_params->opcode == ECORE_FILTER_REPLACE) {
		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
			if (p_vf->shadow_config.vlans[i].used)
				continue;

			p_vf->shadow_config.vlans[i].used = true;
			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
			break;
		}

		if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%d] - Tries to configure more than %d"
				   " vlan filters\n",
				   p_vf->relative_vf_id,
				   ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
			return ECORE_INVAL;
		}
	}

	return ECORE_SUCCESS;
}
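
/* The shadow VLAN table above is what lets the PF replay a VF's filters
 * after events such as a forced-PVID removal: ECORE_FILTER_ADD claims the
 * first unused slot, ECORE_FILTER_REMOVE clears a matching vid, and
 * REPLACE/FLUSH wipe the table first. With a forced VLAN in the bulletin,
 * removals are honored but no new entries are recorded.
 */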
static enum _ecore_status_t
ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
			       struct ecore_vf_info *p_vf,
			       struct ecore_filter_ucast *p_params)
{
	char empty_mac[ETH_ALEN];
	int i;

	OSAL_MEM_ZERO(empty_mac, ETH_ALEN);

	/* If we're in forced-mode, we don't allow any change */
	/* TODO - this would change if we were ever to implement logic for
	 * removing a forced MAC altogether [in which case, like for vlans,
	 * we should be able to re-trace previous configuration.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
		return ECORE_SUCCESS;

	/* First remove entries and then add new ones */
	if (p_params->opcode == ECORE_FILTER_REMOVE) {
		for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
					 p_params->mac, ETH_ALEN)) {
				OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
					      ETH_ALEN);
				break;
			}
		}

		if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "MAC isn't configured\n");
			return ECORE_INVAL;
		}
	} else if (p_params->opcode == ECORE_FILTER_REPLACE ||
		   p_params->opcode == ECORE_FILTER_FLUSH) {
		for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
			OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
	}

	/* List the new MAC address */
	if (p_params->opcode != ECORE_FILTER_ADD &&
	    p_params->opcode != ECORE_FILTER_REPLACE)
		return ECORE_SUCCESS;

	for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
		if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
				 empty_mac, ETH_ALEN)) {
			OSAL_MEMCPY(p_vf->shadow_config.macs[i],
				    p_params->mac, ETH_ALEN);
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "Added MAC at %d entry in shadow\n", i);
			break;
		}
	}

	if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No available place for MAC\n");
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   struct ecore_filter_ucast *p_params)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (p_params->type == ECORE_FILTER_MAC) {
		rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	if (p_params->type == ECORE_FILTER_VLAN)
		rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);

	return rc;
}
static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct ecore_vf_info *vf)
{
	struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_ucast_filter_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	struct ecore_filter_ucast params;
	enum _ecore_status_t rc;

	/* Prepare the unicast filter params */
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
	req = &mbx->req_virt->ucast_filter;
	params.opcode = (enum ecore_filter_opcode)req->opcode;
	params.type = (enum ecore_filter_ucast_type)req->type;

	/* @@@TBD - We might need logic on HV side in determining this */
	params.is_rx_filter = 1;
	params.is_tx_filter = 1;
	params.vport_to_remove_from = vf->vport_id;
	params.vport_to_add_to = vf->vport_id;
	OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
	params.vlan = req->vlan;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
		   " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
		   vf->abs_vf_id, params.opcode, params.type,
		   params.is_rx_filter ? "RX" : "",
		   params.is_tx_filter ? "TX" : "",
		   params.vport_to_add_to,
		   params.mac[0], params.mac[1], params.mac[2],
		   params.mac[3], params.mac[4], params.mac[5], params.vlan);

	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No VPORT instance available for VF[%d],"
			   " failing ucast MAC configuration\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Update shadow copy of the VF configuration. In case shadow indicates
	 * the action should be blocked return success to VF to imitate the
	 * firmware behaviour in such case.
	 */
	if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
	    ECORE_SUCCESS)
		goto out;

	/* Determine if the unicast filtering is acceptable by PF */
	if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
	    (params.type == ECORE_FILTER_VLAN ||
	     params.type == ECORE_FILTER_MAC_VLAN)) {
		/* Once VLAN is forced or PVID is set, do not allow
		 * to add/replace any further VLANs.
		 */
		if (params.opcode == ECORE_FILTER_ADD ||
		    params.opcode == ECORE_FILTER_REPLACE)
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
	    (params.type == ECORE_FILTER_MAC ||
	     params.type == ECORE_FILTER_MAC_VLAN)) {
		if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
		    (params.opcode != ECORE_FILTER_ADD &&
		     params.opcode != ECORE_FILTER_REPLACE))
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
	if (rc == ECORE_EXISTS) {
		goto out;
	} else if (rc == ECORE_INVAL) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
				       ECORE_SPQ_MODE_CB, OSAL_NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
			       sizeof(struct pfvf_def_resp_tlv), status);
}
static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
{
	int i;

	/* Reset the SBs */
	for (i = 0; i < vf->num_sbs; i++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  vf->igu_sbs[i],
						  vf->opaque_fid, false);

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
			       sizeof(struct pfvf_def_resp_tlv),
			       PFVF_STATUS_SUCCESS);
}
static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;

	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
			       length, status);
}
static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *p_vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	ecore_iov_vf_cleanup(p_hwfn, p_vf);

	if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
		/* Stopping the VF */
		rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
				      p_vf->opaque_fid);

		if (rc != ECORE_SUCCESS) {
			DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
			       rc);
			status = PFVF_STATUS_FAILURE;
		}

		p_vf->state = VF_STOPPED;
	}

	ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
			       length, status);
}
static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *p_vf)
{
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_read_coal_resp_tlv *p_resp;
	struct vfpf_read_coal_req_tlv *req;
	u8 status = PFVF_STATUS_FAILURE;
	struct ecore_vf_queue *p_queue;
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u16 coal = 0, qid, i;
	bool b_is_rx;

	mbx->offset = (u8 *)mbx->reply_virt;
	req = &mbx->req_virt->read_coal_req;

	qid = req->qid;
	b_is_rx = req->is_rx ? true : false;

	if (b_is_rx) {
		if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
					    ECORE_IOV_VALIDATE_Q_ENABLE)) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d]: Invalid Rx queue_id = %d\n",
				   p_vf->abs_vf_id, qid);
			goto send_resp;
		}

		p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
		rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
		if (rc != ECORE_SUCCESS)
			goto send_resp;
	} else {
		if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
					    ECORE_IOV_VALIDATE_Q_ENABLE)) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d]: Invalid Tx queue_id = %d\n",
				   p_vf->abs_vf_id, qid);
			goto send_resp;
		}
		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
			p_queue = &p_vf->vf_queues[qid];
			if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
			    (!p_queue->cids[i].b_is_tx))
				continue;

			p_cid = p_queue->cids[i].p_cid;

			rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
						    p_cid, &coal);
			if (rc != ECORE_SUCCESS)
				goto send_resp;
			break;
		}
	}

	status = PFVF_STATUS_SUCCESS;

send_resp:
	p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
			       sizeof(*p_resp));
	p_resp->coal = coal;

	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}
static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
{
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct vfpf_update_coalesce *req;
	u8 status = PFVF_STATUS_FAILURE;
	struct ecore_queue_cid *p_cid;
	u16 rx_coal, tx_coal;
	u16 qid;
	u32 i;

	req = &mbx->req_virt->update_coalesce;

	rx_coal = req->rx_coal;
	tx_coal = req->tx_coal;
	qid = req->qid;

	if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
				    ECORE_IOV_VALIDATE_Q_ENABLE) &&
	    rx_coal) {
		DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
		       vf->abs_vf_id, qid);
		goto out;
	}

	if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
				    ECORE_IOV_VALIDATE_Q_ENABLE) &&
	    tx_coal) {
		DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
		       vf->abs_vf_id, qid);
		goto out;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
		   vf->abs_vf_id, rx_coal, tx_coal, qid);

	if (rx_coal) {
		p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);

		rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
		if (rc != ECORE_SUCCESS) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d]: Unable to set rx queue = %d coalesce\n",
				   vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
			goto out;
		}
		vf->rx_coal = rx_coal;
	}

	/* TODO - in future, it might be possible to pass this in a per-cid
	 * granularity. For now, do this for all Tx queues.
	 */
	if (tx_coal) {
		struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];

		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
			if (p_queue->cids[i].p_cid == OSAL_NULL)
				continue;

			if (!p_queue->cids[i].b_is_tx)
				continue;

			rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
						    p_queue->cids[i].p_cid);
			if (rc != ECORE_SUCCESS) {
				DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
					   "VF[%d]: Unable to set tx queue coalesce\n",
					   vf->abs_vf_id);
				goto out;
			}
		}
		vf->tx_coal = tx_coal;
	}

	status = PFVF_STATUS_SUCCESS;
out:
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
			       sizeof(struct pfvf_def_resp_tlv), status);
}
enum _ecore_status_t
ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
					 u16 rx_coal, u16 tx_coal,
					 u16 vf_id, u16 qid)
{
	struct ecore_queue_cid *p_cid;
	struct ecore_vf_info *vf;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_INVAL;
	u32 i;

	if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
		DP_NOTICE(p_hwfn, true,
			  "VF[%d] - Can not set coalescing: VF is not active\n",
			  vf_id);
		return rc;
	}

	vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return rc;

	if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
				    ECORE_IOV_VALIDATE_Q_ENABLE) &&
	    rx_coal) {
		DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
		       vf->abs_vf_id, qid);
		goto out;
	}

	if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
				    ECORE_IOV_VALIDATE_Q_ENABLE) &&
	    tx_coal) {
		DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
		       vf->abs_vf_id, qid);
		goto out;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
		   vf->abs_vf_id, rx_coal, tx_coal, qid);

	if (rx_coal) {
		p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);

		rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
		if (rc != ECORE_SUCCESS) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d]: Unable to set rx queue = %d coalesce\n",
				   vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
			goto out;
		}
		vf->rx_coal = rx_coal;
	}

	/* TODO - in future, it might be possible to pass this in a per-cid
	 * granularity. For now, do this for all Tx queues.
	 */
	if (tx_coal) {
		struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];

		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
			if (p_queue->cids[i].p_cid == OSAL_NULL)
				continue;

			if (!p_queue->cids[i].b_is_tx)
				continue;

			rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
						    p_queue->cids[i].p_cid);
			if (rc != ECORE_SUCCESS) {
				DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
					   "VF[%d]: Unable to set tx queue coalesce\n",
					   vf->abs_vf_id);
				goto out;
			}
		}
		vf->tx_coal = tx_coal;
	}

out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
static enum _ecore_status_t
ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
			   struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
{
	int cnt;
	u32 val;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);

	for (cnt = 0; cnt < 50; cnt++) {
		val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
		if (!val)
			break;
		OSAL_MSLEEP(20);
	}
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	if (cnt == 50) {
		DP_ERR(p_hwfn,
		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
		       p_vf->abs_vf_id, val);
		return ECORE_TIMEOUT;
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
			  struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
{
	u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
	int i, cnt;

	/* Read initial consumers & producers */
	for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
		u32 prod;

		cons[i] = ecore_rd(p_hwfn, p_ptt,
				   PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				   i * 0x40);
		prod = ecore_rd(p_hwfn, p_ptt,
				PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
				i * 0x40);
		distance[i] = prod - cons[i];
	}

	/* Wait for consumers to pass the producers */
	i = 0;
	for (cnt = 0; cnt < 50; cnt++) {
		for (; i < MAX_NUM_VOQS_E4; i++) {
			u32 tmp;

			tmp = ecore_rd(p_hwfn, p_ptt,
				       PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				       i * 0x40);
			if (distance[i] > tmp - cons[i])
				break;
		}

		if (i == MAX_NUM_VOQS_E4)
			break;

		OSAL_MSLEEP(20);
	}

	if (cnt == 50) {
		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
		       p_vf->abs_vf_id, i);
		return ECORE_TIMEOUT;
	}

	return ECORE_SUCCESS;
}
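
/* The 'distance' check above relies on unsigned wraparound: with
 * distance[i] = prod - cons[i] sampled once, the VOQ is deemed drained when
 * the current consumer has advanced by at least that many blocks, i.e.
 * (tmp - cons[i]) >= distance[i], which stays correct even if the 32-bit
 * counters wrap between reads.
 */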
static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
						  struct ecore_vf_info *p_vf,
						  struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;

	/* TODO - add SRC and TM polling once we add storage IOV */

	rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u16 rel_vf_id, u32 *ack_vfs)
{
	struct ecore_vf_info *p_vf;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!p_vf)
		return ECORE_SUCCESS;

	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
	    (1ULL << (rel_vf_id % 64))) {
		u16 vfid = p_vf->abs_vf_id;

		/* TODO - should we lock channel? */

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Handling FLR\n", vfid);

		ecore_iov_vf_cleanup(p_hwfn, p_vf);

		/* If VF isn't active, no need for anything but SW */
		if (!p_vf->b_init)
			goto cleanup;

		/* TODO - what to do in case of failure? */
		rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
		if (rc != ECORE_SUCCESS)
			goto cleanup;

		rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
		if (rc) {
			/* TODO - what's now? What a mess.... */
			DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
			return rc;
		}

		/* Workaround to make VF-PF channel ready, as FW
		 * doesn't do that as a part of FLR.
		 */
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_USDM_RAM +
		       USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);

		/* VF_STOPPED has to be set only after final cleanup
		 * but prior to re-enabling the VF.
		 */
		p_vf->state = VF_STOPPED;

		rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
		if (rc) {
			/* TODO - again, a mess... */
			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
			       vfid);
			return rc;
		}
cleanup:
		/* Mark VF for ack and clean pending state */
		if (p_vf->state == VF_RESET)
			p_vf->state = VF_STOPPED;
		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
		p_vf->vf_mbx.b_pending_msg = false;
	}

	return rc;
}
enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u16 i;

	OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	/* Since BRB <-> PRS interface can't be tested as part of the flr
	 * polling due to HW limitations, simply sleep a bit. And since
	 * there's no need to wait per-vf, do it before looping.
	 */
	OSAL_MSLEEP(100);

	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
		ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);

	rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}
enum _ecore_status_t
ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u16 rel_vf_id)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	/* Wait instead of polling the BRB <-> PRS interface */
	OSAL_MSLEEP(100);

	ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);

	rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}
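
/* Translate the MFW's disabled-VF bitmap into pending_flr bits; returns
 * true if at least one VF owned by this PF was FLR-ed.
 */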
bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "[%08x,...,%08x]: %08x\n",
			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

	if (!p_hwfn->p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
		return false;
	}

	/* Mark VFs */
	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
		struct ecore_vf_info *p_vf;
		u8 vfid;

		p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
		if (!p_vf)
			continue;

		vfid = p_vf->abs_vf_id;
		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
			u16 rel_vf_id = p_vf->relative_vf_id;

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] [rel %d] got FLR-ed\n",
				   vfid, rel_vf_id);

			p_vf->state = VF_RESET;

			/* No need to lock here, since pending_flr should
			 * only change here and before ACKing MFW. Since
			 * MFW will not trigger an additional attention for
			 * VF flr until ACKs, we're safe.
			 */
			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
			found = true;
		}
	}

	return found;
}
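
/* Report the link params/state/capabilities currently published in a VF's
 * bulletin board; every output pointer is optional.
 */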
void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *p_params,
			struct ecore_mcp_link_state *p_link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid,
							   false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;

	if (p_params)
		__ecore_vf_get_link_params(p_params, p_bulletin);
	if (p_link)
		__ecore_vf_get_link_state(p_link, p_bulletin);
	if (p_caps)
		__ecore_vf_get_link_caps(p_caps, p_bulletin);
}
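
/* Main dispatch of a VF->PF mailbox request: snapshot the first TLV, take
 * the per-VF channel lock, route known TLVs of well-behaved VFs to their
 * handlers, and answer malicious or unknown senders with an explicit
 * failure status whenever a valid reply address exists.
 */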
void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, int vfid)
{
	struct ecore_iov_vf_mbx *mbx;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* ecore_iov_process_mbx_request */
#ifndef CONFIG_ECORE_SW_CHANNEL
	if (!mbx->b_pending_msg) {
		DP_NOTICE(p_hwfn, true,
			  "VF[%02x]: Trying to process mailbox message when none is pending\n",
			  p_vf->abs_vf_id);
		return;
	}
	mbx->b_pending_msg = false;
#endif

	mbx->first_tlv = mbx->req_virt->first_tlv;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%02x]: Processing mailbox message [type %04x]\n",
		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

	OSAL_IOV_VF_MSG_TYPE(p_hwfn,
			     p_vf->relative_vf_id,
			     mbx->first_tlv.tl.type);

	/* Lock the per vf op mutex and note the locker's identity.
	 * The unlock will take place in mbx response.
	 */
	ecore_iov_lock_vf_pf_channel(p_hwfn,
				     p_vf, mbx->first_tlv.tl.type);

	/* check if tlv type is known */
	if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
	    !p_vf->b_malicious) {
		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_TUNN_PARAM:
			ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_COALESCE_UPDATE:
			ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_COALESCE_READ:
			ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_MTU:
			ecore_iov_vf_pf_update_mtu(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		/* If we've received a message from a VF we consider malicious
		 * we ignore the message unless it's one for RELEASE, in which
		 * case we'll let it have the benefit of doubt, allowing the
		 * next loaded driver to start again.
		 */
		if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
			/* TODO - initiate FLR, remove malicious indication */
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
				   p_vf->abs_vf_id);
		} else {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
				   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
		}

		ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
				       mbx->first_tlv.tl.type,
				       sizeof(struct pfvf_def_resp_tlv),
				       PFVF_STATUS_MALICIOUS);
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		DP_NOTICE(p_hwfn, false,
			  "VF[%02x]: unknown TLV. type %04x length %04x"
			  " padding %08x reply address %lu\n",
			  p_vf->abs_vf_id,
			  mbx->first_tlv.tl.type,
			  mbx->first_tlv.tl.length,
			  mbx->first_tlv.padding,
			  (unsigned long)mbx->first_tlv.reply_address);

		/* Try replying in case reply address matches the acquisition's
		 * posted address.
		 */
		if (p_vf->acquire.first_tlv.reply_address &&
		    (mbx->first_tlv.reply_address ==
		     p_vf->acquire.first_tlv.reply_address))
			ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
					       mbx->first_tlv.tl.type,
					       sizeof(struct pfvf_def_resp_tlv),
					       PFVF_STATUS_NOT_SUPPORTED);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%02x]: Can't respond to TLV -"
				   " no valid reply address\n",
				   p_vf->abs_vf_id);
	}

	ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
				       mbx->first_tlv.tl.type);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
	mbx->sw_mbx.response_offset = 0;
#endif
}
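
/* Collect a bitmap of all VFs that currently have an unprocessed mailbox
 * message pending.
 */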
void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
				     u64 *events)
{
	u16 i;

	OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);

	ecore_for_each_vf(p_hwfn, i) {
		struct ecore_vf_info *p_vf;

		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
		if (p_vf->vf_mbx.b_pending_msg)
			events[i / 64] |= 1ULL << (i % 64);
	}
}
static struct ecore_vf_info *
ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
{
	u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;

	if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Got indication for VF [abs 0x%08x] that cannot be"
			   " handled by PF\n",
			   abs_vfid);
		return OSAL_NULL;
	}

	return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
}
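
/* EQE handler for a VF->PF channel doorbell: latch the physical address of
 * the request so it can later be DMA-ed into the PF's copy, and notify the
 * upper layer.
 */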
static enum _ecore_status_t
ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn, u16 abs_vfid,
		     struct regpair *vf_msg)
{
	struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
								   abs_vfid);

	if (!p_vf)
		return ECORE_SUCCESS;

	/* List the physical address of the request so that handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	p_vf->vf_mbx.b_pending_msg = true;

	return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}
static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
				       struct malicious_vf_eqe_data *p_data)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
	if (!p_vf)
		return;

	if (!p_vf->b_malicious) {
		DP_NOTICE(p_hwfn, false,
			  "VF [%d] - Malicious behavior [%02x]\n",
			  p_vf->abs_vf_id, p_data->err_id);

		p_vf->b_malicious = true;
	} else {
		DP_INFO(p_hwfn,
			"VF [%d] - Malicious behavior [%02x]\n",
			p_vf->abs_vf_id, p_data->err_id);
	}

	OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
}
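
/* Demux SRIOV-related async events: VF->PF channel messages, VF-FLR
 * notifications (handled through the MFW attention path instead) and
 * malicious-VF indications.
 */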
static enum _ecore_status_t
ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
		      u8 opcode,
		      __le16 echo,
		      union event_ring_data *data,
		      u8 OSAL_UNUSED fw_return_code)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
					    &data->vf_pf_channel.msg_addr);
	case COMMON_EVENT_VF_FLR:
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF-FLR is still not supported\n");
		return ECORE_SUCCESS;
	case COMMON_EVENT_MALICIOUS_VF:
		ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
		return ECORE_SUCCESS;
	default:
		DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return ECORE_INVAL;
	}
}
bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
		  (1ULL << (rel_vf_id % 64)));
}
u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	/* Validate the iterated VF rather than the starting index */
	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
			return i;

out:
	return MAX_NUM_VFS_E4;
}
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *ptt, int vfid)
{
	struct ecore_dmae_params params;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return ECORE_INVAL;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	if (ecore_dmae_host2host(p_hwfn, ptt,
				 vf_info->vf_mbx.pending_req,
				 vf_info->vf_mbx.req_phys,
				 sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);
		return ECORE_IO;
	}

	return ECORE_SUCCESS;
}
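
/* Bulletin-board setters - the PF publishes configuration (MAC, VLAN, UDP
 * ports) to a VF through its bulletin; "forced" entries override whatever
 * the VF itself requested and are enforced on the vport.
 */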
void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
				       u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced MAC to malicious VF [%d]\n",
			  vfid);
		return;
	}

	if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
		feature = 1 << VFPF_BULLETIN_MAC_ADDR;
	else
		feature = 1 << MAC_ADDR_FORCED;

	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;
	/* Forced MAC will disable MAC_ADDR */
	vf_info->bulletin.p_virt->valid_bitmap &=
	    ~(1 << VFPF_BULLETIN_MAC_ADDR);

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
enum _ecore_status_t
ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn, u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set MAC, invalid vfid [%d]\n", vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set MAC to malicious VF [%d]\n", vfid);
		return ECORE_INVAL;
	}

	if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set MAC, Forced MAC is configured\n");
		return ECORE_INVAL;
	}

	feature = 1 << VFPF_BULLETIN_MAC_ADDR;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
		ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);

	return ECORE_SUCCESS;
}
#ifndef LINUX_REMOVE
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
					       bool b_untagged_only, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set untagged default, invalid vfid [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set untagged default to malicious VF [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	/* Since this is configurable only during vport-start, don't take it
	 * if we're past that point.
	 */
	if (vf_info->state == VF_ENABLED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can't support untagged change for vfid[%d] -"
			   " VF is already active\n",
			   vfid);
		return ECORE_INVAL;
	}

	/* Set configuration; This will later be taken into account during the
	 * VF initialization.
	 */
	feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
	    (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
	    : 0;

	return ECORE_SUCCESS;
}
void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
				  u16 *opaque_fid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	*opaque_fid = vf_info->opaque_fid;
}
#endif
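
/* Publish a forced PVID through the VF's bulletin; pvid == 0 clears the
 * forced-VLAN entry rather than setting one.
 */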
void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
					u16 pvid, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced VLAN, invalid vfid [%d]\n",
			  vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced vlan to malicious VF [%d]\n",
			  vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
				      int vfid, u16 vxlan_port,
				      u16 geneve_port)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set udp ports, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set udp ports to malicious VF [%d]\n",
			   vfid);
		return;
	}

	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
}
bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}
bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}
bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}
enum _ecore_status_t
ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn, int vfid, bool val)
{
	struct ecore_vf_info *vf;
	enum _ecore_status_t rc = ECORE_INVAL;

	if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn, true,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		goto out;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = ECORE_SUCCESS;
		goto out;
	}

	rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}
u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
{
	u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;

	max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
						: ECORE_MAX_VF_CHAINS_PER_PF;

	return max_chains_per_vf;
}
void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					  u16 rel_vf_id,
					  void **pp_req_virt_addr,
					  u16 *p_req_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_req_virt_addr)
		*pp_req_virt_addr = vf_info->vf_mbx.req_virt;

	if (p_req_virt_size)
		*p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
}
void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					    u16 rel_vf_id,
					    void **pp_reply_virt_addr,
					    u16 *p_reply_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_reply_virt_addr)
		*pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;

	if (p_reply_virt_size)
		*p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
}
#ifdef CONFIG_ECORE_SW_CHANNEL
struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
						 u16 rel_vf_id)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return OSAL_NULL;

	return &vf_info->vf_mbx.sw_mbx;
}
#endif
bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
{
	return (length >= sizeof(struct vfpf_first_tlv) &&
		(length <= sizeof(union vfpf_tlvs)));
}
u32 ecore_iov_pfvf_msg_length(void)
{
	return sizeof(union pfvf_tlvs);
}
u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
			       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap &
	      (1 << VFPF_BULLETIN_MAC_ADDR)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}
u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}
u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
				       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}
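
/* Apply a TX rate limit to the VF's vport, using the leading hwfn's
 * current link speed as the rate-limiter reference.
 */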
enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int vfid, int val)
{
	struct ecore_mcp_link_state *p_link;
	struct ecore_vf_info *vf;
	u8 abs_vp_id = 0;
	enum _ecore_status_t rc;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;

	return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
				   p_link->speed);
}
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    int vfid,
					    struct ecore_eth_stats *p_stats)
{
	struct ecore_vf_info *vf;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	if (vf->state != VF_ENABLED)
		return ECORE_INVAL;

	__ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
				vf->abs_vf_id + 0x10, false);

	return ECORE_SUCCESS;
}
u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_rxqs;
}
u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_active_rxqs;
}
void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return OSAL_NULL;

	return p_vf->ctx;
}
u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_sbs;
}
bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_FREE);
}
bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
					      u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ACQUIRED);
}
bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ENABLED);
}
bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
			     u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
}
enum _ecore_status_t
ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_wfq_data *vf_vp_wfq;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}
#ifdef CONFIG_ECORE_SW_CHANNEL
void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
				 bool b_is_hw)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	vf_info->b_hw_channel = b_is_hw;
}
#endif