/*
 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "ecore_sriov.h"
#include "ecore_status.h"
#include "ecore_hw_defs.h"
#include "ecore_int.h"
#include "ecore_hsi_eth.h"
#include "ecore_vfpf_if.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_mcp.h"
#include "ecore_cxt.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
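
/* Human-readable names for the VF<->PF channel TLV types; indexed by the
 * CHANNEL_TLV_* enum values and used only by the debug logging below.
 */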
const char *ecore_channel_tlvs_string[] = {
	"CHANNEL_TLV_NONE",	/* ends tlv sequence */
	"CHANNEL_TLV_ACQUIRE",
	"CHANNEL_TLV_VPORT_START",
	"CHANNEL_TLV_VPORT_UPDATE",
	"CHANNEL_TLV_VPORT_TEARDOWN",
	"CHANNEL_TLV_START_RXQ",
	"CHANNEL_TLV_START_TXQ",
	"CHANNEL_TLV_STOP_RXQ",
	"CHANNEL_TLV_STOP_TXQ",
	"CHANNEL_TLV_UPDATE_RXQ",
	"CHANNEL_TLV_INT_CLEANUP",
	"CHANNEL_TLV_CLOSE",
	"CHANNEL_TLV_RELEASE",
	"CHANNEL_TLV_LIST_END",
	"CHANNEL_TLV_UCAST_FILTER",
	"CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
	"CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
	"CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
	"CHANNEL_TLV_VPORT_UPDATE_MCAST",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
	"CHANNEL_TLV_VPORT_UPDATE_RSS",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
	"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
	"CHANNEL_TLV_MAX"
};
static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 fp_minor;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case ECORE_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return ECORE_INVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is"
			   " slightly newer than PF's %02x.%02x; Configuring"
			   " PF version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR, fp_minor,
			   ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
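
/* Counterpart of ecore_sp_vf_start(): posts a COMMON_RAMROD_VF_STOP request
 * for the VF identified by its concrete/opaque FIDs.
 */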
static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
					     u32 concrete_vfid,
					     u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_STOP,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
			     bool b_enabled_only)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	return true;
}

struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
					    u16 relative_vf_id,
					    bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return OSAL_NULL;
	}

	if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 rx_qid)
{
	if (rx_qid >= p_vf->num_rxqs)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
	return rx_qid < p_vf->num_rxqs;
}

static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 tx_qid)
{
	if (tx_qid >= p_vf->num_txqs)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
	return tx_qid < p_vf->num_txqs;
}

static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
				  struct ecore_vf_info *p_vf,
				  u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[0%02x] - tried using sb_idx %04x which doesn't exist as"
		   " one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}
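
/* Standard reflected CRC-32 (polynomial 0xedb88320), used below to checksum
 * the bulletin board content before it is DMA-ed to the VF.
 */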
/* TODO - this is linux crc32; Need a way to ifdef it out for linux */
u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
{
	int i;

	while (length--) {
		crc ^= *ptr++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}
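
/* Bumps the bulletin version, recomputes the CRC over everything past the
 * crc field itself, and DMA-copies the board into the VF's memory.
 */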
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
						int vfid,
						struct ecore_ptt *p_ptt)
{
	struct ecore_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct ecore_dmae_params params;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return ECORE_INVAL;

	/* TODO - check VF is in a state where it can accept message */
	if (!p_vf->vf_bulletin)
		return ECORE_INVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
				      p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				    p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				    &params);
}
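
/* Reads the SR-IOV extended capability from PCI config space into the
 * per-device ecore_hw_sriov_info, then sanity-checks the published VF counts.
 */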
static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
{
	struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_INITIAL_VF,
				  &iov->initial_vfs);

	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		/* @@@TODO - in future we might want to add an OSAL here to
		 * allow each OS to decide on its own how to act.
		 */
		DP_VERBOSE(p_dev, ECORE_MSG_IOV,
			   "Number of VFs are already set to non-zero value."
			   " Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev,
				   pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);

	OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info[%d]: nres %d, cap 0x%x,"
		   " ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
		   " stride %d, page size 0x%x\n", 0,
		   /* @@@TBD MichalK - function id */
		   iov->nres, iov->cap, iov->ctrl,
		   iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
		   iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
	    iov->total_vfs > NUM_OF_VFS(p_dev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(p_dev, false,
			  "IOV: Unexpected number of vfs set: %d"
			  " setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return ECORE_SUCCESS;
}

static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_igu_block *p_sb;
	u16 sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
		return;
	}

	for (sb_id = 0;
	     sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
		    !(p_sb->status & ECORE_IGU_STATUS_PF)) {
			val = ecore_rd(p_hwfn, p_ptt,
				       IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			ecore_wr(p_hwfn, p_ptt,
				 IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}
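
/* Carves the pre-allocated mailbox/reply/bulletin DMA areas into per-VF
 * slices and derives each VF's relative/absolute/concrete/opaque IDs.
 */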
static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct ecore_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "ecore_iov_setup_vfdb called without alloc mem first\n");
		return;
	}

	p_iov_info->base_vport_id = 1;	/* @@@TBD resource allocation */

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
		vf->state = VF_STOPPED;

		vf->bulletin.phys = idx *
		    sizeof(struct ecore_bulletin_content) + bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct ecore_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		/* TODO - need to devise a better way of getting opaque */
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
		    (vf->abs_vf_id << 8);
		/* @@TBD MichalK - add base vport_id of VFs to equation */
		vf->vport_id = p_iov_info->base_vport_id + idx;

		vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
	}
}
static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_msg_phys_addr,
					    p_iov_info->mbx_msg_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_reply_phys_addr,
					    p_iov_info->mbx_reply_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
	    num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->bulletins_phys,
					    p_iov_info->bulletins_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%lx phys], "
		   "Response mailbox [%p virt 0x%lx phys] Bulletins"
		   " [%p virt 0x%lx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (unsigned long)p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (unsigned long)p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins,
		   (unsigned long)p_iov_info->bulletins_phys);

	return ECORE_SUCCESS;
}
static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_msg_virt_addr,
				       p_iov_info->mbx_msg_phys_addr,
				       p_iov_info->mbx_msg_size);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_reply_virt_addr,
				       p_iov_info->mbx_reply_phys_addr,
				       p_iov_info->mbx_reply_size);

	if (p_iov_info->p_bulletins)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->p_bulletins,
				       p_iov_info->bulletins_phys,
				       p_iov_info->bulletins_size);
}
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return ECORE_SUCCESS;
	}

	p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_sriov'\n");
		return ECORE_NOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	return ecore_iov_allocate_vfdb(p_hwfn);
}

void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	ecore_iov_setup_vfdb(p_hwfn);
	ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		ecore_iov_free_vfdb(p_hwfn);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
	}
}

void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
{
	OSAL_FREE(p_dev, p_dev->p_iov_info);
	p_dev->p_iov_info = OSAL_NULL;
}
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	int pos;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Learn the PCI configuration */
	pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
					   PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
		return ECORE_SUCCESS;
	}

	/* Allocate a new struct for IOV information */
	/* TODO - can change to VALLOC when its available */
	p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
					sizeof(*p_dev->p_iov_info));
	if (!p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, true,
			  "Can't support IOV due to lack of memory\n");
		return ECORE_NOMEM;
	}
	p_dev->p_iov_info->pos = pos;

	rc = ecore_iov_pci_cfg_info(p_dev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!p_dev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		OSAL_FREE(p_dev, p_dev->p_iov_info);
		p_dev->p_iov_info = OSAL_NULL;
		return ECORE_SUCCESS;
	}

	/* Calculate the first VF index - this is a bit tricky; Basically,
	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
	 * after the first engine's VFs.
	 */
	p_dev->p_iov_info->first_vf_in_pf = p_hwfn->p_dev->p_iov_info->offset +
	    p_hwfn->abs_pf_id - 16;
	if (ECORE_PATH_ID(p_hwfn))
		p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   p_dev->p_iov_info->first_vf_in_pf);

	return ECORE_SUCCESS;
}
bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true))
		return false;

	return true;
}

void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
				 u16 rel_vf_id, u8 to_disable)
{
	struct ecore_vf_info *vf;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
				  u8 to_disable)
{
	u16 i;

	if (!IS_ECORE_SRIOV(p_dev))
		return;

	for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
		ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
}

/* @@@TBD Consider taking outside of ecore... */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
					  u16 vf_id, void *ctx)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);

	if (vf != OSAL_NULL) {
		vf->ctx = ctx;
#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
	} else {
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}
static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u8 abs_vfid)
{
	ecore_wr(p_hwfn, p_ptt,
		 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
		 1 << (abs_vfid & 0x1f));
}

static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  vf->igu_sbs[i],
						  vf->opaque_fid, true);
}

static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}

static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	enum _ecore_status_t rc;

	if (vf->to_disable)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
		   ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
				     ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
				      vf->abs_vf_id, vf->num_sbs);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		       p_hwfn->hw_info.hw_mode);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	return rc;
}
/**
 * @brief ecore_iov_config_perm_table - configure the permission zone table.
 *
 *      In E4, queue zone permission table size is 320x9. There
 *      are 320 VF queues for single engine device (256 for dual
 *      engine device), and each entry has the following format:
 *      {Valid, VF[7:0]}
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				  &qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		ecore_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}
static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
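
/* Walks the IGU CAM looking for free blocks, assigns up to num_rx_queues of
 * them to the VF (in the IGU mapping memory and in CAU), and returns how
 * many were actually allocated.
 */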
static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf,
				     u16 num_rx_queues)
{
	struct ecore_igu_block *igu_blocks;
	int qid = 0, igu_id = 0;
	u32 val = 0;

	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;

	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	while ((qid < num_rx_queues) &&
	       (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
		if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			ecore_wr(p_hwfn, p_ptt,
				 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
				 val);

			/* Configure igu sb in CAU which were marked valid */
			ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
						p_hwfn->rel_pf_id,
						vf->abs_vf_id, 1);
			ecore_dmae_host2grc(p_hwfn, p_ptt,
					    (u64)(osal_uintptr_t)&sb_entry,
					    CAU_REG_SB_VAR_MEMORY +
					    igu_id * sizeof(u64), 2, 0);
			qid++;
		}
		igu_id++;
	}

	vf->num_sbs = (u8)num_rx_queues;

	return vf->num_sbs;
}

/**
 * @brief The function invalidates all the VF entries,
 *        technically this isn't required, but added for
 *        cleanness and ease of debugging in case a VF attempts to
 *        produce an interrupt after it has been taken down.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 */
static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_vf_info *vf)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = ecore_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		ecore_wr(p_hwfn, p_ptt, addr, val);

		p_info->igu_map.igu_blocks[igu_id].status |=
		    ECORE_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;
	}

	vf->num_sbs = 0;
}
enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u16 rel_vf_id, u16 num_rx_queues)
{
	u8 num_of_vf_available_chains = 0;
	struct ecore_vf_info *vf = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cids;
	u8 i;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
			  rel_vf_id);
		return ECORE_INVAL;
	}

	/* Limit number of queues according to number of CIDs */
	ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues"
		   " [0x%04x CIDs available]\n",
		   vf->relative_vf_id, num_rx_queues, (u16)cids);
	num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids));

	num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
								p_ptt,
								vf,
								num_rx_queues);
	if (num_of_vf_available_chains == 0) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return ECORE_NOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,
							     vf->igu_sbs[i]);

		if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "VF[%d] will require utilizing of"
				  " out-of-bounds queues - %04x\n",
				  vf->relative_vf_id, queue_id);
			/* TODO - cleanup the already allocate SBs */
			return ECORE_INVAL;
		}

		/* CIDs are per-VF, so no problem having them 0-based. */
		vf->vf_queues[i].fw_rx_qid = queue_id;
		vf->vf_queues[i].fw_tx_qid = queue_id;
		vf->vf_queues[i].fw_cid = i;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
	}

	rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);

	if (rc == ECORE_SUCCESS) {
		vf->b_init = true;
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
		    (1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs++;
	}

	return rc;
}
void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *params,
			struct ecore_mcp_link_state *link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u16 rel_vf_id)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->bulletin.p_virt)
		OSAL_MEMSET(vf->bulletin.p_virt, 0,
			    sizeof(*vf->bulletin.p_virt));

	OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
	OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(caps));
	ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;
		/* The mask must use the VF's bit index within the u64 */
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
		    ~(1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs--;
	}

	return ECORE_SUCCESS;
}
static bool ecore_iov_tlv_supported(u16 tlvtype)
{
	return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
}

static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					 struct ecore_vf_info *vf, u16 tlv)
{
	/* lock the channel */
	/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */

	/* record the locking op */
	/* vf->op_current = tlv; @@@TBD MichalK */

	/* log the lock */
	if (ecore_iov_tlv_supported(tlv))
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %s\n",
			   vf->abs_vf_id,
			   ecore_channel_tlvs_string[tlv]);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %04x\n",
			   vf->abs_vf_id, tlv);
}

static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					   struct ecore_vf_info *vf,
					   u16 expected_tlv)
{
	/* log the unlock */
	if (ecore_iov_tlv_supported(expected_tlv))
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %s\n",
			   vf->abs_vf_id,
			   ecore_channel_tlvs_string[expected_tlv]);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %04x\n",
			   vf->abs_vf_id, expected_tlv);

	/* record the locking op */
	/* vf->op_current = CHANNEL_TLV_NONE; */
}
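
/* Typical usage (see ecore_iov_prepare_resp() below): a reply is built by
 * repeatedly calling ecore_add_tlv() on mbx->offset and is terminated with a
 * CHANNEL_TLV_LIST_END entry, e.g.:
 *
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
 *	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 */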
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
		    u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}
/* list the types and lengths of the tlvs on the buffer */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		/* cast current tlv list entry to channel tlv header */
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		if (ecore_iov_tlv_supported(tlv->type))
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %s, length %d\n",
				   i, ecore_channel_tlvs_string[tlv->type],
				   tlv->length);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %d, length %d\n",
				   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
			return;
		}
		total_length += tlv->length;
		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}
*p_hwfn
,
1185 struct ecore_ptt
*p_ptt
,
1186 struct ecore_vf_info
*p_vf
,
1187 u16 length
, u8 status
)
1189 struct ecore_iov_vf_mbx
*mbx
= &p_vf
->vf_mbx
;
1190 struct ecore_dmae_params params
;
1193 mbx
->reply_virt
->default_resp
.hdr
.status
= status
;
1195 ecore_dp_tlv_list(p_hwfn
, mbx
->reply_virt
);
1197 #ifdef CONFIG_ECORE_SW_CHANNEL
1198 mbx
->sw_mbx
.response_size
=
1199 length
+ sizeof(struct channel_list_end_tlv
);
1201 if (!p_hwfn
->p_dev
->b_hw_channel
)
1205 eng_vf_id
= p_vf
->abs_vf_id
;
1207 OSAL_MEMSET(¶ms
, 0, sizeof(struct ecore_dmae_params
));
1208 params
.flags
= ECORE_DMAE_FLAG_VF_DST
;
1209 params
.dst_vfid
= eng_vf_id
;
1211 ecore_dmae_host2host(p_hwfn
, p_ptt
, mbx
->reply_phys
+ sizeof(u64
),
1212 mbx
->req_virt
->first_tlv
.reply_address
+
1214 (sizeof(union pfvf_tlvs
) - sizeof(u64
)) / 4,
1217 ecore_dmae_host2host(p_hwfn
, p_ptt
, mbx
->reply_phys
,
1218 mbx
->req_virt
->first_tlv
.reply_address
,
1219 sizeof(u64
) / 4, ¶ms
);
1222 GTT_BAR0_MAP_REG_USDM_RAM
+
1223 USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id
), 1);
static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
				  enum ecore_iov_vport_update_flag flag)
{
	switch (flag) {
	case ECORE_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case ECORE_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case ECORE_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case ECORE_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case ECORE_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}
static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf,
					      struct ecore_iov_vf_mbx *p_mbx,
					      u8 status, u16 tlvs_mask,
					      u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))
			continue;

		resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
				     ecore_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - vport_update resp: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   ecore_iov_vport_to_tlv(p_hwfn, i),
			   resp->hdr.status);

		total_len += size;
	}

	ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	return total_len;
}
static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf_info,
				   u16 type, u16 length, u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);

	OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
}
struct ecore_public_vf_info
*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
			      u16 relative_vf_id,
			      bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return OSAL_NULL;

	return &vf->p_vf_info;
}

static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf)
{
	u32 i;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++)
		p_vf->vf_queues[i].rxq_active = 0;

	OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
}
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *p_vf,
					struct vf_pf_resc_request *p_req,
					struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		/* TODO - what's this sb_qid field? Is it deprecated?
		 * or is there an ecore_client that looks at this?
		 */
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				  (u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
	}

	/* Filter related information */
	p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
					     p_req->num_mac_filters);
	p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
					      p_req->num_vlan_filters);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
			   " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
			   " vlan [%02x/%02x] mc [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs, p_resp->num_rxqs,
			   p_req->num_txqs, p_resp->num_txqs,
			   p_req->num_sbs, p_resp->num_sbs,
			   p_req->num_mac_filters, p_resp->num_mac_filters,
			   p_req->num_vlan_filters, p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}
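
/* Fills in the per-storm BAR0 addresses at which FW keeps this VF's queue
 * statistics; the Tstorm entry is left zeroed.
 */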
static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
					   struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
	    OFFSETOF(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
	    OFFSETOF(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
	    OFFSETOF(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}
static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf)
{
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	enum _ecore_status_t rc;

	OSAL_MEMSET(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			/* This legacy support would need to be removed once
			 * the major has changed.
			 */
			OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is"
				" incompatible with loaded FW's fastpath"
				" HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->p_dev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support"
			" 100g\n",
			vf->abs_vf_id);
		goto out;
	}

#ifndef __EXTRACT__LINUX__
	if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}
#endif

	/* Store the acquire message */
	OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
	    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
	pfdev_info->db_size = 0;	/* @@@ TBD MichalK Vf Doorbells */
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
	    PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->p_dev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
		    ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this field.
	 */
	pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
					      req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
			      OSAL_NULL);

	pfdev_info->dev_type = p_hwfn->p_dev->type;
	pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						    &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = ecore_sp_vf_start(p_hwfn, vf);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
			  vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response, and post
	 * an initial image to the bulletin board.
	 */
	resp->bulletin_size = vf->bulletin.size;
	ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
		   " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
		   " n_vlans-%d, n_mcs-%d\n",
		   vf->abs_vf_id, resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
		   (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
		   resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
		   resc->num_vlan_filters, resc->num_mc_filters);

	vf->state = VF_ACQUIRED;

out:
	/* Prepare Response */
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			       sizeof(struct pfvf_acquire_resp_tlv),
			       vfpf_status);
}
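
/* Toggles FW anti-spoofing on the VF's vport via a vport-update ramrod;
 * a no-op when the requested value is already configured.
 */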
static enum _ecore_status_t
__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
			 struct ecore_vf_info *p_vf, bool val)
{
	struct ecore_sp_vport_update_params params;
	enum _ecore_status_t rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return ECORE_SUCCESS;
	}

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
				   OSAL_NULL);
	if (rc == ECORE_SUCCESS) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}
static enum _ecore_status_t
ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf)
{
	struct ecore_filter_ucast filter;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	OSAL_MEMSET(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = ECORE_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = ECORE_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = ecore_sp_eth_filter_ucast(p_hwfn,
					       p_vf->opaque_fid,
					       &filter,
					       ECORE_SPQ_MODE_CB,
					       OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "Failed to configure VLAN [%04x]"
				  " to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}

static enum _ecore_status_t
ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
				     struct ecore_vf_info *p_vf, u64 events)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/*TODO - what about MACs? */

	if ((events & (1 << VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}
static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
					    struct ecore_vf_info *p_vf,
					    u64 events)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_filter_ucast filter;

	if (!p_vf->vport_instance)
		return ECORE_INVAL;

	if (events & (1 << MAC_ADDR_FORCED)) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_MAC;
		filter.opcode = ECORE_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);

		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
	}

	if (events & (1 << VLAN_ADDR_FORCED)) {
		struct ecore_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
		    ECORE_FILTER_FLUSH;

		/* Send the ramrod */
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ?
		    1 : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update,
					   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
			u16 qid;

			if (!p_vf->vf_queues[i].rxq_active)
				continue;

			qid = p_vf->vf_queues[i].fw_rx_qid;

			rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,
							   1, 0, 1,
							   ECORE_SPQ_MODE_EBLOCK,
							   OSAL_NULL);
			if (rc) {
				DP_NOTICE(p_hwfn, true,
					  "Failed to send Rx update"
					  " for queue[0x%04x]\n",
					  qid);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}
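
/* Handles CHANNEL_TLV_VPORT_START: programs the VF's status blocks in CAU,
 * enables IGU/permission-table access, and starts the vport with the
 * settings requested by the VF (subject to bulletin-forced overrides).
 */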
static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
{
	struct ecore_sp_vport_start_params params = { 0 };
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct ecore_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	enum _ecore_status_t rc;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		ecore_int_cau_conf_sb(p_hwfn, p_ptt,
				      start->sb_addr[sb_id],
				      vf->igu_sbs[sb_id],
				      vf->abs_vf_id, 1);
	}
	ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false,
			  "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
		params.tx_switching = false;
	}

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;
	params.check_mac = true;

	rc = ecore_sp_eth_vport_start(p_hwfn, &params);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
		OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
					  vf->vport_id, vf->opaque_fid);
		__ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			       sizeof(struct pfvf_def_resp_tlv), status);
}
static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
{
	u8 status = PFVF_STATUS_SUCCESS;
	enum _ecore_status_t rc;

	vf->vport_instance--;
	vf->spoof_chk = false;

	rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	}

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			       sizeof(struct pfvf_def_resp_tlv), status);
}
static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    struct ecore_vf_info *vf,
					    u8 status, bool b_legacy)
{
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
			      length);
	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		req = &mbx->req_virt->start_rxq;
		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
		    OFFSETOF(struct mstorm_vf_zone,
			     non_trigger.eth_rx_queue_producers) +
		    sizeof(struct eth_rx_prod_data) * req->rx_qid;
	}

	ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}

static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
{
	struct ecore_queue_start_common_params p_params;
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	struct vfpf_start_rxq_tlv *req;
	bool b_legacy_vf = false;
	enum _ecore_status_t rc;

	req = &mbx->req_virt->start_rxq;
	OSAL_MEMSET(&p_params, 0, sizeof(p_params));
	p_params.queue_id = (u8)vf->vf_queues[req->rx_qid].fw_rx_qid;
	p_params.vf_qid = req->rx_qid;
	p_params.vport_id = vf->vport_id;
	p_params.stats_id = vf->abs_vf_id + 0x10;
	p_params.sb = req->hw_sb;
	p_params.sb_idx = req->sb_index;

	if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	/* Legacy VFs have their Producers in a different location, which they
	 * calculate on their own and clean the producer prior to this.
	 */
	if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		b_legacy_vf = true;
	else
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
		       0);

	rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
					   vf->vf_queues[req->rx_qid].fw_cid,
					   &p_params,
					   req->bd_max_bytes,
					   req->rxq_addr,
					   req->cqe_pbl_addr,
					   req->cqe_pbl_size,
					   b_legacy_vf);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
	} else {
		status = PFVF_STATUS_SUCCESS;
		vf->vf_queues[req->rx_qid].rxq_active = true;
		vf->num_active_rxqs++;
	}

out:
	ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf,
					status, b_legacy_vf);
}
static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    struct ecore_vf_info *p_vf,
					    u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	bool b_legacy = false;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		b_legacy = true;

	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
			      length);
	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		u16 qid = mbx->req_virt->start_txq.tx_qid;

		p_tlv->offset = DB_ADDR_VF(p_vf->vf_queues[qid].fw_cid,
					   DQ_DEMS_LEGACY);
	}

	ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
{
	struct ecore_queue_start_common_params p_params;
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	union ecore_qm_pq_params pq_params;
	struct vfpf_start_txq_tlv *req;
	enum _ecore_status_t rc;

	/* Prepare the parameters which would choose the right PQ */
	OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
	pq_params.eth.is_vf = 1;
	pq_params.eth.vf_id = vf->relative_vf_id;

	req = &mbx->req_virt->start_txq;
	OSAL_MEMSET(&p_params, 0, sizeof(p_params));
	p_params.queue_id = (u8)vf->vf_queues[req->tx_qid].fw_tx_qid;
	p_params.vport_id = vf->vport_id;
	p_params.stats_id = vf->abs_vf_id + 0x10;
	p_params.sb = req->hw_sb;
	p_params.sb_idx = req->sb_index;

	if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
					   vf->opaque_fid,
					   vf->vf_queues[req->tx_qid].fw_cid,
					   &p_params,
					   req->pbl_addr,
					   req->pbl_size,
					   &pq_params);

	if (rc) {
		status = PFVF_STATUS_FAILURE;
	} else {
		status = PFVF_STATUS_SUCCESS;
		vf->vf_queues[req->tx_qid].txq_active = true;
	}

out:
	ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
}
static enum _ecore_status_t
ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
		       struct ecore_vf_info *vf,
		       u16 rxq_id, u8 num_rxqs,
		       bool cqe_completion)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int qid;

	if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
		return ECORE_INVAL;

	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
		if (vf->vf_queues[qid].rxq_active) {
			rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
							vf->vf_queues[qid].
							fw_rx_qid, false,
							cqe_completion);
			if (rc)
				return rc;
		}
		vf->vf_queues[qid].rxq_active = false;
		vf->num_active_rxqs--;
	}

	return rc;
}
static enum _ecore_status_t
ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
		       struct ecore_vf_info *vf,
		       u16 txq_id, u8 num_txqs)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int qid;

	if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
		return ECORE_INVAL;

	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
		if (vf->vf_queues[qid].txq_active) {
			rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
							vf->vf_queues[qid].
							fw_tx_qid);
			if (rc)
				return rc;
		}
		vf->vf_queues[qid].txq_active = false;
	}

	return rc;
}
static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_stop_rxqs_tlv *req;
	enum _ecore_status_t rc;

	/* We give the option of starting from qid != 0, in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * amount of queues that exist.
	 */
	req = &mbx->req_virt->stop_rxqs;
	rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
				    req->num_rxqs, req->cqe_completion);
	if (rc)
		status = PFVF_STATUS_FAILURE;

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
			       length, status);
}
static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_stop_txqs_tlv *req;
	enum _ecore_status_t rc;

	/* We give the option of starting from qid != 0, in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * amount of queues that exist.
	 */
	req = &mbx->req_virt->stop_txqs;
	rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
	if (rc)
		status = PFVF_STATUS_FAILURE;

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
			       length, status);
}
static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_rxq_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	u8 complete_event_flg;
	u8 complete_cqe_flg;
	u16 qid;
	enum _ecore_status_t rc;
	u8 i;

	req = &mbx->req_virt->update_rxq;
	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);

	for (i = 0; i < req->num_rxqs; i++) {
		qid = req->rx_qid + i;

		if (!vf->vf_queues[qid].rxq_active) {
			DP_NOTICE(p_hwfn, true,
				  "VF rx_qid = %d isn't active!\n", qid);
			status = PFVF_STATUS_FAILURE;
			break;
		}

		rc = ecore_sp_eth_rx_queues_update(p_hwfn,
						   vf->vf_queues[qid].fw_rx_qid,
						   1,
						   complete_cqe_flg,
						   complete_event_flg,
						   ECORE_SPQ_MODE_EBLOCK,
						   OSAL_NULL);
		if (rc) {
			status = PFVF_STATUS_FAILURE;
			break;
		}
	}

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
			       length, status);
}
void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
				 void *p_tlvs_list, u16 req_type)
{
	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
	u16 len = 0;

	do {
		if (!p_tlv->length) {
			DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
			return OSAL_NULL;
		}

		if (p_tlv->type == req_type) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "Extended tlv type %s, length %d found\n",
				   ecore_channel_tlvs_string[p_tlv->type],
				   p_tlv->length);
			return p_tlv;
		}

		len += p_tlv->length;
		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
			DP_NOTICE(p_hwfn, true,
				  "TLVs have overrun the buffer size\n");
			return OSAL_NULL;
		}
	} while (p_tlv->type != CHANNEL_TLV_LIST_END);

	return OSAL_NULL;
}
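/* The request buffer walked above is a flat chain of TLVs, roughly:
 *
 *	[ first_tlv | ext tlv | ext tlv | ... | CHANNEL_TLV_LIST_END ]
 *
 * Each channel_tlv carries {type, length}, and the walk advances by
 * p_tlv->length bytes until LIST_END, bailing out on a zero-length entry
 * or on crossing TLV_BUFFER_SIZE so a malformed VF request can't make the
 * PF loop forever or read past the mailbox buffer.
 */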
static void
ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
			      struct ecore_sp_vport_update_params *p_data,
			      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_activate_tlv *p_act_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;

	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_act_tlv)
		return;

	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
}
static void
ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
			       struct ecore_sp_vport_update_params *p_data,
			       struct ecore_vf_info *p_vf,
			       struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;

	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_vlan_tlv)
		return;

	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;

	/* Ignore the VF request if we're forcing a vlan */
	if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
		p_data->update_inner_vlan_removal_flg = 1;
		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
	}

	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
}
static void
ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
			      struct ecore_sp_vport_update_params *p_data,
			      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;

	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_tx_switch_tlv)
		return;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false,
			  "FPGA: Ignore tx-switching configuration originating"
			  " from VFs\n");
		return;
	}
#endif

	p_data->update_tx_switching_flg = 1;
	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
}
static void
ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data,
				    struct ecore_iov_vf_mbx *p_mbx,
				    u16 *tlvs_mask)
{
	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;

	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_mcast_tlv)
		return;

	p_data->update_approx_mcast_flg = 1;
	OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
		    sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
}
static void
ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
				struct ecore_sp_vport_update_params *p_data,
				struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;

	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_accept_tlv)
		return;

	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
}
static void
ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data,
				    struct ecore_iov_vf_mbx *p_mbx,
				    u16 *tlvs_mask)
{
	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;

	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_accept_any_vlan)
		return;

	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
	p_data->update_accept_any_vlan_flg =
	    p_accept_any_vlan->update_accept_any_vlan_flg;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}
static void
ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
			      struct ecore_vf_info *vf,
			      struct ecore_sp_vport_update_params *p_data,
			      struct ecore_rss_params *p_rss,
			      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
	u16 i, q_idx, max_q_idx;
	u16 table_size;

	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_rss_tlv) {
		p_data->rss_params = OSAL_NULL;
		return;
	}

	OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));

	p_rss->update_rss_config =
	    !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_CONFIG_FLAG);
	p_rss->update_rss_capabilities =
	    !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_CAPS_FLAG);
	p_rss->update_rss_ind_table =
	    !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_IND_TABLE_FLAG);
	p_rss->update_rss_key =
	    !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_KEY_FLAG);

	p_rss->rss_enable = p_rss_tlv->rss_enable;
	p_rss->rss_eng_id = vf->relative_vf_id + 1;
	p_rss->rss_caps = p_rss_tlv->rss_caps;
	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
	OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
		    sizeof(p_rss->rss_ind_table));
	OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
		    sizeof(p_rss->rss_key));

	table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
				(1 << p_rss_tlv->rss_table_size_log));

	max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);

	for (i = 0; i < table_size; i++) {
		u16 index = vf->vf_queues[0].fw_rx_qid;

		q_idx = p_rss->rss_ind_table[i];
		if (q_idx >= max_q_idx)
			DP_NOTICE(p_hwfn, true,
				  "rss_ind_table[%d] = %d,"
				  " rxq is out of range\n",
				  i, q_idx);
		else if (!vf->vf_queues[q_idx].rxq_active)
			DP_NOTICE(p_hwfn, true,
				  "rss_ind_table[%d] = %d, rxq is not active\n",
				  i, q_idx);
		else
			index = vf->vf_queues[q_idx].fw_rx_qid;

		p_rss->rss_ind_table[i] = index;
	}

	p_data->rss_params = p_rss;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
}
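/* The indirection table sent by the VF is in VF-relative queue ids; the
 * loop above rewrites each entry into the matching FW rx queue id. Entries
 * that are out of range or that point at an inactive rxq are only logged
 * and fall back to queue 0's fw_rx_qid rather than failing the update.
 */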
static void
ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
				  struct ecore_vf_info *vf,
				  struct ecore_sp_vport_update_params *p_data,
				  struct ecore_sge_tpa_params *p_sge_tpa,
				  struct ecore_iov_vf_mbx *p_mbx,
				  u16 *tlvs_mask)
{
	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (!p_sge_tpa_tlv) {
		p_data->sge_tpa_params = OSAL_NULL;
		return;
	}

	OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
	       VFPF_UPDATE_TPA_PARAM_FLAG);

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
}
static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct ecore_vf_info *vf)
{
	struct ecore_sp_vport_update_params params;
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct ecore_sge_tpa_params sge_tpa_params;
	u16 tlvs_mask = 0, tlvs_accepted = 0;
	struct ecore_rss_params rss_params;
	u8 status = PFVF_STATUS_SUCCESS;
	u16 length;
	enum _ecore_status_t rc;

	/* Validate the VF can send such a request */
	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No VPORT instance available for VF[%d],"
			   " failing vport update\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.rss_params = OSAL_NULL;

	/* Search for extended tlvs list and update values
	 * from VF in struct ecore_sp_vport_update_params.
	 */
	ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
	ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
				      mbx, &tlvs_mask);
	ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
					  &sge_tpa_params, mbx, &tlvs_mask);

	/* Just log a message if there is no single extended tlv in buffer.
	 * When all features of vport update ramrod would be requested by VF
	 * as extended TLVs in buffer then an error can be returned in response
	 * if there is no extended TLV present in buffer.
	 */
	tlvs_accepted = tlvs_mask;

#ifndef LINUX_REMOVE
	if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
				     &params, &tlvs_accepted) !=
	    ECORE_SUCCESS) {
		tlvs_accepted = 0;
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}
#endif

	if (!tlvs_accepted) {
		if (tlvs_mask)
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "Upper-layer prevents said VF"
				   " configuration\n");
		else
			DP_NOTICE(p_hwfn, true,
				  "No feature tlvs found for vport update\n");
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
				   OSAL_NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
						    tlvs_mask, tlvs_accepted);
	ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
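/* Two bitmaps steer the response above: tlvs_mask records every extended
 * TLV the PF recognized in the request, while tlvs_accepted is the subset
 * the upper layer actually allowed. The response built by
 * ecore_iov_prep_vp_update_resp_tlvs() echoes both, so the VF can tell
 * "unknown feature" apart from "known but rejected".
 */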
static enum _ecore_status_t
ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
				struct ecore_vf_info *p_vf,
				struct ecore_filter_ucast *p_params)
{
	int i;

	/* First remove entries and then add new ones */
	if (p_params->opcode == ECORE_FILTER_REMOVE) {
		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			if (p_vf->shadow_config.vlans[i].used &&
			    p_vf->shadow_config.vlans[i].vid ==
			    p_params->vlan) {
				p_vf->shadow_config.vlans[i].used = false;
				break;
			}
		if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%d] - Tries to remove a non-existing"
				   " vlan\n",
				   p_vf->relative_vf_id);
			return ECORE_INVAL;
		}
	} else if (p_params->opcode == ECORE_FILTER_REPLACE ||
		   p_params->opcode == ECORE_FILTER_FLUSH) {
		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			p_vf->shadow_config.vlans[i].used = false;
	}

	/* In forced mode, we're willing to remove entries - but we don't add
	 * new ones.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
		return ECORE_SUCCESS;

	if (p_params->opcode == ECORE_FILTER_ADD ||
	    p_params->opcode == ECORE_FILTER_REPLACE) {
		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
			if (p_vf->shadow_config.vlans[i].used)
				continue;

			p_vf->shadow_config.vlans[i].used = true;
			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
			break;
		}

		if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%d] - Tries to configure more than %d"
				   " vlan filters\n",
				   p_vf->relative_vf_id,
				   ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
			return ECORE_INVAL;
		}
	}

	return ECORE_SUCCESS;
}
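/* The vlan shadow above (and the mac shadow below) keep a PF-side copy of
 * every filter the VF configured. This is presumably what lets the PF
 * re-trace the VF's configuration later - e.g. when a forced VLAN is
 * removed again - without having to ask the VF to resend its filters.
 */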
static enum _ecore_status_t
ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
			       struct ecore_vf_info *p_vf,
			       struct ecore_filter_ucast *p_params)
{
	char empty_mac[ETH_ALEN];
	int i;

	OSAL_MEM_ZERO(empty_mac, ETH_ALEN);

	/* If we're in forced-mode, we don't allow any change */
	/* TODO - this would change if we were ever to implement logic for
	 * removing a forced MAC altogether [in which case, like for vlans,
	 * we should be able to re-trace previous configuration.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
		return ECORE_SUCCESS;

	/* First remove entries and then add new ones */
	if (p_params->opcode == ECORE_FILTER_REMOVE) {
		for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
					 p_params->mac, ETH_ALEN)) {
				OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
					      ETH_ALEN);
				break;
			}
		}

		if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "MAC isn't configured\n");
			return ECORE_INVAL;
		}
	} else if (p_params->opcode == ECORE_FILTER_REPLACE ||
		   p_params->opcode == ECORE_FILTER_FLUSH) {
		for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
			OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
	}

	/* List the new MAC address */
	if (p_params->opcode != ECORE_FILTER_ADD &&
	    p_params->opcode != ECORE_FILTER_REPLACE)
		return ECORE_SUCCESS;

	for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
		if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
				 empty_mac, ETH_ALEN)) {
			OSAL_MEMCPY(p_vf->shadow_config.macs[i],
				    p_params->mac, ETH_ALEN);
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "Added MAC at %d entry in shadow\n", i);
			break;
		}
	}

	if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No available place for MAC\n");
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   struct ecore_filter_ucast *p_params)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (p_params->type == ECORE_FILTER_MAC) {
		rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	if (p_params->type == ECORE_FILTER_VLAN)
		rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);

	return rc;
}
static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct ecore_vf_info *vf)
{
	struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_ucast_filter_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	struct ecore_filter_ucast params;
	enum _ecore_status_t rc;

	/* Prepare the unicast filter params */
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
	req = &mbx->req_virt->ucast_filter;
	params.opcode = (enum ecore_filter_opcode)req->opcode;
	params.type = (enum ecore_filter_ucast_type)req->type;

	/* @@@TBD - We might need logic on HV side in determining this */
	params.is_rx_filter = 1;
	params.is_tx_filter = 1;
	params.vport_to_remove_from = vf->vport_id;
	params.vport_to_add_to = vf->vport_id;
	OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
	params.vlan = req->vlan;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
		   " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
		   vf->abs_vf_id, params.opcode, params.type,
		   params.is_rx_filter ? "RX" : "",
		   params.is_tx_filter ? "TX" : "",
		   params.vport_to_add_to,
		   params.mac[0], params.mac[1], params.mac[2],
		   params.mac[3], params.mac[4], params.mac[5], params.vlan);

	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No VPORT instance available for VF[%d],"
			   " failing ucast MAC configuration\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Update shadow copy of the VF configuration */
	if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
	    ECORE_SUCCESS) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Determine if the unicast filtering is acceptable by PF */
	if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
	    (params.type == ECORE_FILTER_VLAN ||
	     params.type == ECORE_FILTER_MAC_VLAN)) {
		/* Once VLAN is forced or PVID is set, do not allow
		 * to add/replace any further VLANs.
		 */
		if (params.opcode == ECORE_FILTER_ADD ||
		    params.opcode == ECORE_FILTER_REPLACE)
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
	    (params.type == ECORE_FILTER_MAC ||
	     params.type == ECORE_FILTER_MAC_VLAN)) {
		if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
		    (params.opcode != ECORE_FILTER_ADD &&
		     params.opcode != ECORE_FILTER_REPLACE))
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
	if (rc == ECORE_EXISTS) {
		goto out;
	} else if (rc == ECORE_INVAL) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
				       ECORE_SPQ_MODE_CB, OSAL_NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
			       sizeof(struct pfvf_def_resp_tlv), status);
}
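/* Precedence enforced above: bulletin-board forced settings win over VF
 * requests. With VLAN_ADDR_FORCED set, ADD/REPLACE of further VLANs is
 * answered with PFVF_STATUS_FORCED; with MAC_ADDR_FORCED set, only the
 * forced MAC itself may be (re-)added. The VF keeps running - it merely
 * learns from the status that the PF overrode its filter request.
 */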
static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
{
	int i;

	/* Reset the VF's status blocks */
	for (i = 0; i < vf->num_sbs; i++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  vf->igu_sbs[i],
						  vf->opaque_fid, false);

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
			       sizeof(struct pfvf_def_resp_tlv),
			       PFVF_STATUS_SUCCESS);
}
static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;

	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
			       length, status);
}
static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *p_vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	ecore_iov_vf_cleanup(p_hwfn, p_vf);

	if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
		/* Stopping the VF */
		rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
				      p_vf->opaque_fid);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
			       rc);
			status = PFVF_STATUS_FAILURE;
		}

		p_vf->state = VF_STOPPED;
	}

	ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
			       length, status);
}
static enum _ecore_status_t
ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
			   struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
{
	int cnt;
	u32 val;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);

	for (cnt = 0; cnt < 50; cnt++) {
		val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
		if (!val)
			break;
		OSAL_MSLEEP(20);
	}
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	if (cnt == 50) {
		DP_ERR(p_hwfn,
		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
		       p_vf->abs_vf_id, val);
		return ECORE_TIMEOUT;
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
			  struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
{
	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
	int i, cnt;

	/* Read initial consumers & producers */
	for (i = 0; i < MAX_NUM_VOQS; i++) {
		u32 prod;

		cons[i] = ecore_rd(p_hwfn, p_ptt,
				   PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				   i * 0x40);
		prod = ecore_rd(p_hwfn, p_ptt,
				PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
				i * 0x40);
		distance[i] = prod - cons[i];
	}

	/* Wait for consumers to pass the producers */
	i = 0;
	for (cnt = 0; cnt < 50; cnt++) {
		for (; i < MAX_NUM_VOQS; i++) {
			u32 tmp;

			tmp = ecore_rd(p_hwfn, p_ptt,
				       PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				       i * 0x40);
			if (distance[i] > tmp - cons[i])
				break;
		}

		if (i == MAX_NUM_VOQS)
			break;

		OSAL_MSLEEP(20);
	}

	if (cnt == 50) {
		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
		       p_vf->abs_vf_id, i);
		return ECORE_TIMEOUT;
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
		      struct ecore_vf_info *p_vf,
		      struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;

	/* TODO - add SRC and TM polling once we add storage IOV */

	rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u16 rel_vf_id, u32 *ack_vfs)
{
	struct ecore_vf_info *p_vf;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!p_vf)
		return ECORE_SUCCESS;

	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
	    (1ULL << (rel_vf_id % 64))) {
		u16 vfid = p_vf->abs_vf_id;

		/* TODO - should we lock channel? */

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Handling FLR\n", vfid);

		ecore_iov_vf_cleanup(p_hwfn, p_vf);

		/* If VF isn't active, no need for anything but SW */
		if (!p_vf->b_init)
			goto cleanup;

		/* TODO - what to do in case of failure? */
		rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
		if (rc != ECORE_SUCCESS)
			goto cleanup;

		rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
		if (rc) {
			/* TODO - what's now? What a mess.... */
			DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
			return rc;
		}

		/* VF_STOPPED has to be set only after final cleanup
		 * but prior to re-enabling the VF.
		 */
		p_vf->state = VF_STOPPED;

		rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
		if (rc) {
			/* TODO - again, a mess... */
			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
			       vfid);
			return rc;
		}
cleanup:
		/* Mark VF for ack and clean pending state */
		if (p_vf->state == VF_RESET)
			p_vf->state = VF_STOPPED;
		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
	}

	return rc;
}
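/* FLR cleanup order used above, per VF with a pending_flr bit set:
 *   1. SW cleanup of the VF state (ecore_iov_vf_cleanup).
 *   2. Poll DORQ/PBF until HW drained the VF's outstanding work.
 *   3. Final-cleanup ramrod towards the FW (ecore_final_cleanup).
 *   4. Only then mark VF_STOPPED and re-enable VF access.
 *   5. Record the VF in ack_vfs[] and clear its pending bits; the caller
 *      later ACKs the whole batch to the MFW in one go.
 */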
enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u16 i;

	OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	/* Since BRB <-> PRS interface can't be tested as part of the flr
	 * polling due to HW limitations, simply sleep a bit. And since
	 * there's no need to wait per-vf, do it before looping.
	 */
	OSAL_MSLEEP(100);

	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
		ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);

	rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}
enum _ecore_status_t
ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u16 rel_vf_id)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	/* Wait instead of polling the BRB <-> PRS interface */
	OSAL_MSLEEP(100);

	ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);

	rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}
int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "[%08x,...,%08x]: %08x\n",
			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

	if (!p_hwfn->p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
		return false;
	}

	/* Mark VFs */
	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
		struct ecore_vf_info *p_vf;
		u8 vfid;

		p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
		if (!p_vf)
			continue;

		vfid = p_vf->abs_vf_id;
		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
			u16 rel_vf_id = p_vf->relative_vf_id;

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] [rel %d] got FLR-ed\n",
				   vfid, rel_vf_id);

			p_vf->state = VF_RESET;

			/* No need to lock here, since pending_flr should
			 * only change here and before ACKing MFw. Since
			 * MFW will not trigger an additional attention for
			 * VF flr until ACKs, we're safe.
			 */
			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
			found = true;
		}
	}

	return found;
}
void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *p_params,
			struct ecore_mcp_link_state *p_link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;

	if (p_params)
		__ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
	if (p_link)
		__ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
	if (p_caps)
		__ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
}
void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, int vfid)
{
	struct ecore_iov_vf_mbx *mbx;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* ecore_iov_process_mbx_request */
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);

	mbx->first_tlv = mbx->req_virt->first_tlv;

	OSAL_IOV_VF_MSG_TYPE(p_hwfn,
			     p_vf->relative_vf_id,
			     mbx->first_tlv.tl.type);

	/* Lock the per vf op mutex and note the locker's identity.
	 * The unlock will take place in mbx response.
	 */
	ecore_iov_lock_vf_pf_channel(p_hwfn,
				     p_vf, mbx->first_tlv.tl.type);

	/* check if tlv type is known */
	if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		DP_NOTICE(p_hwfn, false,
			  "VF[%02x]: unknown TLV. type %04x length %04x"
			  " padding %08x reply address %lu\n",
			  p_vf->abs_vf_id,
			  mbx->first_tlv.tl.type,
			  mbx->first_tlv.tl.length,
			  mbx->first_tlv.padding,
			  (unsigned long)mbx->first_tlv.reply_address);

		/* Try replying in case reply address matches the acquisition's
		 * posted address.
		 */
		if (p_vf->acquire.first_tlv.reply_address &&
		    (mbx->first_tlv.reply_address ==
		     p_vf->acquire.first_tlv.reply_address))
			ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
					       mbx->first_tlv.tl.type,
					       sizeof(struct pfvf_def_resp_tlv),
					       PFVF_STATUS_NOT_SUPPORTED);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%02x]: Can't respond to TLV -"
				   " no valid reply address\n",
				   p_vf->abs_vf_id);
	}

	ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
				       mbx->first_tlv.tl.type);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
	mbx->sw_mbx.response_offset = 0;
#endif
}
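/* The dispatcher above brackets every handler with
 * ecore_iov_lock_vf_pf_channel()/ecore_iov_unlock_vf_pf_channel(), so a VF
 * gets exactly one in-flight request at a time. Unknown TLV types are only
 * answered (PFVF_STATUS_NOT_SUPPORTED) when the reply address still matches
 * the one posted at ACQUIRE time; anything else is treated as garbage on
 * the channel and is merely logged.
 */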
void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
{
	u64 add_bit = 1ULL << (vfid % 64);

	/* TODO - add locking mechanisms [no atomics in ecore, so we can't
	 * add the lock inside the ecore_pf_iov struct].
	 */
	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
}

void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
					       u64 *events)
{
	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;

	/* TODO - Take a lock */
	OSAL_MEMCPY(events, p_pending_events,
		    sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
	OSAL_MEMSET(p_pending_events, 0,
		    sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
}
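/* Both pending_events and pending_flr are arrays of u64 bitmaps indexed the
 * same way. Worked example: for absolute VF id 70,
 *	pending_events[70 / 64] |= 1ULL << (70 % 64)
 * sets bit 6 of word 1. The TODOs above stand: without atomics these
 * read-modify-writes rely on the caller's context for safety.
 */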
static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
						 u16 abs_vfid,
						 struct regpair *vf_msg)
{
	u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
	struct ecore_vf_info *p_vf;

	if (!ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Got a message from VF [abs 0x%08x] that cannot be"
			   " handled by PF\n",
			   abs_vfid);
		return ECORE_SUCCESS;
	}
	p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];

	/* List the physical address of the request so that handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}
enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
					   u8 opcode,
					   __le16 echo,
					   union event_ring_data *data)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
					    &data->vf_pf_channel.msg_addr);
	case COMMON_EVENT_VF_FLR:
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF-FLR is still not supported\n");
		return ECORE_SUCCESS;
	default:
		DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return ECORE_INVAL;
	}
}
bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
		  (1ULL << (rel_vf_id % 64)));
}
u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (ecore_iov_is_valid_vfid(p_hwfn, i, true))
			return i;

out:
	return MAX_NUM_VFS;
}
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *ptt, int vfid)
{
	struct ecore_dmae_params params;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return ECORE_INVAL;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	if (ecore_dmae_host2host(p_hwfn, ptt,
				 vf_info->vf_mbx.pending_req,
				 vf_info->vf_mbx.req_phys,
				 sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);
		return ECORE_IO;
	}

	return ECORE_SUCCESS;
}
void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
				       u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}

	feature = 1 << MAC_ADDR_FORCED;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;
	/* Forced MAC will disable MAC_ADDR */
	vf_info->bulletin.p_virt->valid_bitmap &=
	    ~(1 << VFPF_BULLETIN_MAC_ADDR);

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
						u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set MAC, invalid vfid [%d]\n", vfid);
		return ECORE_INVAL;
	}

	if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set MAC, Forced MAC is configured\n");
		return ECORE_INVAL;
	}

	feature = 1 << VFPF_BULLETIN_MAC_ADDR;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
					       bool b_untagged_only, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set untagged default, invalid vfid [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	/* Since this is configurable only during vport-start, don't take it
	 * if we're past that point.
	 */
	if (vf_info->state == VF_ENABLED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can't support untagged change for vfid[%d] -"
			   " VF is already active\n",
			   vfid);
		return ECORE_INVAL;
	}

	/* Set configuration; This will later be taken into account during the
	 * VF initialization.
	 */
	feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
	    (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
	    : 0;

	return ECORE_SUCCESS;
}
void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
				  u16 *opaque_fid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	*opaque_fid = vf_info->opaque_fid;
}
void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
				u8 *p_vort_id)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	*p_vort_id = vf_info->vport_id;
}
void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
					u16 pvid, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced vlan, invalid vfid [%d]\n",
			  vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}
bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}
bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}
enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
					    int vfid, bool val)
{
	struct ecore_vf_info *vf;
	enum _ecore_status_t rc = ECORE_INVAL;

	if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn, true,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		goto out;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = ECORE_SUCCESS;
		goto out;
	}

	rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}
u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
{
	u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;

	max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
	    : ECORE_MAX_VF_CHAINS_PER_PF;

	return max_chains_per_vf;
}
void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					  u16 rel_vf_id,
					  void **pp_req_virt_addr,
					  u16 *p_req_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_req_virt_addr)
		*pp_req_virt_addr = vf_info->vf_mbx.req_virt;

	if (p_req_virt_size)
		*p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
}
void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					    u16 rel_vf_id,
					    void **pp_reply_virt_addr,
					    u16 *p_reply_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_reply_virt_addr)
		*pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;

	if (p_reply_virt_size)
		*p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
}
#ifdef CONFIG_ECORE_SW_CHANNEL
struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
						 u16 rel_vf_id)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return OSAL_NULL;

	return &vf_info->vf_mbx.sw_mbx;
}
#endif
bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
{
	return (length >= sizeof(struct vfpf_first_tlv) &&
		(length <= sizeof(union vfpf_tlvs)));
}

u32 ecore_iov_pfvf_msg_length(void)
{
	return sizeof(union pfvf_tlvs);
}
u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}
u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
				       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}
enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int vfid, int val)
{
	struct ecore_vf_info *vf;
	u8 abs_vp_id = 0;
	enum _ecore_status_t rc;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
						     int vfid, u32 rate)
{
	struct ecore_vf_info *vf;
	u8 vport_id;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn, true,
				  "SR-IOV sanity check failed,"
				  " can't set min rate\n");
			return ECORE_INVAL;
		}
	}

	vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
	vport_id = vf->vport_id;

	return ecore_configure_vport_wfq(p_dev, vport_id, rate);
}
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    int vfid,
					    struct ecore_eth_stats *p_stats)
{
	struct ecore_vf_info *vf;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	if (vf->state != VF_ENABLED)
		return ECORE_INVAL;

	__ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
				vf->abs_vf_id + 0x10, false);

	return ECORE_SUCCESS;
}
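/* The "+ 0x10" above is the same stats_id base used when starting the VF's
 * rx/tx queues earlier in this file (p_params.stats_id = vf->abs_vf_id +
 * 0x10), so the counters read here are the ones the VF's queues were
 * bound to.
 */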
u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_rxqs;
}
u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_active_rxqs;
}
void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return OSAL_NULL;

	return p_vf->ctx;
}
u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_sbs;
}
bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_FREE);
}
bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
					      u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ACQUIRED);
}
bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ENABLED);
}
int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_wfq_data *vf_vp_wfq;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;