/*
 * Copyright (c) 2016 QLogic Corporation.
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"

#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)	/* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES (50 * 1000)	/* Account for 500 msec */
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
		 _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     OFFSETOF(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17

static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
		    OSAL_BE32_TO_CPU(tmp);
	}
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
	}
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
	p_hwfn->mcp_info = OSAL_NULL;

	return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
		   " mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
					 MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
		goto err;

	/* Initialize the MFW spinlock */
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
	OSAL_SPIN_LOCK_INIT(&p_info->lock);

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}
/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when [UN]LOAD_REQ commands are sent), the single
 * access is achieved by setting a blocking flag, which causes the mailbox
 * commands of competing contexts to fail.
 */
static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
					      u32 cmd)
{
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

	/* The spinlock shouldn't be acquired when the mailbox command is
	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
	 * (i.e. interrupts are disabled) - can lead to a deadlock.
	 * It is assumed that for a single PF, no other mailbox commands can be
	 * sent from another context while sending LOAD_REQ, and that any
	 * parallel commands to UNLOAD_REQ can be cancelled.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
		p_hwfn->mcp_info->block_mb_sending = false;

	if (p_hwfn->mcp_info->block_mb_sending) {
		DP_NOTICE(p_hwfn, false,
			  "Trying to send a MFW mailbox command [0x%x]"
			  " in parallel to [UN]LOAD_REQ. Aborting.\n",
			  cmd);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
		return ECORE_BUSY;
	}

	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
		p_hwfn->mcp_info->block_mb_sending = true;
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
	}

	return ECORE_SUCCESS;
}

static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
{
	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
}
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the MFW up to 500 msec (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

	return rc;
}
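
/* Issue a single command through the driver mailbox: the driver writes the
 * parameter and the command (tagged with an incremented sequence number) to
 * the shmem driver mailbox, then polls fw_mb_header until the MFW echoes the
 * same sequence number back or the retry budget is exhausted.
 */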
static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 cmd, u32 param,
					     u32 *o_mcp_resp,
					     u32 *o_mcp_param)
{
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 seq, cnt = 1, actual_mb_seq;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;

	/* Get actual driver mailbox sequence */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < max_retries));

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* The MFW did not respond in time */
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		*o_mcp_resp = 0;
		rc = ECORE_AGAIN;
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
	}

	return rc;
}
static enum _ecore_status_t
ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct ecore_mcp_mb_params *p_mb_params)
{
	u32 union_data_addr;
	enum _ecore_status_t rc;

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
		return ECORE_BUSY;
	}

	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  OFFSETOF(struct public_drv_mb, union_data);

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (p_mb_params->p_data_src != OSAL_NULL)
		ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
				p_mb_params->p_data_src,
				sizeof(*p_mb_params->p_data_src));

	rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
			      p_mb_params->param, &p_mb_params->mcp_resp,
			      &p_mb_params->mcp_param);

	if (p_mb_params->p_data_dst != OSAL_NULL)
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr,
				  sizeof(*p_mb_params->p_data_dst));

	ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

	return rc;
}
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded--;
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
				   loaded);
		}
		return ECORE_SUCCESS;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size, u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size, u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	OSAL_MEMCPY(o_buf, (u32 *)&union_data.raw_data, *o_txn_size);

	return ECORE_SUCCESS;
}
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	else if (!loaded_port[p_hwfn->port_id])
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	else
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;

	/* On CMT, always tell that it's engine */
	if (p_hwfn->p_dev->num_hwfns > 1)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}

enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					u32 *p_load_code)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, p_load_code);
		return ECORE_SUCCESS;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
			  p_dev->drv_type;
	OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	*p_load_code = mb_params.mcp_resp;

	/* If MFW refused (e.g. other port is in diagnostic mode) we
	 * must abort. This can happen in the following cases:
	 * - Other port is in diagnostic mode
	 * - Previously loaded function on the engine is not compliant with
	 *   the requester.
	 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
	 */
	if (!(*p_load_code) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
		DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
		return ECORE_BUSY;
	}

	return ECORE_SUCCESS;
}
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x],"
		   " path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   OFFSETOF(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}

enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
				     &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 OFFSETOF(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}
static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     OFFSETOF(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw"
		   " [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    OFFSETOF(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
}
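
/* Translate the link_status word read from the port section of shmem into
 * the driver's ecore_mcp_link_state: link-up, speed/duplex, partner
 * abilities and pause settings, and re-apply the min/max bandwidth
 * configuration that depends on the negotiated line speed.
 */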
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  OFFSETOF(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw"
			   " [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					 OFFSETOF(struct public_port,
						  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* We never store the total line speed, since p_link->speed is
	 * changed again according to the bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
					   p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
					   p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
	    (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
	    ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
	    !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
	    !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_link->link_up)
		ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, p_link->pfc_enabled);

	OSAL_LINK_UPDATE(p_hwfn);
}
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt, bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	struct eth_phy_cfg *p_phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Set the shmem configuration according to params */
	p_phy_cfg = &union_data.drv_phy_cfg;
	OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		p_phy_cfg->speed = params->speed.forced_speed;
	p_phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	p_phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	p_phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	p_phy_cfg->adv_speed = params->speed.advertised_speeds;
	p_phy_cfg->loopback_mode = params->loopback_mode;
	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
			   " adv_speed 0x%08x, loopback 0x%08x,"
			   " features 0x%08x\n",
			   p_phy_cfg->speed, p_phy_cfg->pause,
			   p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode,
			   p_phy_cfg->feature_config_flags);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Reset the link status if needed */
	if (!b_up)
		ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);

	return rc;
}
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 OFFSETOF(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}

static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery"
			  " process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}
static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type;
	union ecore_mcp_protocol_stats stats;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
		return;
	}

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
	mb_params.p_data_src = &union_data;
	ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
static void
ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
			struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, assume there is always
	 * a limit, and correct the value to min `1' and max `100' if the
	 * limit isn't in range.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}
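
/* Copy this PF's public_func section from shmem into p_data, one dword at a
 * time, limited by the smaller of the local structure size and the shmem
 * section size.
 */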
static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data,
				    int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data),
			  SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));

	return size;
}

static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}
static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	DP_NOTICE(p_hwfn, false,
		  "Fan failure was detected on the network interface card"
		  " and it's going to be shut down.\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}

static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		    u32 mdump_cmd, union drv_union_data *p_data_src,
		    union drv_union_data *p_data_dst, u32 *p_mcp_resp)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
	mb_params.param = mdump_cmd;
	mb_params.p_data_src = p_data_src;
	mb_params.p_data_dst = p_data_dst;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = mb_params.mcp_resp;
	if (*p_mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
		DP_NOTICE(p_hwfn, false,
			  "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
			  mdump_cmd);
		rc = ECORE_INVAL;
	}

	return rc;
}
static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 mcp_resp;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_ACK,
				   OSAL_NULL, OSAL_NULL, &mcp_resp);
}

enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 epoch)
{
	union drv_union_data union_data;
	u32 mcp_resp;

	OSAL_MEMCPY(&union_data.raw_data, &epoch, sizeof(epoch));

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_SET_VALUES,
				   &union_data, OSAL_NULL, &mcp_resp);
}

enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	u32 mcp_resp;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_TRIGGER,
				   OSAL_NULL, OSAL_NULL, &mcp_resp);
}

enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 mcp_resp;

	return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_CLEAR_LOGS,
				   OSAL_NULL, OSAL_NULL, &mcp_resp);
}
static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct mdump_config_stc *p_mdump_config)
{
	union drv_union_data union_data;
	u32 mcp_resp;
	enum _ecore_status_t rc;

	rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_GET_CONFIG,
				 OSAL_NULL, &union_data, &mcp_resp);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* A zero response implies that the mdump command is not supported */
	if (!mcp_resp)
		return ECORE_NOTIMPL;

	if (mcp_resp != FW_MSG_CODE_OK) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
			  mcp_resp);
		rc = ECORE_UNKNOWN_ERROR;
	}

	OSAL_MEMCPY(p_mdump_config, &union_data.mdump_config,
		    sizeof(*p_mdump_config));

	return rc;
}

enum _ecore_status_t ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt)
{
	struct mdump_config_stc mdump_config;
	enum _ecore_status_t rc;

	rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
	if (rc != ECORE_SUCCESS)
		return rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW mdump_config: version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
		   mdump_config.version, mdump_config.config, mdump_config.epoc,
		   mdump_config.num_of_logs, mdump_config.valid_logs);

	if (mdump_config.valid_logs > 0) {
		DP_NOTICE(p_hwfn, false,
			  "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
	}

	return rc;
}

void ecore_mcp_mdump_enable(struct ecore_dev *p_dev, bool mdump_enable)
{
	p_dev->mdump_en = mdump_enable;
}
static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	/* In CMT mode - no need for more than a single acknowledgment to the
	 * MFW, and no more than a single notification to the upper driver.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
		return;

	DP_NOTICE(p_hwfn, false,
		  "Received a critical error notification from the MFW!\n");

	if (p_hwfn->p_dev->mdump_en) {
		DP_NOTICE(p_hwfn, false,
			  "Not acknowledging the notification to allow the MFW crash dump\n");
		return;
	}

	ecore_mcp_mdump_ack(p_hwfn, p_ptt);
	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
}
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *info = p_hwfn->mcp_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	ecore_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			ecore_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		default:
			DP_NOTICE(p_hwfn, false,
				  "Unimplemented MFW message %d\n", i);
			rc = ECORE_INVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expects the answer in BE, so we force write in that format */
		ecore_wr(p_hwfn, p_ptt,
			 info->mfw_mb_addr + sizeof(u32) +
			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
			 sizeof(u32) + i * sizeof(u32), val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn, false,
			  "Received an MFW message indication but no"
			  " new message!\n");
		rc = ECORE_INVAL;
	}

	/* Copy the new mfw messages into the shadow */
	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt,
					   u32 *p_mfw_ver,
					   u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
		return ECORE_SUCCESS;
	}

	if (IS_VF(p_hwfn->p_dev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return ECORE_SUCCESS;
		} else {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return ECORE_INVAL;
		}
	}

	global_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
						       public_base,
						       PUBLIC_GLOBAL));
	*p_mfw_ver =
	    ecore_rd(p_hwfn, p_ptt,
		     SECTION_ADDR(global_offsize,
				  0) + OFFSETOF(struct public_global, mfw_ver));

	if (p_running_bundle_id != OSAL_NULL) {
		*p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
						SECTION_ADDR(global_offsize,
							     0) +
						OFFSETOF(struct public_global,
							 running_bundle_id));
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
					      u32 *p_media_type)
{
	struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
	struct ecore_ptt *p_ptt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_dev))
		return ECORE_INVAL;

	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
		return ECORE_BUSY;
	}

	*p_media_type = MEDIA_UNSPECIFIED;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	*p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
				 OFFSETOF(struct public_port, media_type));

	ecore_ptt_release(p_hwfn, p_ptt);

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
			  struct public_func *p_info,
			  enum ecore_pci_personality *p_proto)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		*p_proto = ECORE_PCI_ETH;
		break;
	default:
		rc = ECORE_INVAL;
	}

	return rc;
}

enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *info;
	struct public_func shmem_info;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return ECORE_INVAL;
	}

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		/* TODO - are there protocols for which there's no MAC? */
		DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
	}

	/* TODO - are these calculations true for BE machine? */
	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x"
		   " protocol %02x BW [%02x - %02x]"
		   " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
		   " node %lx ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   (unsigned long)info->wwn_port,
		   (unsigned long)info->wwn_node, info->ovlan);

	return ECORE_SUCCESS;
}
struct ecore_mcp_link_params
*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct ecore_mcp_link_state
*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;

	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
		p_hwfn->mcp_info->link_output.link_up = true;
	}

	return &p_hwfn->mcp_info->link_output;
}

struct ecore_mcp_link_capabilities
*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt,
			   DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	OSAL_MSLEEP(1020);

	return rc;
}

const struct ecore_mcp_function_info
*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return OSAL_NULL;
	return &p_hwfn->mcp_info->func_info;
}
enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt,
					   struct ecore_mcp_nvm_params *params)
{
	enum _ecore_status_t rc;

	switch (params->type) {
	case ECORE_MCP_NVM_RD:
		rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
					  params->nvm_common.offset,
					  &params->nvm_common.resp,
					  &params->nvm_common.param,
					  params->nvm_rd.buf_size,
					  params->nvm_rd.buf);
		break;
	case ECORE_MCP_CMD:
		rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
				   params->nvm_common.offset,
				   &params->nvm_common.resp,
				   &params->nvm_common.param);
		break;
	case ECORE_MCP_NVM_WR:
		rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
					  params->nvm_common.offset,
					  &params->nvm_common.resp,
					  &params->nvm_common.param,
					  params->nvm_wr.buf_size,
					  params->nvm_wr.buf);
		break;
	default:
		rc = ECORE_NOTIMPL;
		break;
	}

	return rc;
}
int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt, u32 personalities)
{
	enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
	struct public_func shmem_info;
	int i, count = 0, num_pfs;

	num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);

	for (i = 0; i < num_pfs; i++) {
		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
					 MCP_PF_ID_BY_REL(p_hwfn, i));
		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info,
					      &protocol) != ECORE_SUCCESS)
			continue;

		if ((1 << ((u32)protocol)) & personalities)
			count++;
	}

	return count;
}

enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u32 *p_flash_size)
{
	u32 flash_size;

	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
		return ECORE_INVAL;
	}

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Avoid triggering a recovery since such a process"
			  " is already in progress\n");
		return ECORE_AGAIN;
	}

	DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	enum _ecore_status_t rc;

	/* Only Leader can configure MSIX, and need to take CMT into account */
	if (!IS_LEAD_HWFN(p_hwfn))
		return ECORE_SUCCESS;
	num *= p_hwfn->p_dev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			   &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
			  vf_id);
		rc = ECORE_INVAL;
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mcp_drv_version *p_ver)
{
	struct drv_version_stc *p_drv_version;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 num_words, i;
	void *p_name;
	OSAL_BE32 val;
	enum _ecore_status_t rc;

	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;
	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
	for (i = 0; i < num_words; i++) {
		p_name = &p_ver->name[i * sizeof(u32)];
		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
		*(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
	}

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			   &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
	u32 value, cpu_mode;

	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);

	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
}
enum _ecore_status_t
ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   enum ecore_ov_config_method config,
				   enum ecore_ov_client client)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;
	u32 drv_mb_param;

	switch (config) {
	case ECORE_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case ECORE_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", config);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

enum _ecore_status_t
ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 enum ecore_ov_driver_state drv_state)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;
	u32 drv_mb_param;

	switch (drv_state) {
	case ECORE_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case ECORE_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case ECORE_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			   drv_state, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
enum _ecore_status_t
ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_fc_npiv_tbl *p_table)
{
	return 0;
}

enum _ecore_status_t
ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u16 mtu)
{
	return 0;
}
enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       enum ecore_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	enum _ecore_status_t rc;

	switch (mode) {
	case ECORE_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case ECORE_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case ECORE_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
		return ECORE_INVAL;
	}

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			   drv_mb_param, &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 mask_parities)
{
	enum _ecore_status_t rc;
	u32 resp = 0, param = 0;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			   mask_parities, &resp, &param);

	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not ack mask parity request. Old MFW?\n");
		rc = ECORE_INVAL;
	}

	return rc;
}
enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
					u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u32 bytes_left, offset, bytes_to_copy, buf_size;
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	bytes_left = len;
	offset = 0;
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MCP_DRV_NVM_BUF_LEN);
		params.nvm_common.offset = (addr + offset) |
		    (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
					    FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
					u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &len;
	params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
	    DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
	params.nvm_common.offset = addr;
	params.nvm_rd.buf = (u32 *)p_buf;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
	ecore_ptt_release(p_hwfn, p_ptt);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
						  u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

/* rc receives ECORE_INVAL as default parameter because
 * it might not enter the while loop if the len is 0
 */
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	u32 buf_idx, buf_size;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_NVM_WR;
	if (cmd == ECORE_PUT_FILE_DATA)
		params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
	else
		params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;

	buf_idx = 0;
	while (buf_idx < len) {
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MCP_DRV_NVM_BUF_LEN);
		params.nvm_common.offset = ((buf_size <<
					     DRV_MB_PARAM_NVM_LEN_SHIFT)
					    | addr) + buf_idx;
		params.nvm_wr.buf_size = buf_size;
		params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if (rc != ECORE_SUCCESS ||
		    ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
		     (params.nvm_common.resp !=
		      FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
		 */
		if (buf_idx % 0x1000 >
		    (buf_idx + buf_size) % 0x1000)
			OSAL_MSLEEP(1);

		buf_idx += buf_size;
	}

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
					 u32 addr, u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_NVM_WR;
	params.nvm_wr.buf_size = len;
	params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
	    DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
	params.nvm_common.offset = addr;
	params.nvm_wr.buf = (u32 *)p_buf;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}

enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
						   u32 addr)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.type = ECORE_MCP_CMD;
	params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
	params.nvm_common.offset = addr;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 port, u32 addr, u32 offset,
					    u32 len, u8 *p_buf)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 bytes_left, bytes_to_copy, buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset =
		(port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
		(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);

	addr = offset;
	offset = 0;
	bytes_left = len;
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
	while (bytes_left > 0) {
		/* Read at most MAX_I2C_TRANSACTION_SIZE bytes per command */
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MAX_I2C_TRANSACTION_SIZE);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		params.nvm_common.offset &=
			(DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		params.nvm_common.offset |=
			((addr + offset) <<
			 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
		params.nvm_common.offset |=
			(bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
			return ECORE_NODEV;
		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	return ECORE_SUCCESS;
}
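/* Illustrative sketch (not part of the original driver): dump the first
 * bytes of a transceiver EEPROM on a given port using the chunked read
 * above.  The I2C address 0xA0, the 8-byte length, the guard macro and the
 * function name are illustrative assumptions.
 */
#ifdef ECORE_MCP_USAGE_EXAMPLES
static enum _ecore_status_t
ecore_example_sfp_dump(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		       u32 port)
{
	u8 eeprom[8];

	/* The helper splits the request into MAX_I2C_TRANSACTION_SIZE
	 * sized transactions internally.
	 */
	return ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, port, 0xA0, 0,
				      sizeof(eeprom), eeprom);
}
#endif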
enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 port, u32 addr, u32 offset,
					     u32 len, u8 *p_buf)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_idx, buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset =
		(port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
		(addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
	params.type = ECORE_MCP_NVM_WR;
	params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;

	buf_idx = 0;
	while (buf_idx < len) {
		/* Write at most MAX_I2C_TRANSACTION_SIZE bytes per command */
		buf_size = OSAL_MIN_T(u32, (len - buf_idx),
				      MAX_I2C_TRANSACTION_SIZE);
		params.nvm_common.offset &=
			(DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		params.nvm_common.offset |=
			((offset + buf_idx) <<
			 DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
		params.nvm_common.offset |=
			(buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
		params.nvm_wr.buf_size = buf_size;
		params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
			return ECORE_NODEV;
		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return ECORE_UNKNOWN_ERROR;

		buf_idx += buf_size;
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
			   drv_mb_param, &rsp, gpio_val);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u16 gpio, u16 gpio_val)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 drv_mb_param = 0, param, rsp;

	drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
		       (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 gpio, u32 *gpio_direction,
					 u32 *gpio_ctrl)
{
	u32 drv_mb_param = 0, rsp, val = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
			   drv_mb_param, &rsp, &val);
	if (rc != ECORE_SUCCESS)
		return rc;

	*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
			  DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
	*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
		     DRV_MB_PARAM_GPIO_CTRL_SHIFT;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
		return ECORE_UNKNOWN_ERROR;

	return ECORE_SUCCESS;
}
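/* Illustrative sketch (not part of the original driver): read a GPIO via the
 * MFW and then drive it to the opposite value with the write helper above.
 * The guard macro and function name are hypothetical; the GPIO number is
 * whatever the caller passes in.
 */
#ifdef ECORE_MCP_USAGE_EXAMPLES
static enum _ecore_status_t
ecore_example_gpio_toggle(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			  u16 gpio)
{
	enum _ecore_status_t rc;
	u32 gpio_val;

	rc = ecore_mcp_gpio_read(p_hwfn, p_ptt, gpio, &gpio_val);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Write back the inverted value */
	return ecore_mcp_gpio_write(p_hwfn, p_ptt, gpio,
				    (u16)(gpio_val ? 0 : 1));
}
#endif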
enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, &param);

	if (rc != ECORE_SUCCESS)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
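/* Illustrative sketch (not part of the original driver): run the two MFW
 * BIST tests above back to back and return on the first failure.  The guard
 * macro and function name are hypothetical.
 */
#ifdef ECORE_MCP_USAGE_EXAMPLES
static enum _ecore_status_t
ecore_example_run_bist(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_bist_register_test(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_mcp_bist_clock_test(p_hwfn, p_ptt);
}
#endif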
enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			   drv_mb_param, &rsp, num_images);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
	struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
	struct bist_nvm_image_att *p_image_att, u32 image_index)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_size;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
				    DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
	params.nvm_common.offset |= (image_index <<
				     DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);

	params.type = ECORE_MCP_NVM_RD;
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
	params.nvm_rd.buf = (u32 *)p_image_att;

	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
enum _ecore_status_t
ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_temperature_info *p_temp_info)
{
	struct ecore_temperature_sensor *p_temp_sensor;
	struct temperature_status_stc *p_mfw_temp_info;
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 val;
	enum _ecore_status_t rc;
	u8 i;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
	mb_params.p_data_dst = &union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_mfw_temp_info = &union_data.temp_info;

	OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
	p_temp_info->num_sensors = OSAL_MIN_T(u32,
					      p_mfw_temp_info->num_of_sensors,
					      ECORE_MAX_NUM_OF_SENSORS);
	for (i = 0; i < p_temp_info->num_sensors; i++) {
		val = p_mfw_temp_info->sensor[i];
		p_temp_sensor = &p_temp_info->sensors[i];
		p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
						 SENSOR_LOCATION_SHIFT;
		p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
						THRESHOLD_HIGH_SHIFT;
		p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
					  CRITICAL_TEMPERATURE_SHIFT;
		p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
					      CURRENT_TEMP_SHIFT;
	}

	return ECORE_SUCCESS;
}
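/* Illustrative sketch (not part of the original driver): query the sensors
 * reported by the MFW and log their current readings.  The guard macro and
 * function name are hypothetical; current_temp is printed in whatever units
 * the MFW reports.
 */
#ifdef ECORE_MCP_USAGE_EXAMPLES
static void
ecore_example_dump_temperature(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt)
{
	struct ecore_temperature_info temp_info;
	u32 i;

	if (ecore_mcp_get_temperature_info(p_hwfn, p_ptt, &temp_info) !=
	    ECORE_SUCCESS)
		return;

	for (i = 0; i < temp_info.num_sensors; i++)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "sensor %d: current temperature %d\n",
			   i, temp_info.sensors[i].current_temp);
}
#endif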
enum _ecore_status_t ecore_mcp_get_mba_versions(
	struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt,
	struct ecore_mba_vers *p_mba_vers)
{
	struct ecore_mcp_nvm_params params;
	enum _ecore_status_t rc;
	u32 buf_size;

	OSAL_MEM_ZERO(&params, sizeof(params));
	params.type = ECORE_MCP_NVM_RD;
	params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
	params.nvm_common.offset = 0;
	params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
	params.nvm_rd.buf_size = &buf_size;
	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);

	if (rc != ECORE_SUCCESS)
		return rc;

	if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
	    FW_MSG_CODE_NVM_OK)
		rc = ECORE_UNKNOWN_ERROR;

	if (buf_size != MCP_DRV_NVM_BUF_LEN)
		rc = ECORE_UNKNOWN_ERROR;

	return rc;
}
enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u64 *num_events)
{
	u32 rsp;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
			     0, &rsp, (u32 *)num_events);
}
#define ECORE_RESC_ALLOC_VERSION_MAJOR 1
#define ECORE_RESC_ALLOC_VERSION_MINOR 0
#define ECORE_RESC_ALLOC_VERSION				\
	((ECORE_RESC_ALLOC_VERSION_MAJOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) |	\
	 (ECORE_RESC_ALLOC_VERSION_MINOR <<			\
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     struct resource_info *p_resc_info,
					     u32 *p_mcp_resp, u32 *p_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	union drv_union_data *p_union_data;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	mb_params.param = ECORE_RESC_ALLOC_VERSION;
	p_union_data = (union drv_union_data *)p_resc_info;
	mb_params.p_data_src = p_union_data;
	mb_params.p_data_dst = p_union_data;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_mcp_resp = mb_params.mcp_resp;
	*p_mcp_param = mb_params.mcp_param;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x,"
		   " offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
		   *p_mcp_param, p_resc_info->res_id, p_resc_info->size,
		   p_resc_info->offset, p_resc_info->vf_size,
		   p_resc_info->vf_offset, p_resc_info->flags);

	return ECORE_SUCCESS;
}
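/* Illustrative sketch (not part of the original driver): ask the MFW for the
 * allocation of a single resource and return its size.  Filling only res_id
 * before the call, the u8 resource-id parameter, the guard macro and the
 * function name are assumptions made for illustration.
 */
#ifdef ECORE_MCP_USAGE_EXAMPLES
static enum _ecore_status_t
ecore_example_query_resc(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 u8 res_id, u32 *p_size)
{
	struct resource_info resc_info;
	u32 mcp_resp, mcp_param;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&resc_info, sizeof(resc_info));
	resc_info.res_id = res_id;

	rc = ecore_mcp_get_resc_info(p_hwfn, p_ptt, &resc_info,
				     &mcp_resp, &mcp_param);
	if (rc != ECORE_SUCCESS)
		return rc;

	*p_size = resc_info.size;
	return ECORE_SUCCESS;
}
#endif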
enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR,
			     0, &mcp_resp, &mcp_param);
}