/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
9 #include <linux/types.h>
10 #include <asm/byteorder.h>
11 #include <linux/delay.h>
12 #include <linux/errno.h>
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/spinlock.h>
16 #include <linux/string.h>
22 #include "qed_reg_addr.h"
23 #include "qed_sriov.h"
/* Polling interval (usec) while waiting for an MFW mailbox response */
#define CHIP_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

/* Read/write a dword at a runtime-computed offset within one of the
 * mailbox regions tracked in mcp_info (_ptr names the mcp_info field).
 * NOTE(review): the garbled extraction dropped the '_val' continuation
 * line of DRV_INNER_WR; restored here so the macro expands correctly.
 */
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

/* Access a named field of the driver mailbox (struct public_drv_mb) */
#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)	  \
	DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr,	  \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

/* Driver's FW-compatibility version, placed in the PDA field of LOAD_REQ */
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

/* log2(bytes per Mbit) - converts the NVM CFG4 flash-size field to bytes */
#define MCP_BYTES_PER_MBIT_SHIFT 17
50 bool qed_mcp_is_init(struct qed_hwfn
*p_hwfn
)
52 if (!p_hwfn
->mcp_info
|| !p_hwfn
->mcp_info
->public_base
)
57 void qed_mcp_cmd_port_init(struct qed_hwfn
*p_hwfn
,
58 struct qed_ptt
*p_ptt
)
60 u32 addr
= SECTION_OFFSIZE_ADDR(p_hwfn
->mcp_info
->public_base
,
62 u32 mfw_mb_offsize
= qed_rd(p_hwfn
, p_ptt
, addr
);
64 p_hwfn
->mcp_info
->port_addr
= SECTION_ADDR(mfw_mb_offsize
,
66 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
67 "port_addr = 0x%x, port_id 0x%02x\n",
68 p_hwfn
->mcp_info
->port_addr
, MFW_PORT(p_hwfn
));
71 void qed_mcp_read_mb(struct qed_hwfn
*p_hwfn
,
72 struct qed_ptt
*p_ptt
)
74 u32 length
= MFW_DRV_MSG_MAX_DWORDS(p_hwfn
->mcp_info
->mfw_mb_length
);
77 if (!p_hwfn
->mcp_info
->public_base
)
80 for (i
= 0; i
< length
; i
++) {
81 tmp
= qed_rd(p_hwfn
, p_ptt
,
82 p_hwfn
->mcp_info
->mfw_mb_addr
+
83 (i
<< 2) + sizeof(u32
));
85 /* The MB data is actually BE; Need to force it to cpu */
86 ((u32
*)p_hwfn
->mcp_info
->mfw_mb_cur
)[i
] =
87 be32_to_cpu((__force __be32
)tmp
);
91 int qed_mcp_free(struct qed_hwfn
*p_hwfn
)
93 if (p_hwfn
->mcp_info
) {
94 kfree(p_hwfn
->mcp_info
->mfw_mb_cur
);
95 kfree(p_hwfn
->mcp_info
->mfw_mb_shadow
);
97 kfree(p_hwfn
->mcp_info
);
102 static int qed_load_mcp_offsets(struct qed_hwfn
*p_hwfn
,
103 struct qed_ptt
*p_ptt
)
105 struct qed_mcp_info
*p_info
= p_hwfn
->mcp_info
;
106 u32 drv_mb_offsize
, mfw_mb_offsize
;
107 u32 mcp_pf_id
= MCP_PF_ID(p_hwfn
);
109 p_info
->public_base
= qed_rd(p_hwfn
, p_ptt
, MISC_REG_SHARED_MEM_ADDR
);
110 if (!p_info
->public_base
)
113 p_info
->public_base
|= GRCBASE_MCP
;
115 /* Calculate the driver and MFW mailbox address */
116 drv_mb_offsize
= qed_rd(p_hwfn
, p_ptt
,
117 SECTION_OFFSIZE_ADDR(p_info
->public_base
,
119 p_info
->drv_mb_addr
= SECTION_ADDR(drv_mb_offsize
, mcp_pf_id
);
120 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
121 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
122 drv_mb_offsize
, p_info
->drv_mb_addr
, mcp_pf_id
);
124 /* Set the MFW MB address */
125 mfw_mb_offsize
= qed_rd(p_hwfn
, p_ptt
,
126 SECTION_OFFSIZE_ADDR(p_info
->public_base
,
128 p_info
->mfw_mb_addr
= SECTION_ADDR(mfw_mb_offsize
, mcp_pf_id
);
129 p_info
->mfw_mb_length
= (u16
)qed_rd(p_hwfn
, p_ptt
, p_info
->mfw_mb_addr
);
131 /* Get the current driver mailbox sequence before sending
134 p_info
->drv_mb_seq
= DRV_MB_RD(p_hwfn
, p_ptt
, drv_mb_header
) &
135 DRV_MSG_SEQ_NUMBER_MASK
;
137 /* Get current FW pulse sequence */
138 p_info
->drv_pulse_seq
= DRV_MB_RD(p_hwfn
, p_ptt
, drv_pulse_mb
) &
141 p_info
->mcp_hist
= (u16
)qed_rd(p_hwfn
, p_ptt
, MISCS_REG_GENERIC_POR_0
);
146 int qed_mcp_cmd_init(struct qed_hwfn
*p_hwfn
,
147 struct qed_ptt
*p_ptt
)
149 struct qed_mcp_info
*p_info
;
152 /* Allocate mcp_info structure */
153 p_hwfn
->mcp_info
= kzalloc(sizeof(*p_hwfn
->mcp_info
), GFP_KERNEL
);
154 if (!p_hwfn
->mcp_info
)
156 p_info
= p_hwfn
->mcp_info
;
158 if (qed_load_mcp_offsets(p_hwfn
, p_ptt
) != 0) {
159 DP_NOTICE(p_hwfn
, "MCP is not initialized\n");
160 /* Do not free mcp_info here, since public_base indicate that
161 * the MCP is not initialized
166 size
= MFW_DRV_MSG_MAX_DWORDS(p_info
->mfw_mb_length
) * sizeof(u32
);
167 p_info
->mfw_mb_cur
= kzalloc(size
, GFP_KERNEL
);
168 p_info
->mfw_mb_shadow
=
169 kzalloc(sizeof(u32
) * MFW_DRV_MSG_MAX_DWORDS(
170 p_info
->mfw_mb_length
), GFP_KERNEL
);
171 if (!p_info
->mfw_mb_shadow
|| !p_info
->mfw_mb_addr
)
174 /* Initialize the MFW spinlock */
175 spin_lock_init(&p_info
->lock
);
180 DP_NOTICE(p_hwfn
, "Failed to allocate mcp memory\n");
181 qed_mcp_free(p_hwfn
);
185 /* Locks the MFW mailbox of a PF to ensure a single access.
186 * The lock is achieved in most cases by holding a spinlock, causing other
187 * threads to wait till a previous access is done.
188 * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
189 * access is achieved by setting a blocking flag, which will fail other
190 * competing contexts to send their mailboxes.
192 static int qed_mcp_mb_lock(struct qed_hwfn
*p_hwfn
,
195 spin_lock_bh(&p_hwfn
->mcp_info
->lock
);
197 /* The spinlock shouldn't be acquired when the mailbox command is
198 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
199 * pending [UN]LOAD_REQ command of another PF together with a spinlock
200 * (i.e. interrupts are disabled) - can lead to a deadlock.
201 * It is assumed that for a single PF, no other mailbox commands can be
202 * sent from another context while sending LOAD_REQ, and that any
203 * parallel commands to UNLOAD_REQ can be cancelled.
205 if (cmd
== DRV_MSG_CODE_LOAD_DONE
|| cmd
== DRV_MSG_CODE_UNLOAD_DONE
)
206 p_hwfn
->mcp_info
->block_mb_sending
= false;
208 if (p_hwfn
->mcp_info
->block_mb_sending
) {
210 "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
212 spin_unlock_bh(&p_hwfn
->mcp_info
->lock
);
216 if (cmd
== DRV_MSG_CODE_LOAD_REQ
|| cmd
== DRV_MSG_CODE_UNLOAD_REQ
) {
217 p_hwfn
->mcp_info
->block_mb_sending
= true;
218 spin_unlock_bh(&p_hwfn
->mcp_info
->lock
);
224 static void qed_mcp_mb_unlock(struct qed_hwfn
*p_hwfn
,
227 if (cmd
!= DRV_MSG_CODE_LOAD_REQ
&& cmd
!= DRV_MSG_CODE_UNLOAD_REQ
)
228 spin_unlock_bh(&p_hwfn
->mcp_info
->lock
);
231 int qed_mcp_reset(struct qed_hwfn
*p_hwfn
,
232 struct qed_ptt
*p_ptt
)
234 u32 seq
= ++p_hwfn
->mcp_info
->drv_mb_seq
;
235 u8 delay
= CHIP_MCP_RESP_ITER_US
;
236 u32 org_mcp_reset_seq
, cnt
= 0;
239 /* Ensure that only a single thread is accessing the mailbox at a
242 rc
= qed_mcp_mb_lock(p_hwfn
, DRV_MSG_CODE_MCP_RESET
);
246 /* Set drv command along with the updated sequence */
247 org_mcp_reset_seq
= qed_rd(p_hwfn
, p_ptt
, MISCS_REG_GENERIC_POR_0
);
248 DRV_MB_WR(p_hwfn
, p_ptt
, drv_mb_header
,
249 (DRV_MSG_CODE_MCP_RESET
| seq
));
252 /* Wait for MFW response */
254 /* Give the FW up to 500 second (50*1000*10usec) */
255 } while ((org_mcp_reset_seq
== qed_rd(p_hwfn
, p_ptt
,
256 MISCS_REG_GENERIC_POR_0
)) &&
257 (cnt
++ < QED_MCP_RESET_RETRIES
));
259 if (org_mcp_reset_seq
!=
260 qed_rd(p_hwfn
, p_ptt
, MISCS_REG_GENERIC_POR_0
)) {
261 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
262 "MCP was reset after %d usec\n", cnt
* delay
);
264 DP_ERR(p_hwfn
, "Failed to reset MCP\n");
268 qed_mcp_mb_unlock(p_hwfn
, DRV_MSG_CODE_MCP_RESET
);
273 static int qed_do_mcp_cmd(struct qed_hwfn
*p_hwfn
,
274 struct qed_ptt
*p_ptt
,
280 u8 delay
= CHIP_MCP_RESP_ITER_US
;
281 u32 seq
, cnt
= 1, actual_mb_seq
;
284 /* Get actual driver mailbox sequence */
285 actual_mb_seq
= DRV_MB_RD(p_hwfn
, p_ptt
, drv_mb_header
) &
286 DRV_MSG_SEQ_NUMBER_MASK
;
288 /* Use MCP history register to check if MCP reset occurred between
291 if (p_hwfn
->mcp_info
->mcp_hist
!=
292 qed_rd(p_hwfn
, p_ptt
, MISCS_REG_GENERIC_POR_0
)) {
293 DP_VERBOSE(p_hwfn
, QED_MSG_SP
, "Rereading MCP offsets\n");
294 qed_load_mcp_offsets(p_hwfn
, p_ptt
);
295 qed_mcp_cmd_port_init(p_hwfn
, p_ptt
);
297 seq
= ++p_hwfn
->mcp_info
->drv_mb_seq
;
300 DRV_MB_WR(p_hwfn
, p_ptt
, drv_mb_param
, param
);
302 /* Set drv command along with the updated sequence */
303 DRV_MB_WR(p_hwfn
, p_ptt
, drv_mb_header
, (cmd
| seq
));
305 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
306 "wrote command (%x) to MFW MB param 0x%08x\n",
310 /* Wait for MFW response */
312 *o_mcp_resp
= DRV_MB_RD(p_hwfn
, p_ptt
, fw_mb_header
);
314 /* Give the FW up to 5 second (500*10ms) */
315 } while ((seq
!= (*o_mcp_resp
& FW_MSG_SEQ_NUMBER_MASK
)) &&
316 (cnt
++ < QED_DRV_MB_MAX_RETRIES
));
318 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
319 "[after %d ms] read (%x) seq is (%x) from FW MB\n",
320 cnt
* delay
, *o_mcp_resp
, seq
);
322 /* Is this a reply to our command? */
323 if (seq
== (*o_mcp_resp
& FW_MSG_SEQ_NUMBER_MASK
)) {
324 *o_mcp_resp
&= FW_MSG_CODE_MASK
;
325 /* Get the MCP param */
326 *o_mcp_param
= DRV_MB_RD(p_hwfn
, p_ptt
, fw_mb_param
);
329 DP_ERR(p_hwfn
, "MFW failed to respond!\n");
336 static int qed_mcp_cmd_and_union(struct qed_hwfn
*p_hwfn
,
337 struct qed_ptt
*p_ptt
,
338 struct qed_mcp_mb_params
*p_mb_params
)
343 /* MCP not initialized */
344 if (!qed_mcp_is_init(p_hwfn
)) {
345 DP_NOTICE(p_hwfn
, "MFW is not initialized !\n");
349 union_data_addr
= p_hwfn
->mcp_info
->drv_mb_addr
+
350 offsetof(struct public_drv_mb
, union_data
);
352 /* Ensure that only a single thread is accessing the mailbox at a
355 rc
= qed_mcp_mb_lock(p_hwfn
, p_mb_params
->cmd
);
359 if (p_mb_params
->p_data_src
!= NULL
)
360 qed_memcpy_to(p_hwfn
, p_ptt
, union_data_addr
,
361 p_mb_params
->p_data_src
,
362 sizeof(*p_mb_params
->p_data_src
));
364 rc
= qed_do_mcp_cmd(p_hwfn
, p_ptt
, p_mb_params
->cmd
,
365 p_mb_params
->param
, &p_mb_params
->mcp_resp
,
366 &p_mb_params
->mcp_param
);
368 if (p_mb_params
->p_data_dst
!= NULL
)
369 qed_memcpy_from(p_hwfn
, p_ptt
, p_mb_params
->p_data_dst
,
371 sizeof(*p_mb_params
->p_data_dst
));
373 qed_mcp_mb_unlock(p_hwfn
, p_mb_params
->cmd
);
378 int qed_mcp_cmd(struct qed_hwfn
*p_hwfn
,
379 struct qed_ptt
*p_ptt
,
385 struct qed_mcp_mb_params mb_params
;
388 memset(&mb_params
, 0, sizeof(mb_params
));
390 mb_params
.param
= param
;
391 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
395 *o_mcp_resp
= mb_params
.mcp_resp
;
396 *o_mcp_param
= mb_params
.mcp_param
;
401 int qed_mcp_load_req(struct qed_hwfn
*p_hwfn
,
402 struct qed_ptt
*p_ptt
,
405 struct qed_dev
*cdev
= p_hwfn
->cdev
;
406 struct qed_mcp_mb_params mb_params
;
407 union drv_union_data union_data
;
410 memset(&mb_params
, 0, sizeof(mb_params
));
412 mb_params
.cmd
= DRV_MSG_CODE_LOAD_REQ
;
413 mb_params
.param
= PDA_COMP
| DRV_ID_MCP_HSI_VER_CURRENT
|
415 memcpy(&union_data
.ver_str
, cdev
->ver_str
, MCP_DRV_VER_STR_SIZE
);
416 mb_params
.p_data_src
= &union_data
;
417 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
419 /* if mcp fails to respond we must abort */
421 DP_ERR(p_hwfn
, "MCP response failure, aborting\n");
425 *p_load_code
= mb_params
.mcp_resp
;
427 /* If MFW refused (e.g. other port is in diagnostic mode) we
428 * must abort. This can happen in the following cases:
429 * - Other port is in diagnostic mode
430 * - Previously loaded function on the engine is not compliant with
432 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
435 if (!(*p_load_code
) ||
436 ((*p_load_code
) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI
) ||
437 ((*p_load_code
) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA
) ||
438 ((*p_load_code
) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG
)) {
439 DP_ERR(p_hwfn
, "MCP refused load request, aborting\n");
446 static void qed_mcp_handle_vf_flr(struct qed_hwfn
*p_hwfn
,
447 struct qed_ptt
*p_ptt
)
449 u32 addr
= SECTION_OFFSIZE_ADDR(p_hwfn
->mcp_info
->public_base
,
451 u32 mfw_path_offsize
= qed_rd(p_hwfn
, p_ptt
, addr
);
452 u32 path_addr
= SECTION_ADDR(mfw_path_offsize
,
453 QED_PATH_ID(p_hwfn
));
454 u32 disabled_vfs
[VF_MAX_STATIC
/ 32];
459 "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
460 mfw_path_offsize
, path_addr
);
462 for (i
= 0; i
< (VF_MAX_STATIC
/ 32); i
++) {
463 disabled_vfs
[i
] = qed_rd(p_hwfn
, p_ptt
,
465 offsetof(struct public_path
,
468 DP_VERBOSE(p_hwfn
, (QED_MSG_SP
| QED_MSG_IOV
),
469 "FLR-ed VFs [%08x,...,%08x] - %08x\n",
470 i
* 32, (i
+ 1) * 32 - 1, disabled_vfs
[i
]);
473 if (qed_iov_mark_vf_flr(p_hwfn
, disabled_vfs
))
474 qed_schedule_iov(p_hwfn
, QED_IOV_WQ_FLR_FLAG
);
477 int qed_mcp_ack_vf_flr(struct qed_hwfn
*p_hwfn
,
478 struct qed_ptt
*p_ptt
, u32
*vfs_to_ack
)
480 u32 addr
= SECTION_OFFSIZE_ADDR(p_hwfn
->mcp_info
->public_base
,
482 u32 mfw_func_offsize
= qed_rd(p_hwfn
, p_ptt
, addr
);
483 u32 func_addr
= SECTION_ADDR(mfw_func_offsize
,
485 struct qed_mcp_mb_params mb_params
;
486 union drv_union_data union_data
;
490 for (i
= 0; i
< (VF_MAX_STATIC
/ 32); i
++)
491 DP_VERBOSE(p_hwfn
, (QED_MSG_SP
| QED_MSG_IOV
),
492 "Acking VFs [%08x,...,%08x] - %08x\n",
493 i
* 32, (i
+ 1) * 32 - 1, vfs_to_ack
[i
]);
495 memset(&mb_params
, 0, sizeof(mb_params
));
496 mb_params
.cmd
= DRV_MSG_CODE_VF_DISABLED_DONE
;
497 memcpy(&union_data
.ack_vf_disabled
, vfs_to_ack
, VF_MAX_STATIC
/ 8);
498 mb_params
.p_data_src
= &union_data
;
499 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
501 DP_NOTICE(p_hwfn
, "Failed to pass ACK for VF flr to MFW\n");
505 /* Clear the ACK bits */
506 for (i
= 0; i
< (VF_MAX_STATIC
/ 32); i
++)
507 qed_wr(p_hwfn
, p_ptt
,
509 offsetof(struct public_func
, drv_ack_vf_disabled
) +
515 static void qed_mcp_handle_transceiver_change(struct qed_hwfn
*p_hwfn
,
516 struct qed_ptt
*p_ptt
)
518 u32 transceiver_state
;
520 transceiver_state
= qed_rd(p_hwfn
, p_ptt
,
521 p_hwfn
->mcp_info
->port_addr
+
522 offsetof(struct public_port
,
526 (NETIF_MSG_HW
| QED_MSG_SP
),
527 "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
529 (u32
)(p_hwfn
->mcp_info
->port_addr
+
530 offsetof(struct public_port
,
533 transceiver_state
= GET_FIELD(transceiver_state
,
534 ETH_TRANSCEIVER_STATE
);
536 if (transceiver_state
== ETH_TRANSCEIVER_STATE_PRESENT
)
537 DP_NOTICE(p_hwfn
, "Transceiver is present.\n");
539 DP_NOTICE(p_hwfn
, "Transceiver is unplugged.\n");
542 static void qed_mcp_handle_link_change(struct qed_hwfn
*p_hwfn
,
543 struct qed_ptt
*p_ptt
,
546 struct qed_mcp_link_state
*p_link
;
550 p_link
= &p_hwfn
->mcp_info
->link_output
;
551 memset(p_link
, 0, sizeof(*p_link
));
553 status
= qed_rd(p_hwfn
, p_ptt
,
554 p_hwfn
->mcp_info
->port_addr
+
555 offsetof(struct public_port
, link_status
));
556 DP_VERBOSE(p_hwfn
, (NETIF_MSG_LINK
| QED_MSG_SP
),
557 "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
559 (u32
)(p_hwfn
->mcp_info
->port_addr
+
560 offsetof(struct public_port
,
563 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
,
564 "Resetting link indications\n");
568 if (p_hwfn
->b_drv_link_init
)
569 p_link
->link_up
= !!(status
& LINK_STATUS_LINK_UP
);
571 p_link
->link_up
= false;
573 p_link
->full_duplex
= true;
574 switch ((status
& LINK_STATUS_SPEED_AND_DUPLEX_MASK
)) {
575 case LINK_STATUS_SPEED_AND_DUPLEX_100G
:
576 p_link
->speed
= 100000;
578 case LINK_STATUS_SPEED_AND_DUPLEX_50G
:
579 p_link
->speed
= 50000;
581 case LINK_STATUS_SPEED_AND_DUPLEX_40G
:
582 p_link
->speed
= 40000;
584 case LINK_STATUS_SPEED_AND_DUPLEX_25G
:
585 p_link
->speed
= 25000;
587 case LINK_STATUS_SPEED_AND_DUPLEX_20G
:
588 p_link
->speed
= 20000;
590 case LINK_STATUS_SPEED_AND_DUPLEX_10G
:
591 p_link
->speed
= 10000;
593 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD
:
594 p_link
->full_duplex
= false;
596 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
:
597 p_link
->speed
= 1000;
603 if (p_link
->link_up
&& p_link
->speed
)
604 p_link
->line_speed
= p_link
->speed
;
606 p_link
->line_speed
= 0;
608 max_bw
= p_hwfn
->mcp_info
->func_info
.bandwidth_max
;
609 min_bw
= p_hwfn
->mcp_info
->func_info
.bandwidth_min
;
611 /* Max bandwidth configuration */
612 __qed_configure_pf_max_bandwidth(p_hwfn
, p_ptt
, p_link
, max_bw
);
614 /* Min bandwidth configuration */
615 __qed_configure_pf_min_bandwidth(p_hwfn
, p_ptt
, p_link
, min_bw
);
616 qed_configure_vp_wfq_on_link_change(p_hwfn
->cdev
, p_link
->min_pf_rate
);
618 p_link
->an
= !!(status
& LINK_STATUS_AUTO_NEGOTIATE_ENABLED
);
619 p_link
->an_complete
= !!(status
&
620 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE
);
621 p_link
->parallel_detection
= !!(status
&
622 LINK_STATUS_PARALLEL_DETECTION_USED
);
623 p_link
->pfc_enabled
= !!(status
& LINK_STATUS_PFC_ENABLED
);
625 p_link
->partner_adv_speed
|=
626 (status
& LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE
) ?
627 QED_LINK_PARTNER_SPEED_1G_FD
: 0;
628 p_link
->partner_adv_speed
|=
629 (status
& LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE
) ?
630 QED_LINK_PARTNER_SPEED_1G_HD
: 0;
631 p_link
->partner_adv_speed
|=
632 (status
& LINK_STATUS_LINK_PARTNER_10G_CAPABLE
) ?
633 QED_LINK_PARTNER_SPEED_10G
: 0;
634 p_link
->partner_adv_speed
|=
635 (status
& LINK_STATUS_LINK_PARTNER_20G_CAPABLE
) ?
636 QED_LINK_PARTNER_SPEED_20G
: 0;
637 p_link
->partner_adv_speed
|=
638 (status
& LINK_STATUS_LINK_PARTNER_40G_CAPABLE
) ?
639 QED_LINK_PARTNER_SPEED_40G
: 0;
640 p_link
->partner_adv_speed
|=
641 (status
& LINK_STATUS_LINK_PARTNER_50G_CAPABLE
) ?
642 QED_LINK_PARTNER_SPEED_50G
: 0;
643 p_link
->partner_adv_speed
|=
644 (status
& LINK_STATUS_LINK_PARTNER_100G_CAPABLE
) ?
645 QED_LINK_PARTNER_SPEED_100G
: 0;
647 p_link
->partner_tx_flow_ctrl_en
=
648 !!(status
& LINK_STATUS_TX_FLOW_CONTROL_ENABLED
);
649 p_link
->partner_rx_flow_ctrl_en
=
650 !!(status
& LINK_STATUS_RX_FLOW_CONTROL_ENABLED
);
652 switch (status
& LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK
) {
653 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE
:
654 p_link
->partner_adv_pause
= QED_LINK_PARTNER_SYMMETRIC_PAUSE
;
656 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE
:
657 p_link
->partner_adv_pause
= QED_LINK_PARTNER_ASYMMETRIC_PAUSE
;
659 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE
:
660 p_link
->partner_adv_pause
= QED_LINK_PARTNER_BOTH_PAUSE
;
663 p_link
->partner_adv_pause
= 0;
666 p_link
->sfp_tx_fault
= !!(status
& LINK_STATUS_SFP_TX_FAULT
);
668 qed_link_update(p_hwfn
);
671 int qed_mcp_set_link(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
, bool b_up
)
673 struct qed_mcp_link_params
*params
= &p_hwfn
->mcp_info
->link_input
;
674 struct qed_mcp_mb_params mb_params
;
675 union drv_union_data union_data
;
676 struct eth_phy_cfg
*phy_cfg
;
680 /* Set the shmem configuration according to params */
681 phy_cfg
= &union_data
.drv_phy_cfg
;
682 memset(phy_cfg
, 0, sizeof(*phy_cfg
));
683 cmd
= b_up
? DRV_MSG_CODE_INIT_PHY
: DRV_MSG_CODE_LINK_RESET
;
684 if (!params
->speed
.autoneg
)
685 phy_cfg
->speed
= params
->speed
.forced_speed
;
686 phy_cfg
->pause
|= (params
->pause
.autoneg
) ? ETH_PAUSE_AUTONEG
: 0;
687 phy_cfg
->pause
|= (params
->pause
.forced_rx
) ? ETH_PAUSE_RX
: 0;
688 phy_cfg
->pause
|= (params
->pause
.forced_tx
) ? ETH_PAUSE_TX
: 0;
689 phy_cfg
->adv_speed
= params
->speed
.advertised_speeds
;
690 phy_cfg
->loopback_mode
= params
->loopback_mode
;
692 p_hwfn
->b_drv_link_init
= b_up
;
695 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
,
696 "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
700 phy_cfg
->loopback_mode
,
701 phy_cfg
->feature_config_flags
);
703 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
,
707 memset(&mb_params
, 0, sizeof(mb_params
));
709 mb_params
.p_data_src
= &union_data
;
710 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
712 /* if mcp fails to respond we must abort */
714 DP_ERR(p_hwfn
, "MCP response failure, aborting\n");
718 /* Reset the link status if needed */
720 qed_mcp_handle_link_change(p_hwfn
, p_ptt
, true);
725 static void qed_read_pf_bandwidth(struct qed_hwfn
*p_hwfn
,
726 struct public_func
*p_shmem_info
)
728 struct qed_mcp_function_info
*p_info
;
730 p_info
= &p_hwfn
->mcp_info
->func_info
;
732 p_info
->bandwidth_min
= (p_shmem_info
->config
&
733 FUNC_MF_CFG_MIN_BW_MASK
) >>
734 FUNC_MF_CFG_MIN_BW_SHIFT
;
735 if (p_info
->bandwidth_min
< 1 || p_info
->bandwidth_min
> 100) {
737 "bandwidth minimum out of bounds [%02x]. Set to 1\n",
738 p_info
->bandwidth_min
);
739 p_info
->bandwidth_min
= 1;
742 p_info
->bandwidth_max
= (p_shmem_info
->config
&
743 FUNC_MF_CFG_MAX_BW_MASK
) >>
744 FUNC_MF_CFG_MAX_BW_SHIFT
;
745 if (p_info
->bandwidth_max
< 1 || p_info
->bandwidth_max
> 100) {
747 "bandwidth maximum out of bounds [%02x]. Set to 100\n",
748 p_info
->bandwidth_max
);
749 p_info
->bandwidth_max
= 100;
753 static u32
qed_mcp_get_shmem_func(struct qed_hwfn
*p_hwfn
,
754 struct qed_ptt
*p_ptt
,
755 struct public_func
*p_data
,
758 u32 addr
= SECTION_OFFSIZE_ADDR(p_hwfn
->mcp_info
->public_base
,
760 u32 mfw_path_offsize
= qed_rd(p_hwfn
, p_ptt
, addr
);
761 u32 func_addr
= SECTION_ADDR(mfw_path_offsize
, pfid
);
764 memset(p_data
, 0, sizeof(*p_data
));
766 size
= min_t(u32
, sizeof(*p_data
),
767 QED_SECTION_SIZE(mfw_path_offsize
));
768 for (i
= 0; i
< size
/ sizeof(u32
); i
++)
769 ((u32
*)p_data
)[i
] = qed_rd(p_hwfn
, p_ptt
,
770 func_addr
+ (i
<< 2));
774 int qed_hw_init_first_eth(struct qed_hwfn
*p_hwfn
,
775 struct qed_ptt
*p_ptt
, u8
*p_pf
)
777 struct public_func shmem_info
;
780 /* Find first Ethernet interface in port */
781 for (i
= 0; i
< NUM_OF_ENG_PFS(p_hwfn
->cdev
);
782 i
+= p_hwfn
->cdev
->num_ports_in_engines
) {
783 qed_mcp_get_shmem_func(p_hwfn
, p_ptt
, &shmem_info
,
784 MCP_PF_ID_BY_REL(p_hwfn
, i
));
786 if (shmem_info
.config
& FUNC_MF_CFG_FUNC_HIDE
)
789 if ((shmem_info
.config
& FUNC_MF_CFG_PROTOCOL_MASK
) ==
790 FUNC_MF_CFG_PROTOCOL_ETHERNET
) {
797 "Failed to find on port an ethernet interface in MF_SI mode\n");
802 static void qed_mcp_update_bw(struct qed_hwfn
*p_hwfn
,
803 struct qed_ptt
*p_ptt
)
805 struct qed_mcp_function_info
*p_info
;
806 struct public_func shmem_info
;
807 u32 resp
= 0, param
= 0;
809 qed_mcp_get_shmem_func(p_hwfn
, p_ptt
, &shmem_info
,
812 qed_read_pf_bandwidth(p_hwfn
, &shmem_info
);
814 p_info
= &p_hwfn
->mcp_info
->func_info
;
816 qed_configure_pf_min_bandwidth(p_hwfn
->cdev
, p_info
->bandwidth_min
);
817 qed_configure_pf_max_bandwidth(p_hwfn
->cdev
, p_info
->bandwidth_max
);
819 /* Acknowledge the MFW */
820 qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_BW_UPDATE_ACK
, 0, &resp
,
824 int qed_mcp_handle_events(struct qed_hwfn
*p_hwfn
,
825 struct qed_ptt
*p_ptt
)
827 struct qed_mcp_info
*info
= p_hwfn
->mcp_info
;
832 DP_VERBOSE(p_hwfn
, QED_MSG_SP
, "Received message from MFW\n");
834 /* Read Messages from MFW */
835 qed_mcp_read_mb(p_hwfn
, p_ptt
);
837 /* Compare current messages to old ones */
838 for (i
= 0; i
< info
->mfw_mb_length
; i
++) {
839 if (info
->mfw_mb_cur
[i
] == info
->mfw_mb_shadow
[i
])
844 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
,
845 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
846 i
, info
->mfw_mb_shadow
[i
], info
->mfw_mb_cur
[i
]);
849 case MFW_DRV_MSG_LINK_CHANGE
:
850 qed_mcp_handle_link_change(p_hwfn
, p_ptt
, false);
852 case MFW_DRV_MSG_VF_DISABLED
:
853 qed_mcp_handle_vf_flr(p_hwfn
, p_ptt
);
855 case MFW_DRV_MSG_LLDP_DATA_UPDATED
:
856 qed_dcbx_mib_update_event(p_hwfn
, p_ptt
,
857 QED_DCBX_REMOTE_LLDP_MIB
);
859 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED
:
860 qed_dcbx_mib_update_event(p_hwfn
, p_ptt
,
861 QED_DCBX_REMOTE_MIB
);
863 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED
:
864 qed_dcbx_mib_update_event(p_hwfn
, p_ptt
,
865 QED_DCBX_OPERATIONAL_MIB
);
867 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE
:
868 qed_mcp_handle_transceiver_change(p_hwfn
, p_ptt
);
870 case MFW_DRV_MSG_BW_UPDATE
:
871 qed_mcp_update_bw(p_hwfn
, p_ptt
);
874 DP_NOTICE(p_hwfn
, "Unimplemented MFW message %d\n", i
);
880 for (i
= 0; i
< MFW_DRV_MSG_MAX_DWORDS(info
->mfw_mb_length
); i
++) {
881 __be32 val
= cpu_to_be32(((u32
*)info
->mfw_mb_cur
)[i
]);
883 /* MFW expect answer in BE, so we force write in that format */
884 qed_wr(p_hwfn
, p_ptt
,
885 info
->mfw_mb_addr
+ sizeof(u32
) +
886 MFW_DRV_MSG_MAX_DWORDS(info
->mfw_mb_length
) *
887 sizeof(u32
) + i
* sizeof(u32
),
893 "Received an MFW message indication but no new message!\n");
897 /* Copy the new mfw messages into the shadow */
898 memcpy(info
->mfw_mb_shadow
, info
->mfw_mb_cur
, info
->mfw_mb_length
);
903 int qed_mcp_get_mfw_ver(struct qed_hwfn
*p_hwfn
,
904 struct qed_ptt
*p_ptt
,
905 u32
*p_mfw_ver
, u32
*p_running_bundle_id
)
909 if (IS_VF(p_hwfn
->cdev
)) {
910 if (p_hwfn
->vf_iov_info
) {
911 struct pfvf_acquire_resp_tlv
*p_resp
;
913 p_resp
= &p_hwfn
->vf_iov_info
->acquire_resp
;
914 *p_mfw_ver
= p_resp
->pfdev_info
.mfw_ver
;
919 "VF requested MFW version prior to ACQUIRE\n");
924 global_offsize
= qed_rd(p_hwfn
, p_ptt
,
925 SECTION_OFFSIZE_ADDR(p_hwfn
->
926 mcp_info
->public_base
,
929 qed_rd(p_hwfn
, p_ptt
,
930 SECTION_ADDR(global_offsize
,
931 0) + offsetof(struct public_global
, mfw_ver
));
933 if (p_running_bundle_id
!= NULL
) {
934 *p_running_bundle_id
= qed_rd(p_hwfn
, p_ptt
,
935 SECTION_ADDR(global_offsize
, 0) +
936 offsetof(struct public_global
,
943 int qed_mcp_get_media_type(struct qed_dev
*cdev
,
946 struct qed_hwfn
*p_hwfn
= &cdev
->hwfns
[0];
947 struct qed_ptt
*p_ptt
;
952 if (!qed_mcp_is_init(p_hwfn
)) {
953 DP_NOTICE(p_hwfn
, "MFW is not initialized !\n");
957 *p_media_type
= MEDIA_UNSPECIFIED
;
959 p_ptt
= qed_ptt_acquire(p_hwfn
);
963 *p_media_type
= qed_rd(p_hwfn
, p_ptt
, p_hwfn
->mcp_info
->port_addr
+
964 offsetof(struct public_port
, media_type
));
966 qed_ptt_release(p_hwfn
, p_ptt
);
972 qed_mcp_get_shmem_proto(struct qed_hwfn
*p_hwfn
,
973 struct public_func
*p_info
,
974 enum qed_pci_personality
*p_proto
)
978 switch (p_info
->config
& FUNC_MF_CFG_PROTOCOL_MASK
) {
979 case FUNC_MF_CFG_PROTOCOL_ETHERNET
:
980 if (test_bit(QED_DEV_CAP_ROCE
,
981 &p_hwfn
->hw_info
.device_capabilities
))
982 *p_proto
= QED_PCI_ETH_ROCE
;
984 *p_proto
= QED_PCI_ETH
;
986 case FUNC_MF_CFG_PROTOCOL_ISCSI
:
987 *p_proto
= QED_PCI_ISCSI
;
989 case FUNC_MF_CFG_PROTOCOL_ROCE
:
990 DP_NOTICE(p_hwfn
, "RoCE personality is not a valid value!\n");
1000 int qed_mcp_fill_shmem_func_info(struct qed_hwfn
*p_hwfn
,
1001 struct qed_ptt
*p_ptt
)
1003 struct qed_mcp_function_info
*info
;
1004 struct public_func shmem_info
;
1006 qed_mcp_get_shmem_func(p_hwfn
, p_ptt
, &shmem_info
,
1008 info
= &p_hwfn
->mcp_info
->func_info
;
1010 info
->pause_on_host
= (shmem_info
.config
&
1011 FUNC_MF_CFG_PAUSE_ON_HOST_RING
) ? 1 : 0;
1013 if (qed_mcp_get_shmem_proto(p_hwfn
, &shmem_info
,
1015 DP_ERR(p_hwfn
, "Unknown personality %08x\n",
1016 (u32
)(shmem_info
.config
& FUNC_MF_CFG_PROTOCOL_MASK
));
1020 qed_read_pf_bandwidth(p_hwfn
, &shmem_info
);
1022 if (shmem_info
.mac_upper
|| shmem_info
.mac_lower
) {
1023 info
->mac
[0] = (u8
)(shmem_info
.mac_upper
>> 8);
1024 info
->mac
[1] = (u8
)(shmem_info
.mac_upper
);
1025 info
->mac
[2] = (u8
)(shmem_info
.mac_lower
>> 24);
1026 info
->mac
[3] = (u8
)(shmem_info
.mac_lower
>> 16);
1027 info
->mac
[4] = (u8
)(shmem_info
.mac_lower
>> 8);
1028 info
->mac
[5] = (u8
)(shmem_info
.mac_lower
);
1030 DP_NOTICE(p_hwfn
, "MAC is 0 in shmem\n");
1033 info
->wwn_port
= (u64
)shmem_info
.fcoe_wwn_port_name_upper
|
1034 (((u64
)shmem_info
.fcoe_wwn_port_name_lower
) << 32);
1035 info
->wwn_node
= (u64
)shmem_info
.fcoe_wwn_node_name_upper
|
1036 (((u64
)shmem_info
.fcoe_wwn_node_name_lower
) << 32);
1038 info
->ovlan
= (u16
)(shmem_info
.ovlan_stag
& FUNC_MF_CFG_OV_STAG_MASK
);
1040 DP_VERBOSE(p_hwfn
, (QED_MSG_SP
| NETIF_MSG_IFUP
),
1041 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
1042 info
->pause_on_host
, info
->protocol
,
1043 info
->bandwidth_min
, info
->bandwidth_max
,
1044 info
->mac
[0], info
->mac
[1], info
->mac
[2],
1045 info
->mac
[3], info
->mac
[4], info
->mac
[5],
1046 info
->wwn_port
, info
->wwn_node
, info
->ovlan
);
1051 struct qed_mcp_link_params
1052 *qed_mcp_get_link_params(struct qed_hwfn
*p_hwfn
)
1054 if (!p_hwfn
|| !p_hwfn
->mcp_info
)
1056 return &p_hwfn
->mcp_info
->link_input
;
1059 struct qed_mcp_link_state
1060 *qed_mcp_get_link_state(struct qed_hwfn
*p_hwfn
)
1062 if (!p_hwfn
|| !p_hwfn
->mcp_info
)
1064 return &p_hwfn
->mcp_info
->link_output
;
1067 struct qed_mcp_link_capabilities
1068 *qed_mcp_get_link_capabilities(struct qed_hwfn
*p_hwfn
)
1070 if (!p_hwfn
|| !p_hwfn
->mcp_info
)
1072 return &p_hwfn
->mcp_info
->link_capabilities
;
1075 int qed_mcp_drain(struct qed_hwfn
*p_hwfn
,
1076 struct qed_ptt
*p_ptt
)
1078 u32 resp
= 0, param
= 0;
1081 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
,
1082 DRV_MSG_CODE_NIG_DRAIN
, 1000,
1085 /* Wait for the drain to complete before returning */
1091 int qed_mcp_get_flash_size(struct qed_hwfn
*p_hwfn
,
1092 struct qed_ptt
*p_ptt
,
1097 if (IS_VF(p_hwfn
->cdev
))
1100 flash_size
= qed_rd(p_hwfn
, p_ptt
, MCP_REG_NVM_CFG4
);
1101 flash_size
= (flash_size
& MCP_REG_NVM_CFG4_FLASH_SIZE
) >>
1102 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT
;
1103 flash_size
= (1 << (flash_size
+ MCP_BYTES_PER_MBIT_SHIFT
));
1105 *p_flash_size
= flash_size
;
1110 int qed_mcp_config_vf_msix(struct qed_hwfn
*p_hwfn
,
1111 struct qed_ptt
*p_ptt
, u8 vf_id
, u8 num
)
1113 u32 resp
= 0, param
= 0, rc_param
= 0;
1116 /* Only Leader can configure MSIX, and need to take CMT into account */
1117 if (!IS_LEAD_HWFN(p_hwfn
))
1119 num
*= p_hwfn
->cdev
->num_hwfns
;
1121 param
|= (vf_id
<< DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT
) &
1122 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK
;
1123 param
|= (num
<< DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT
) &
1124 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK
;
1126 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_CFG_VF_MSIX
, param
,
1129 if (resp
!= FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE
) {
1130 DP_NOTICE(p_hwfn
, "VF[%d]: MFW failed to set MSI-X\n", vf_id
);
1133 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
1134 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
1142 qed_mcp_send_drv_version(struct qed_hwfn
*p_hwfn
,
1143 struct qed_ptt
*p_ptt
,
1144 struct qed_mcp_drv_version
*p_ver
)
1146 struct drv_version_stc
*p_drv_version
;
1147 struct qed_mcp_mb_params mb_params
;
1148 union drv_union_data union_data
;
1153 p_drv_version
= &union_data
.drv_version
;
1154 p_drv_version
->version
= p_ver
->version
;
1156 for (i
= 0; i
< MCP_DRV_VER_STR_SIZE
- 1; i
+= 4) {
1157 val
= cpu_to_be32(p_ver
->name
[i
]);
1158 *(__be32
*)&p_drv_version
->name
[i
* sizeof(u32
)] = val
;
1161 memset(&mb_params
, 0, sizeof(mb_params
));
1162 mb_params
.cmd
= DRV_MSG_CODE_SET_VERSION
;
1163 mb_params
.p_data_src
= &union_data
;
1164 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
1166 DP_ERR(p_hwfn
, "MCP response failure, aborting\n");
1171 int qed_mcp_set_led(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
,
1172 enum qed_led_mode mode
)
1174 u32 resp
= 0, param
= 0, drv_mb_param
;
1178 case QED_LED_MODE_ON
:
1179 drv_mb_param
= DRV_MB_PARAM_SET_LED_MODE_ON
;
1181 case QED_LED_MODE_OFF
:
1182 drv_mb_param
= DRV_MB_PARAM_SET_LED_MODE_OFF
;
1184 case QED_LED_MODE_RESTORE
:
1185 drv_mb_param
= DRV_MB_PARAM_SET_LED_MODE_OPER
;
1188 DP_NOTICE(p_hwfn
, "Invalid LED mode %d\n", mode
);
1192 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_SET_LED_MODE
,
1193 drv_mb_param
, &resp
, ¶m
);
1198 int qed_mcp_bist_register_test(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
1200 u32 drv_mb_param
= 0, rsp
, param
;
1203 drv_mb_param
= (DRV_MB_PARAM_BIST_REGISTER_TEST
<<
1204 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT
);
1206 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_BIST_TEST
,
1207 drv_mb_param
, &rsp
, ¶m
);
1212 if (((rsp
& FW_MSG_CODE_MASK
) != FW_MSG_CODE_OK
) ||
1213 (param
!= DRV_MB_PARAM_BIST_RC_PASSED
))
1219 int qed_mcp_bist_clock_test(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
1221 u32 drv_mb_param
, rsp
, param
;
1224 drv_mb_param
= (DRV_MB_PARAM_BIST_CLOCK_TEST
<<
1225 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT
);
1227 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_BIST_TEST
,
1228 drv_mb_param
, &rsp
, ¶m
);
1233 if (((rsp
& FW_MSG_CODE_MASK
) != FW_MSG_CODE_OK
) ||
1234 (param
!= DRV_MB_PARAM_BIST_RC_PASSED
))