/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define CHIP_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)  \
	DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17
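
/* The DRV_MB_WR/DRV_MB_RD helpers above access fields of the driver mailbox
 * (struct public_drv_mb) residing in the MCP's shared memory; drv_mb_addr is
 * resolved per-PF from the public data section in qed_load_mcp_offsets().
 */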

bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; Need to force it to cpu */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}

struct qed_mcp_cmd_elem {
	struct list_head list;
	struct qed_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};
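
/* A qed_mcp_cmd_elem tracks a single in-flight mailbox command: the list is
 * protected by mcp_info->cmd_lock, and expected_seq_num matches the sequence
 * number the MFW echoes back in fw_mb_header.
 */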

/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *
qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_mb_params *p_mb_params,
		     u16 expected_seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
	if (!p_cmd_elem)
		goto out;

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
				 struct qed_mcp_cmd_elem *p_cmd_elem)
{
	list_del(&p_cmd_elem->list);
	kfree(p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
						     u16 seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return NULL;
}

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;

		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		list_for_each_entry_safe(p_cmd_elem,
					 p_tmp,
					 &p_hwfn->mcp_info->cmd_list, list) {
			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	}

	kfree(p_hwfn->mcp_info);

	return 0;
}

static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return 0;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}
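
/* Allocate and initialize the per-hwfn MCP bookkeeping (locks, pending-command
 * list and the MFW mailbox shadow buffers), and resolve the shared-memory
 * offsets needed to talk to the management FW.
 */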
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlock */
	spin_lock_init(&p_info->cmd_lock);
	spin_lock_init(&p_info->link_lock);

	INIT_LIST_HEAD(&p_info->cmd_list);

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicate that
		 * the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
		goto err;

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}

static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}
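
/* Request an MCP reset and poll MISCS_REG_GENERIC_POR_0 until its value
 * changes, which indicates the management FW has gone through a reset cycle.
 */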
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
	int rc = 0;

	/* Ensure that only a single thread is accessing the mailbox */
	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50*1000*10usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}

/* Must be called while cmd_lock is acquired */
static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
					      struct qed_mcp_cmd_elem, list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}
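
/* Check fw_mb_header for a new response; if its sequence number matches the
 * last command sent, copy the response, param and union data into the pending
 * element's mailbox parameters and mark it completed.
 */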
/* Must be called while cmd_lock is acquired */
static int
qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params *p_mb_params;
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return -EAGAIN;

	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return -EINVAL;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      offsetof(struct public_drv_mb,
					       union_data);
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return 0;
}

/* Must be called while cmd_lock is acquired */
static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_mb_params *p_mb_params,
				    u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);
	memset(&union_data, 0, sizeof(union_data));
	if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
		memcpy(&union_data, p_mb_params->p_data_src,
		       p_mb_params->data_src_size);
	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
		      sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}
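
/* Send a mailbox command and busy-wait for its completion: first wait for the
 * mailbox to become free, then issue the command under cmd_lock, and finally
 * poll (up to max_retries iterations of 'delay' usec) for the MFW response.
 */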
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       struct qed_mcp_mb_params *p_mb_params,
		       u32 max_retries, u32 delay)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u32 cnt = 0;
	u16 seq_num;
	int rc = 0;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!qed_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
		udelay(delay);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EAGAIN;
	}

	/* Send the mailbox command */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = -ENOMEM;
		goto err;
	}

	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		udelay(delay);
		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		return -EAGAIN;
	}

	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp,
		   p_mb_params->mcp_param,
		   (cnt * delay) / 1000, (cnt * delay) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return 0;

err:
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}

static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
	u32 delay = CHIP_MCP_RESP_ITER_US;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size,
		       p_mb_params->data_dst_size, union_data_size);
		return -EINVAL;
	}

	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
				      delay);
}
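
/* Simple command/response wrapper around qed_mcp_cmd_and_union() for mailbox
 * commands that carry no union data.
 */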
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}

int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, raw_data, *o_txn_size);

	return 0;
}

static bool
qed_mcp_can_force_load(u8 drv_role,
		       u8 exist_drv_role,
		       enum qed_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case QED_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}

static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			 &resp, &param);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_QEDE_BITMAP_IDX		BIT(0)
#define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
#define CONFIG_QEDR_BITMAP_IDX		BIT(2)
#define CONFIG_QEDF_BITMAP_IDX		BIT(4)
#define CONFIG_QEDI_BITMAP_IDX		BIT(5)
#define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)

static u32 qed_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

	if (IS_ENABLED(CONFIG_QEDE))
		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_SRIOV))
		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_RDMA))
		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_FCOE))
		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_ISCSI))
		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_LL2))
		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;

	return config_bitmap;
}
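
/* Driver-side representation of the LOAD_REQ exchange; the fields mirror the
 * load_req_stc/load_rsp_stc structures passed through the mailbox union data.
 */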
struct qed_load_req_in_params {
	u32 hsi_ver;
#define QED_LOAD_REQ_HSI_VER_DEFAULT	0
#define QED_LOAD_REQ_HSI_VER_1		1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct qed_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

static int
__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   struct qed_load_req_in_params *p_in_params,
		   struct qed_load_req_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	int rc;

	memset(&load_req, 0, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			  p_in_params->timeout_val);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
			  p_in_params->force_cmd);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
			  p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0,
			   load_req.drv_ver_1,
			   load_req.fw_ver,
			   load_req.misc0,
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   QED_MFW_GET_FIELD(load_req.misc0,
					     LOAD_REQ_LOCK_TO),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
	}

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0,
			   load_rsp.drv_ver_1,
			   load_rsp.fw_ver,
			   load_rsp.misc0,
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
		    LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return 0;
}

static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
				  enum qed_drv_role drv_role,
				  u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case QED_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case QED_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return -EINVAL;
	}

	return 0;
}
{
814 QED_LOAD_REQ_FORCE_NONE
,
815 QED_LOAD_REQ_FORCE_PF
,
816 QED_LOAD_REQ_FORCE_ALL
,
819 static void qed_get_mfw_force_cmd(struct qed_hwfn
*p_hwfn
,
821 enum qed_load_req_force force_cmd
,
825 case QED_LOAD_REQ_FORCE_NONE
:
826 *p_mfw_force_cmd
= LOAD_REQ_FORCE_NONE
;
828 case QED_LOAD_REQ_FORCE_PF
:
829 *p_mfw_force_cmd
= LOAD_REQ_FORCE_PF
;
831 case QED_LOAD_REQ_FORCE_ALL
:
832 *p_mfw_force_cmd
= LOAD_REQ_FORCE_ALL
;
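
/* Send a LOAD_REQ to the MFW. If the MFW only supports the old interface the
 * request is re-sent with HSI version 1, and if the MFW indicates that a force
 * load is needed the request is re-sent with LOAD_REQ_FORCE_ALL (when allowed
 * by the override_force_load policy).
 */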
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     struct qed_load_req_params *p_params)
{
	struct qed_load_req_out_params out_params;
	struct qed_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = QED_VERSION;
	in_params.drv_ver_1 = qed_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	qed_get_mfw_force_cmd(p_hwfn,
			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	memset(&out_params, 0, sizeof(out_params));
	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");

		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
		memset(&out_params, 0, sizeof(out_params));
		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
		if (rc)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (qed_mcp_can_force_load(in_params.drv_role,
					   out_params.exist_drv_role,
					   p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			qed_get_mfw_force_cmd(p_hwfn,
					      QED_LOAD_REQ_FORCE_ALL,
					      &mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			memset(&out_params, 0, sizeof(out_params));
			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
						&out_params);
			if (rc)
				return rc;
		} else {
			DP_NOTICE(p_hwfn,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);
			DP_NOTICE(p_hwfn,
				  "Avoid sending a force load request to prevent disruption of active PFs\n");

			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
			return -EBUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 */
			DP_NOTICE(p_hwfn,
				  "PF is already loaded\n");
			return -EINVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return -EBUSY;
	}

	p_params->load_code = out_params.load_code;

	return 0;
}

int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 wol_param, mcp_resp, mcp_param;

	switch (p_hwfn->cdev->wol_config) {
	case QED_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->cdev->wol_config);
		/* Fallthrough */
	case QED_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			   &mcp_resp, &mcp_param);
}

int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->cdev->wol_mac;

		memset(&wol_mac, 0, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}

int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}

static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	spin_lock_bh(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
	/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
					    p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	qed_link_update(p_hwfn);
out:
	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}

int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	int rc = 0;
	u32 cmd;

	/* Set the shmem configuration according to params */
	memset(&phy_cfg, 0, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	p_hwfn->b_drv_link_init = b_up;

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg.speed,
			   phy_cfg.pause,
			   phy_cfg.adv_speed,
			   phy_cfg.loopback_mode,
			   phy_cfg.feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return 0;
}

static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
					FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
					FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));
	return size;
}

static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}
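
/* General MFW notification handler: reads the MFW mailbox, dispatches every
 * message whose value changed since the shadow copy, ACKs all messages back
 * to the MFW and then updates the shadow.
 */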
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}

int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}

int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
	struct qed_ptt  *p_ptt;

	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	*p_media_type = MEDIA_UNSPECIFIED;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	*p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port, media_type));

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
			       enum qed_pci_personality *p_proto)
{
	/* There wasn't ever a legacy MFW that published iwarp.
	 * So at this point, this is either plain l2 or RoCE.
	 */
	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
		*p_proto = QED_PCI_ETH_ROCE;
	else
		*p_proto = QED_PCI_ETH;

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32) *p_proto);
}

static int
qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
	if (rc)
		return rc;
	if (resp != FW_MSG_CODE_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
			   "MFW lacks support for command; Returns %08x\n",
			   resp);
		return -EINVAL;
	}

	switch (param) {
	case FW_MB_PARAM_GET_PF_RDMA_NONE:
		*p_proto = QED_PCI_ETH;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
		DP_NOTICE(p_hwfn,
			  "Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n");
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
	default:
		DP_NOTICE(p_hwfn,
			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
			  param);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32) *p_proto, resp, param);
	return 0;
}

static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			struct qed_ptt *p_ptt,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (!IS_ENABLED(CONFIG_QED_RDMA))
			*p_proto = QED_PCI_ETH;
		else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	case FUNC_MF_CFG_PROTOCOL_ISCSI:
		*p_proto = QED_PCI_ISCSI;
		break;
	case FUNC_MF_CFG_PROTOCOL_FCOE:
		*p_proto = QED_PCI_FCOE;
		break;
	case FUNC_MF_CFG_PROTOCOL_ROCE:
		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
	/* Fallthrough */
	default:
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);

		/* Store primary MAC for later possible WoL */
		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
	if (qed_mcp_is_init(p_hwfn)) {
		u32 resp = 0, param = 0;
		int rc;

		rc = qed_mcp_cmd(p_hwfn, p_ptt,
				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
		if (rc)
			return rc;
		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
	}

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node,
		   info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);

	return 0;
}

struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}
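
/* NVM_CFG4 encodes the flash size as a power-of-two exponent; the helper below
 * converts it to bytes by shifting 1 by (exponent + MCP_BYTES_PER_MBIT_SHIFT).
 */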
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}

int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	int rc;

	/* Only Leader can configure MSIX, and need to take CMT into account */
	if (!IS_LEAD_HWFN(p_hwfn))
		return 0;
	num *= p_hwfn->cdev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			 &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct qed_mcp_mb_params mb_params;
	struct drv_version_stc drv_version;
	__be32 val;
	u32 i;
	int rc;

	memset(&drv_version, 0, sizeof(drv_version));
	drv_version.version = p_ver->version;
	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &drv_version;
	mb_params.data_src_size = sizeof(drv_version);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			 &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 value, cpu_mode;

	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);

	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
}

int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     enum qed_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (client) {
	case QED_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case QED_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case QED_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   enum qed_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (drv_state) {
	case QED_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case QED_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case QED_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u16 mtu)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);

	return rc;
}

int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *mac)
{
	struct qed_mcp_mb_params mb_params;
	u32 mfw_mac[2];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
	mb_params.param |= MCP_PF_ID(p_hwfn);

	/* MCP is BE, and on LE platforms PCI would swap access to SHMEM
	 * in 32-bit granularity.
	 * So the MAC has to be set in native order [and not byte order],
	 * otherwise it would be read incorrectly by MFW after swap.
	 */
	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;

	mb_params.p_data_src = (u8 *)mfw_mac;
	mb_params.data_src_size = 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);

	/* Store primary MAC for later possible WoL */
	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);

	return rc;
}

int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Can't change WoL configuration when WoL isn't supported\n");
		return -EINVAL;
	}

	switch (wol) {
	case QED_OV_WOL_DEFAULT:
		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
		break;
	case QED_OV_WOL_DISABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);

	/* Store the WoL update for a future unload */
	p_hwfn->cdev->wol_config = (u8)wol;

	return rc;
}

int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (eswitch) {
	case QED_OV_ESWITCH_NONE:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
		break;
	case QED_OV_ESWITCH_VEB:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
		break;
	case QED_OV_ESWITCH_VEPA:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);

	return rc;
}

int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			 mask_parities, &resp, &param);

	if (rc) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not acknowledge mask parity request. Old MFW?\n");
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
{
	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 resp = 0, resp_param = 0;
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);

		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_NVM_READ_NVRAM,
					addr + offset +
					(bytes_to_copy <<
					 DRV_MB_PARAM_NVM_LEN_SHIFT),
					&resp, &resp_param,
					&read_len,
					(u32 *)(p_buf + offset));

		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptible. Sleep a bit to prevent CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - read_len) % 0x1000)
			usleep_range(1000, 2000);

		offset += read_len;
		bytes_left -= read_len;
	}

	cdev->mcp_nvm_resp = resp;
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
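
/* The qed_mcp_bist_*() helpers below ask the MFW to run its built-in self
 * tests (register, clock and NVM image tests) via DRV_MSG_CODE_BIST_TEST and
 * translate the MFW status codes into errno values.
 */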
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 drv_mb_param, rsp, param;
        int rc = 0;

        drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
                        DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
                         drv_mb_param, &rsp, &param);
        if (rc)
                return rc;

        if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
            (param != DRV_MB_PARAM_BIST_RC_PASSED))
                rc = -EAGAIN;

        return rc;
}

int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
                                         struct qed_ptt *p_ptt,
                                         u32 *num_images)
{
        u32 drv_mb_param = 0, rsp;
        int rc = 0;

        drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
                        DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
                         drv_mb_param, &rsp, num_images);
        if (rc)
                return rc;

        if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
                rc = -EINVAL;

        return rc;
}

int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
                                        struct qed_ptt *p_ptt,
                                        struct bist_nvm_image_att *p_image_att,
                                        u32 image_index)
{
        u32 buf_size = 0, param, resp = 0, resp_param = 0;
        int rc;

        param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
                DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
        param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;

        rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
                                DRV_MSG_CODE_BIST_TEST, param,
                                &resp, &resp_param,
                                &buf_size,
                                (u32 *)p_image_att);
        if (rc)
                return rc;

        if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
            (p_image_att->return_code != 1))
                rc = -EINVAL;

        return rc;
}
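
/* Usage sketch (illustrative only): the two NVM BIST helpers above are meant
 * to be used together - first query how many images exist, then fetch the
 * attributes of each image by index. Variable names and error handling here
 * are assumptions.
 *
 *      struct bist_nvm_image_att image_att;
 *      u32 num_images = 0, i;
 *
 *      if (qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images))
 *              return;
 *      for (i = 0; i < num_images; i++)
 *              if (qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
 *                                                      &image_att, i))
 *                      break;
 */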

static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
{
        enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

        switch (res_id) {
        case QED_SB:
                mfw_res_id = RESOURCE_NUM_SB_E;
                break;
        case QED_L2_QUEUE:
                mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
                break;
        case QED_VPORT:
                mfw_res_id = RESOURCE_NUM_VPORT_E;
                break;
        case QED_RSS_ENG:
                mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
                break;
        case QED_PQ:
                mfw_res_id = RESOURCE_NUM_PQ_E;
                break;
        case QED_RL:
                mfw_res_id = RESOURCE_NUM_RL_E;
                break;
        case QED_MAC:
        case QED_VLAN:
                /* Each VFC resource can accommodate both a MAC and a VLAN */
                mfw_res_id = RESOURCE_VFC_FILTER_E;
                break;
        case QED_ILT:
                mfw_res_id = RESOURCE_ILT_E;
                break;
        case QED_LL2_QUEUE:
                mfw_res_id = RESOURCE_LL2_QUEUE_E;
                break;
        case QED_RDMA_CNQ_RAM:
        case QED_CMDQS_CQS:
                /* CNQ/CMDQS are the same resource */
                mfw_res_id = RESOURCE_CQS_E;
                break;
        case QED_RDMA_STATS_QUEUE:
                mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
                break;
        case QED_BDQ:
                mfw_res_id = RESOURCE_BDQ_E;
                break;
        default:
                break;
        }

        return mfw_res_id;
}

#define QED_RESC_ALLOC_VERSION_MAJOR    2
#define QED_RESC_ALLOC_VERSION_MINOR    0
#define QED_RESC_ALLOC_VERSION                               \
        ((QED_RESC_ALLOC_VERSION_MAJOR <<                    \
          DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
         (QED_RESC_ALLOC_VERSION_MINOR <<                    \
          DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))

struct qed_resc_alloc_in_params {
        u32 cmd;
        enum qed_resources res_id;
        u32 resc_max_val;
};

struct qed_resc_alloc_out_params {
        u32 mcp_resp;
        u32 mcp_param;
        u32 resc_num;
        u32 resc_start;
        u32 vf_resc_num;
        u32 vf_resc_start;
        u32 flags;
};

static int
qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt,
                            struct qed_resc_alloc_in_params *p_in_params,
                            struct qed_resc_alloc_out_params *p_out_params)
{
        struct qed_mcp_mb_params mb_params;
        struct resource_info mfw_resc_info;
        int rc;

        memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));

        mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
        if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
                DP_ERR(p_hwfn,
                       "Failed to match resource %d [%s] with the MFW resources\n",
                       p_in_params->res_id,
                       qed_hw_get_resc_name(p_in_params->res_id));
                return -EINVAL;
        }

        switch (p_in_params->cmd) {
        case DRV_MSG_SET_RESOURCE_VALUE_MSG:
                mfw_resc_info.size = p_in_params->resc_max_val;
                break;
        case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
                break;
        default:
                DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
                       p_in_params->cmd);
                return -EINVAL;
        }

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = p_in_params->cmd;
        mb_params.param = QED_RESC_ALLOC_VERSION;
        mb_params.p_data_src = &mfw_resc_info;
        mb_params.data_src_size = sizeof(mfw_resc_info);
        mb_params.p_data_dst = mb_params.p_data_src;
        mb_params.data_dst_size = mb_params.data_src_size;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_SP,
                   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
                   p_in_params->cmd,
                   p_in_params->res_id,
                   qed_hw_get_resc_name(p_in_params->res_id),
                   QED_MFW_GET_FIELD(mb_params.param,
                                     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
                   QED_MFW_GET_FIELD(mb_params.param,
                                     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
                   p_in_params->resc_max_val);

        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                return rc;

        p_out_params->mcp_resp = mb_params.mcp_resp;
        p_out_params->mcp_param = mb_params.mcp_param;
        p_out_params->resc_num = mfw_resc_info.size;
        p_out_params->resc_start = mfw_resc_info.offset;
        p_out_params->vf_resc_num = mfw_resc_info.vf_size;
        p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
        p_out_params->flags = mfw_resc_info.flags;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_SP,
                   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
                   QED_MFW_GET_FIELD(p_out_params->mcp_param,
                                     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
                   QED_MFW_GET_FIELD(p_out_params->mcp_param,
                                     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
                   p_out_params->resc_num,
                   p_out_params->resc_start,
                   p_out_params->vf_resc_num,
                   p_out_params->vf_resc_start, p_out_params->flags);

        return 0;
}

int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
                         struct qed_ptt *p_ptt,
                         enum qed_resources res_id,
                         u32 resc_max_val, u32 *p_mcp_resp)
{
        struct qed_resc_alloc_out_params out_params;
        struct qed_resc_alloc_in_params in_params;
        int rc;

        memset(&in_params, 0, sizeof(in_params));
        in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
        in_params.res_id = res_id;
        in_params.resc_max_val = resc_max_val;
        memset(&out_params, 0, sizeof(out_params));
        rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
                                         &out_params);
        if (rc)
                return rc;

        *p_mcp_resp = out_params.mcp_resp;

        return 0;
}

int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      enum qed_resources res_id,
                      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
{
        struct qed_resc_alloc_out_params out_params;
        struct qed_resc_alloc_in_params in_params;
        int rc;

        memset(&in_params, 0, sizeof(in_params));
        in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
        in_params.res_id = res_id;
        memset(&out_params, 0, sizeof(out_params));
        rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
                                         &out_params);
        if (rc)
                return rc;

        *p_mcp_resp = out_params.mcp_resp;

        if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
                *p_resc_num = out_params.resc_num;
                *p_resc_start = out_params.resc_start;
        }

        return 0;
}
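
/* Usage sketch (illustrative only): querying how many L2 queues the MFW
 * assigned to this function and where the range starts. Local variable names
 * are assumptions.
 *
 *      u32 mcp_resp, num_l2_queues, l2_queue_start;
 *
 *      if (!qed_mcp_get_resc_info(p_hwfn, p_ptt, QED_L2_QUEUE, &mcp_resp,
 *                                 &num_l2_queues, &l2_queue_start) &&
 *          mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
 *              DP_VERBOSE(p_hwfn, QED_MSG_SP, "L2 queues: %u at %u\n",
 *                         num_l2_queues, l2_queue_start);
 */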

int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 mcp_resp, mcp_param;

        return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
                           &mcp_resp, &mcp_param);
}

static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
                                struct qed_ptt *p_ptt,
                                u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
{
        int rc;

        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
                         p_mcp_resp, p_mcp_param);
        if (rc)
                return rc;

        if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
                DP_INFO(p_hwfn,
                        "The resource command is unsupported by the MFW\n");
                return -EINVAL;
        }

        if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
                u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

                DP_NOTICE(p_hwfn,
                          "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
                          param, opcode);
                rc = -EINVAL;
        }

        return rc;
}

static int
__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
                    struct qed_ptt *p_ptt,
                    struct qed_resc_lock_params *p_params)
{
        u32 param = 0, mcp_resp, mcp_param;
        u8 opcode;
        int rc;

        switch (p_params->timeout) {
        case QED_MCP_RESC_LOCK_TO_DEFAULT:
                opcode = RESOURCE_OPCODE_REQ;
                p_params->timeout = 0;
                break;
        case QED_MCP_RESC_LOCK_TO_NONE:
                opcode = RESOURCE_OPCODE_REQ_WO_AGING;
                p_params->timeout = 0;
                break;
        default:
                opcode = RESOURCE_OPCODE_REQ_W_AGING;
                break;
        }

        QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
        QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
        QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

        DP_VERBOSE(p_hwfn,
                   QED_MSG_SP,
                   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
                   param, p_params->timeout, opcode, p_params->resource);

        /* Attempt to acquire the resource */
        rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
        if (rc)
                return rc;

        /* Analyze the response */
        p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
        opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

        DP_VERBOSE(p_hwfn,
                   QED_MSG_SP,
                   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
                   mcp_param, opcode, p_params->owner);

        switch (opcode) {
        case RESOURCE_OPCODE_GNT:
                p_params->b_granted = true;
                break;
        case RESOURCE_OPCODE_BUSY:
                p_params->b_granted = false;
                break;
        default:
                DP_NOTICE(p_hwfn,
                          "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
                          mcp_param, opcode);
                return -EINVAL;
        }

        return 0;
}

int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
                  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
{
        u32 retry_cnt = 0;
        int rc;

        do {
                /* No need for an interval before the first iteration */
                if (retry_cnt) {
                        if (p_params->sleep_b4_retry) {
                                u16 retry_interval_in_ms =
                                        DIV_ROUND_UP(p_params->retry_interval,
                                                     1000);

                                msleep(retry_interval_in_ms);
                        } else {
                                udelay(p_params->retry_interval);
                        }
                }

                rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
                if (rc)
                        return rc;

                if (p_params->b_granted)
                        break;
        } while (retry_cnt++ < p_params->retry_num);

        return 0;
}
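
/* Usage sketch (illustrative only): acquiring a global MFW resource lock with
 * retries and releasing it afterwards. The resource value (assumed here to be
 * QED_RESC_LOCK_DBG_DUMP) and the retry policy are assumptions for
 * illustration only.
 *
 *      struct qed_resc_lock_params lock_params = {
 *              .resource = QED_RESC_LOCK_DBG_DUMP,
 *              .timeout = QED_MCP_RESC_LOCK_TO_DEFAULT,
 *              .retry_num = 10,
 *              .retry_interval = 10000,
 *              .sleep_b4_retry = true,
 *      };
 *      struct qed_resc_unlock_params unlock_params = {
 *              .resource = QED_RESC_LOCK_DBG_DUMP,
 *      };
 *
 *      if (!qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params) &&
 *          lock_params.b_granted) {
 *              ... touch the MFW-arbitrated resource ...
 *              qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
 *      }
 */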
2568 qed_mcp_resc_unlock(struct qed_hwfn
*p_hwfn
,
2569 struct qed_ptt
*p_ptt
,
2570 struct qed_resc_unlock_params
*p_params
)
2572 u32 param
= 0, mcp_resp
, mcp_param
;
2576 opcode
= p_params
->b_force
? RESOURCE_OPCODE_FORCE_RELEASE
2577 : RESOURCE_OPCODE_RELEASE
;
2578 QED_MFW_SET_FIELD(param
, RESOURCE_CMD_REQ_RESC
, p_params
->resource
);
2579 QED_MFW_SET_FIELD(param
, RESOURCE_CMD_REQ_OPCODE
, opcode
);
2581 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
2582 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
2583 param
, opcode
, p_params
->resource
);
2585 /* Attempt to release the resource */
2586 rc
= qed_mcp_resource_cmd(p_hwfn
, p_ptt
, param
, &mcp_resp
, &mcp_param
);
2590 /* Analyze the response */
2591 opcode
= QED_MFW_GET_FIELD(mcp_param
, RESOURCE_CMD_RSP_OPCODE
);
2593 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
2594 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
2598 case RESOURCE_OPCODE_RELEASED_PREVIOUS
:
2600 "Resource unlock request for an already released resource [%d]\n",
2601 p_params
->resource
);
2603 case RESOURCE_OPCODE_RELEASED
:
2604 p_params
->b_released
= true;
2606 case RESOURCE_OPCODE_WRONG_OWNER
:
2607 p_params
->b_released
= false;
2611 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",