/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)	  \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr,	  \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17

bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

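/* The MCP "public" shmem sections (global/path/port/func/mfw mailbox) are
 * located via offsize words read relative to public_base, which comes from
 * the MCP scratch-pad register; a zero public_base is what qed_mcp_is_init()
 * above treats as "MCP not initialized".
 */
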
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; Need to force it to cpu */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}

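/* Mailbox commands that are in flight are tracked in mcp_info->cmd_list,
 * keyed by the sequence number written into drv_mb_header. At most one
 * command is pending at any given time; the list lets the response path
 * match an MFW completion back to the issuing command's parameters.
 */
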
struct qed_mcp_cmd_elem {
	struct list_head list;
	struct qed_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};

/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *
qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_mb_params *p_mb_params,
		     u16 expected_seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
	if (!p_cmd_elem)
		goto out;

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
				 struct qed_mcp_cmd_elem *p_cmd_elem)
{
	list_del(&p_cmd_elem->list);
	kfree(p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
						     u16 seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return NULL;
}

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;

		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		list_for_each_entry_safe(p_cmd_elem,
					 p_tmp,
					 &p_hwfn->mcp_info->cmd_list, list) {
			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	}

	kfree(p_hwfn->mcp_info);
	p_hwfn->mcp_info = NULL;

	return 0;
}

/* Maximum of 1 sec to wait for the SHMEM ready indication */
#define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
#define QED_MCP_SHMEM_RDY_ITER_MS	50

static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base) {
		DP_NOTICE(p_hwfn,
			  "The address of the MCP scratch-pad is not configured\n");
		return -EINVAL;
	}

	p_info->public_base |= GRCBASE_MCP;

	/* Get the MFW MB address and number of supported messages */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
					    p_info->mfw_mb_addr +
					    offsetof(struct public_mfw_mb,
						     sup_msgs));

	/* The driver can notify that there was an MCP reset, and might read
	 * the SHMEM values before the MFW has completed initializing them.
	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
	 * data ready indication.
	 */
	while (!p_info->mfw_mb_length && --cnt) {
		msleep(msec);
		p_info->mfw_mb_length =
			(u16)qed_rd(p_hwfn, p_ptt,
				    p_info->mfw_mb_addr +
				    offsetof(struct public_mfw_mb, sup_msgs));
	}

	if (!cnt) {
		DP_NOTICE(p_hwfn,
			  "Failed to get the SHMEM ready notification after %d msec\n",
			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
		return -EBUSY;
	}

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x, mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}

int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
	spin_lock_init(&p_info->cmd_lock);
	spin_lock_init(&p_info->link_lock);

	INIT_LIST_HEAD(&p_info->cmd_list);

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates
		 * that the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}

static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}

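/* Reset the MCP: write DRV_MSG_CODE_MCP_RESET to the mailbox and poll
 * MISCS_REG_GENERIC_POR_0, whose value changes once the MCP has gone
 * through a reset. cmd_lock is held across the whole exchange so no other
 * mailbox command can interleave with the reset sequence.
 */
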
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
	int rc = 0;

	/* Ensure that only a single thread is accessing the mailbox */
	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}

/* Must be called while cmd_lock is acquired */
static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
					      struct qed_mcp_cmd_elem, list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}

/* Must be called while cmd_lock is acquired */
static int
qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params *p_mb_params;
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return -EAGAIN;

	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return -EINVAL;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      offsetof(struct public_drv_mb,
					       union_data);
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return 0;
}

/* Must be called while cmd_lock is acquired */
static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_mb_params *p_mb_params,
				    u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);
	memset(&union_data, 0, sizeof(union_data));
	if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
		memcpy(&union_data, p_mb_params->p_data_src,
		       p_mb_params->data_src_size);
	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
		      sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}

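/* Issue a mailbox command and wait for the MFW to respond. The wait runs
 * in two phases: first until the mailbox is free of any previous pending
 * command, then until the new command completes. cmd_lock is dropped
 * around each delay, and commands flagged QED_MB_FLAG_CAN_SLEEP use
 * msleep() instead of udelay() - both of which keep a slow MFW from
 * stalling other contexts that need the mailbox.
 */
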
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       struct qed_mcp_mb_params *p_mb_params,
		       u32 max_retries, u32 usecs)
{
	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u16 seq_num;
	int rc = 0;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!qed_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			msleep(msecs);
		else
			udelay(usecs);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EAGAIN;
	}

	/* Send the mailbox command */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = -ENOMEM;
		goto err;
	}

	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if
		 * the command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			msleep(msecs);
		else
			udelay(usecs);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		return -EAGAIN;
	}

	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp,
		   p_mb_params->mcp_param,
		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return 0;

err:
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}

static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
	u32 usecs = QED_MCP_RESP_ITER_US;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size,
		       p_mb_params->data_dst_size, union_data_size);
		return -EINVAL;
	}

	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
		max_retries = DIV_ROUND_UP(max_retries, 1000);
		usecs *= 1000;
	}

	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
				      usecs);
}

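/* Simple command/response wrapper for mailbox commands that carry no
 * union data. A typical call site (sketch, mirroring the BW-update ACK
 * further below in this file) looks like:
 *
 *	u32 resp = 0, param = 0;
 *
 *	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0,
 *			 &resp, &param);
 */
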
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}

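/* NVM reads return their payload through the union data area. The actual
 * transaction size arrives in mcp_param, so the caller's buffer must be
 * able to hold up to MCP_DRV_NVM_BUF_LEN bytes.
 */
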
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, raw_data, *o_txn_size);

	return 0;
}

static bool
qed_mcp_can_force_load(u8 drv_role,
		       u8 exist_drv_role,
		       enum qed_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case QED_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}

static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			 &resp, &param);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_QEDE_BITMAP_IDX		BIT(0)
#define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
#define CONFIG_QEDR_BITMAP_IDX		BIT(2)
#define CONFIG_QEDF_BITMAP_IDX		BIT(4)
#define CONFIG_QEDI_BITMAP_IDX		BIT(5)
#define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)

static u32 qed_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

	if (IS_ENABLED(CONFIG_QEDE))
		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_SRIOV))
		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_RDMA))
		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_FCOE))
		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_ISCSI))
		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_LL2))
		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;

	return config_bitmap;
}

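/* Parameters of the LOAD_REQ exchange with the MFW. HSI version 1 is the
 * legacy interface; with it the struct load_rsp payload below is not
 * parsed. The default value lets the driver advertise the current HSI
 * version (DRV_ID_MCP_HSI_VER_CURRENT).
 */
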
struct qed_load_req_in_params {
	u8 hsi_ver;
#define QED_LOAD_REQ_HSI_VER_DEFAULT	0
#define QED_LOAD_REQ_HSI_VER_1		1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct qed_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

static int
__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   struct qed_load_req_in_params *p_in_params,
		   struct qed_load_req_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	int rc;

	memset(&load_req, 0, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			  p_in_params->timeout_val);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
			  p_in_params->force_cmd);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
			  p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0,
			   load_req.drv_ver_1,
			   load_req.fw_ver,
			   load_req.misc0,
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   QED_MFW_GET_FIELD(load_req.misc0,
					     LOAD_REQ_LOCK_TO),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
	}

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0,
			   load_rsp.drv_ver_1,
			   load_rsp.fw_ver,
			   load_rsp.misc0,
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return 0;
}

static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
				enum qed_drv_role drv_role,
				u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case QED_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case QED_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return -EINVAL;
	}

	return 0;
}

enum qed_load_req_force {
	QED_LOAD_REQ_FORCE_NONE,
	QED_LOAD_REQ_FORCE_PF,
	QED_LOAD_REQ_FORCE_ALL,
};

static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
				  enum qed_load_req_force force_cmd,
				  u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case QED_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case QED_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case QED_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}

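/* Negotiate a driver load with the MFW. The first request advertises the
 * current HSI version; depending on the response it may be resent with
 * the legacy HSI or as a force load, and any other refusal aborts the
 * load flow.
 */
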
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     struct qed_load_req_params *p_params)
{
	struct qed_load_req_out_params out_params;
	struct qed_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = QED_VERSION;
	in_params.drv_ver_1 = qed_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	qed_get_mfw_force_cmd(p_hwfn,
			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	memset(&out_params, 0, sizeof(out_params));
	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");

		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
		memset(&out_params, 0, sizeof(out_params));
		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
		if (rc)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (qed_mcp_can_force_load(in_params.drv_role,
					   out_params.exist_drv_role,
					   p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			qed_get_mfw_force_cmd(p_hwfn,
					      QED_LOAD_REQ_FORCE_ALL,
					      &mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			memset(&out_params, 0, sizeof(out_params));
			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
						&out_params);
			if (rc)
				return rc;
		} else {
			DP_NOTICE(p_hwfn,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);
			DP_NOTICE(p_hwfn,
				  "Avoid sending a force load request to prevent disruption of active PFs\n");

			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
			return -EBUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 */
			DP_NOTICE(p_hwfn, "PF is already loaded\n");
			return -EINVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return -EBUSY;
	}

	p_params->load_code = out_params.load_code;

	return 0;
}

int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	u32 wol_param;

	switch (p_hwfn->cdev->wol_config) {
	case QED_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->cdev->wol_config);
		/* Fallthrough */
	case QED_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
	mb_params.param = wol_param;
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP;

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->cdev->wol_mac;

		memset(&wol_mac, 0, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}

int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}

static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}

static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = qed_rd(p_hwfn,
			    p_ptt,
			    p_hwfn->mcp_info->port_addr +
			    offsetof(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= QED_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}

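/* Translate the shmem link_status word into the driver's link state and
 * reapply the speed-dependent min/max bandwidth limits. Runs both from
 * the MFW attention path and from qed_mcp_set_link(), hence link_lock.
 */
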
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	spin_lock_bh(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
		p_link->link_up = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
					    p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	qed_link_update(p_hwfn);
out:
	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}

int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	int rc = 0;
	u32 cmd;

	/* Set the shmem configuration according to params */
	memset(&phy_cfg, 0, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by qed, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & QED_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & QED_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
				   EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg.speed,
			   phy_cfg.pause,
			   phy_cfg.adv_speed,
			   phy_cfg.loopback_mode,
			   phy_cfg.feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return 0;
}

static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

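/* Min/max bandwidth arrive as percentages in the per-PF config word;
 * values outside the valid [1, 100] range are clamped.
 */
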
static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));
	return size;
}

static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}

static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
					    FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) &&
	    (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) {
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan);
		qed_sp_pf_update_stag(p_hwfn);
	}

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
		    &resp, &param);
}

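/* MFW events arrive as a dword array in shmem; each index corresponds to
 * an MFW_DRV_MSG_* type, and a value change signals a new message. New
 * messages are found by comparing against the shadow copy, dispatched,
 * ACKed back in big-endian, and then folded into the shadow.
 */
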
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_S_TAG_UPDATE:
			qed_mcp_update_stag(p_hwfn, p_ptt);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expects answer in BE, so we force write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}

int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}

int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	/* Read the address of the nvm_cfg */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read the offset of nvm_cfg1 */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		       offsetof(struct nvm_cfg1, glob) +
		       offsetof(struct nvm_cfg1_glob, mbi_version);
	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
			    mbi_ver_addr) &
		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);

	return 0;
}

int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
	struct qed_ptt *p_ptt;

	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	*p_media_type = MEDIA_UNSPECIFIED;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	*p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port, media_type));

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
			       enum qed_pci_personality *p_proto)
{
	/* There wasn't ever a legacy MFW that published iwarp.
	 * So at this point, this is either plain l2 or RoCE.
	 */
	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
		*p_proto = QED_PCI_ETH_ROCE;
	else
		*p_proto = QED_PCI_ETH;

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32)*p_proto);
}

1726static int
1727qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
1728 struct qed_ptt *p_ptt,
1729 enum qed_pci_personality *p_proto)
1730{
1731 u32 resp = 0, param = 0;
1732 int rc;
1733
1734 rc = qed_mcp_cmd(p_hwfn, p_ptt,
1735 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
1736 if (rc)
1737 return rc;
1738 if (resp != FW_MSG_CODE_OK) {
1739 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
1740 "MFW lacks support for command; Returns %08x\n",
1741 resp);
1742 return -EINVAL;
1743 }
1744
1745 switch (param) {
1746 case FW_MB_PARAM_GET_PF_RDMA_NONE:
1747 *p_proto = QED_PCI_ETH;
1748 break;
1749 case FW_MB_PARAM_GET_PF_RDMA_ROCE:
1750 *p_proto = QED_PCI_ETH_ROCE;
1751 break;
e0a8f9de
MK
1752 case FW_MB_PARAM_GET_PF_RDMA_IWARP:
1753 *p_proto = QED_PCI_ETH_IWARP;
1754 break;
6927e826 1755 case FW_MB_PARAM_GET_PF_RDMA_BOTH:
e0a8f9de 1756 *p_proto = QED_PCI_ETH_RDMA;
6927e826 1757 break;
6927e826
MY
1758 default:
1759 DP_NOTICE(p_hwfn,
1760 "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
1761 param);
1762 return -EINVAL;
1763 }
1764
1765 DP_VERBOSE(p_hwfn,
1766 NETIF_MSG_IFUP,
1767 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
1768 (u32) *p_proto, resp, param);
1769 return 0;
1770}
1771
static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			struct qed_ptt *p_ptt,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (!IS_ENABLED(CONFIG_QED_RDMA))
			*p_proto = QED_PCI_ETH;
		else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	case FUNC_MF_CFG_PROTOCOL_ISCSI:
		*p_proto = QED_PCI_ISCSI;
		break;
	case FUNC_MF_CFG_PROTOCOL_FCOE:
		*p_proto = QED_PCI_FCOE;
		break;
	case FUNC_MF_CFG_PROTOCOL_ROCE:
		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
		/* Fallthrough */
	default:
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);

		/* Store primary MAC for later possible WoL */
		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
			 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
			 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
	if (qed_mcp_is_init(p_hwfn)) {
		u32 resp = 0, param = 0;
		int rc;

		rc = qed_mcp_cmd(p_hwfn, p_ptt,
				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
		if (rc)
			return rc;
		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
	}

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node,
		   info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);

	return 0;
}

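/* Worked example (illustrative): for MAC 00:0e:1e:aa:bb:cc the shmem
 * words are mac_upper = 0x0000000e and mac_lower = 0x1eaabbcc, so the
 * shifts above peel off the six bytes in transmission order:
 * mac[0..1] from the low 16 bits of mac_upper, mac[2..5] from mac_lower.
 */
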
struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}

int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}

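/* Worked example (illustrative): MCP_BYTES_PER_MBIT_SHIFT is 17, so an
 * encoded NVM_CFG4 field value of 3 yields 1 << (3 + 17) = 0x100000
 * bytes, i.e. a 1 MB (8 Mbit) flash; the register encodes the size as a
 * power-of-two multiple of 1 Mbit.
 */
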
static int
qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	int rc;

	/* Only the leader hwfn can configure MSI-X, and CMT must be
	 * taken into account.
	 */
	if (!IS_LEAD_HWFN(p_hwfn))
		return 0;
	num *= p_hwfn->cdev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			 &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts for VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

static int
qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 num)
{
	u32 resp = 0, param = num, rc_param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
			 param, &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts for VFs\n", num);
	}

	return rc;
}

int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	if (QED_IS_BB(p_hwfn->cdev))
		return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
	else
		return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
}

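/* Usage sketch (illustrative only): asking the MFW for 4 MSI-X vectors
 * for VF 0. The wrapper selects the BB flavor (per-VF request, scaled by
 * the number of hwfns on CMT devices) or the AH flavor (a per-PF pool).
 *
 *	if (qed_mcp_config_vf_msix(p_hwfn, p_ptt, 0, 4))
 *		DP_NOTICE(p_hwfn, "Failed to configure VF MSI-X\n");
 */
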
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct qed_mcp_mb_params mb_params;
	struct drv_version_stc drv_version;
	__be32 val;
	u32 i;
	int rc;

	memset(&drv_version, 0, sizeof(drv_version));
	drv_version.version = p_ver->version;
	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &drv_version;
	mb_params.data_src_size = sizeof(drv_version);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

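/* Usage sketch (illustrative; the version number and name string are
 * placeholders, the real values are composed by the qed core):
 *
 *	struct qed_mcp_drv_version drv_version;
 *
 *	memset(&drv_version, 0, sizeof(drv_version));
 *	drv_version.version = ver_num;
 *	strlcpy(drv_version.name, "driver x.y.z", sizeof(drv_version.name));
 *	rc = qed_mcp_send_drv_version(p_hwfn, p_ptt, &drv_version);
 */
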
/* Allow the MCP at most 100 msec in total (10 x 10 msec) to halt */
#define QED_MCP_HALT_SLEEP_MS		10
#define QED_MCP_HALT_MAX_RETRIES	10

int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0, cpu_state, cnt = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			 &param);
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	do {
		msleep(QED_MCP_HALT_SLEEP_MS);
		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
			break;
	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);

	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
		DP_NOTICE(p_hwfn,
			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
		return -EBUSY;
	}

	return 0;
}

#define QED_MCP_RESUME_SLEEP_MS	10

int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state;

	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
	msleep(QED_MCP_RESUME_SLEEP_MS);
	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);

	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
		DP_NOTICE(p_hwfn,
			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  cpu_mode, cpu_state);
		return -EBUSY;
	}

	return 0;
}

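/* Pairing sketch (illustrative only): halt/resume bracket operations
 * that must run while the management CPU is quiesced. Assumed caller,
 * heavily simplified:
 *
 *	if (!qed_mcp_halt(p_hwfn, p_ptt)) {
 *		(access resources normally owned by the MCP)
 *		qed_mcp_resume(p_hwfn, p_ptt);
 *	}
 */
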
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     enum qed_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (client) {
	case QED_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case QED_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case QED_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   enum qed_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (drv_state) {
	case QED_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case QED_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case QED_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u16 mtu)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);

	return rc;
}

int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *mac)
{
	struct qed_mcp_mb_params mb_params;
	u32 mfw_mac[2];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
	mb_params.param |= MCP_PF_ID(p_hwfn);

	/* MCP is BE, and on LE platforms PCI would swap access to SHMEM
	 * in 32-bit granularity.
	 * So the MAC has to be set in native order [and not byte order],
	 * otherwise it would be read incorrectly by MFW after swap.
	 */
	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;

	mb_params.p_data_src = (u8 *)mfw_mac;
	mb_params.data_src_size = 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);

	/* Store primary MAC for later possible WoL */
	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);

	return rc;
}

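/* Worked example (illustrative): for MAC aa:bb:cc:dd:ee:ff the packing
 * above yields mfw_mac[0] = 0xaabbccdd and mfw_mac[1] = 0xeeff0000 in
 * host order; on an LE host the 32-bit PCI swap of each SHMEM word then
 * presents the bytes to the BE management CPU in wire order.
 */
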
int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Can't change WoL configuration when WoL isn't supported\n");
		return -EINVAL;
	}

	switch (wol) {
	case QED_OV_WOL_DEFAULT:
		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
		break;
	case QED_OV_WOL_DISABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);

	/* Store the WoL update for a future unload */
	p_hwfn->cdev->wol_config = (u8)wol;

	return rc;
}

int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (eswitch) {
	case QED_OV_ESWITCH_NONE:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
		break;
	case QED_OV_ESWITCH_VEB:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
		break;
	case QED_OV_ESWITCH_VEPA:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);

	return rc;
}

int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			 mask_parities, &resp, &param);

	if (rc) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not acknowledge mask parity request. Old MFW?\n");
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
{
	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 resp = 0, resp_param = 0;
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);

		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_NVM_READ_NVRAM,
					addr + offset +
					(bytes_to_copy <<
					 DRV_MB_PARAM_NVM_LEN_SHIFT),
					&resp, &resp_param,
					&read_len,
					(u32 *)(p_buf + offset));

		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and it's possible the
		 * scheduler isn't preemptible. Sleep a bit to prevent
		 * CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - read_len) % 0x1000)
			usleep_range(1000, 2000);

		offset += read_len;
		bytes_left -= read_len;
	}

	cdev->mcp_nvm_resp = resp;
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

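/* Usage sketch (illustrative only; buf, addr and len are placeholder
 * names for a caller-owned buffer and range):
 *
 *	u8 *buf = kzalloc(len, GFP_KERNEL);
 *
 *	if (buf && qed_mcp_nvm_read(cdev, addr, buf, len))
 *		DP_NOTICE(cdev, "NVM read failed\n");
 *
 * The read proceeds in MCP_DRV_NVM_BUF_LEN-sized chunks and sleeps
 * roughly once per 4 KB boundary crossed, so long transfers don't hog
 * the CPU.
 */
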
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, num_images);
	if (rc)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
		rc = -EINVAL;

	return rc;
}

int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct bist_nvm_image_att *p_image_att,
					u32 image_index)
{
	u32 buf_size = 0, param, resp = 0, resp_param = 0;
	int rc;

	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				DRV_MSG_CODE_BIST_TEST, param,
				&resp, &resp_param,
				&buf_size,
				(u32 *)p_image_att);
	if (rc)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = -EINVAL;

	return rc;
}

static int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  enum qed_nvm_images image_id,
			  struct qed_nvm_image_att *p_image_att)
{
	struct bist_nvm_image_att mfw_image_att;
	enum nvm_image_type type;
	u32 num_images, i;
	int rc;

	/* Translate image_id into MFW definitions */
	switch (image_id) {
	case QED_NVM_IMAGE_ISCSI_CFG:
		type = NVM_TYPE_ISCSI_CFG;
		break;
	case QED_NVM_IMAGE_FCOE_CFG:
		type = NVM_TYPE_FCOE_CFG;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
			  image_id);
		return -EINVAL;
	}

	/* Learn number of images, then traverse and see if one fits */
	rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
	if (rc || !num_images)
		return -EINVAL;

	for (i = 0; i < num_images; i++) {
		rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
							 &mfw_image_att, i);
		if (rc)
			return rc;

		if (type == mfw_image_att.image_type)
			break;
	}
	if (i == num_images) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Failed to find nvram image of type %08x\n",
			   image_id);
		return -EINVAL;
	}

	p_image_att->start_addr = mfw_image_att.nvm_start_addr;
	p_image_att->length = mfw_image_att.len;

	return 0;
}

int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  enum qed_nvm_images image_id,
			  u8 *p_buffer, u32 buffer_len)
{
	struct qed_nvm_image_att image_att;
	int rc;

	memset(p_buffer, 0, buffer_len);

	rc = qed_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
	if (rc)
		return rc;

	/* Validate sizes - both the image's and the supplied buffer's */
	if (image_att.length <= 4) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Image [%d] is too small - only %d bytes\n",
			   image_id, image_att.length);
		return -EINVAL;
	}

	/* Each NVM image is suffixed by a CRC; the upper layer has no
	 * need for it.
	 */
	image_att.length -= 4;

	if (image_att.length > buffer_len) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_STORAGE,
			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
			   image_id, image_att.length, buffer_len);
		return -ENOMEM;
	}

	return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
				p_buffer, image_att.length);
}

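/* Usage sketch (illustrative only; the buffer size is a placeholder):
 *
 *	u8 cfg_buf[2048];
 *
 *	rc = qed_mcp_get_nvm_image(p_hwfn, p_ptt, QED_NVM_IMAGE_ISCSI_CFG,
 *				   cfg_buf, sizeof(cfg_buf));
 *
 * On success cfg_buf holds the image minus its trailing 4-byte CRC;
 * -ENOMEM indicates the image didn't fit in the supplied buffer.
 */
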
static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case QED_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case QED_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case QED_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case QED_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case QED_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case QED_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case QED_MAC:
	case QED_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case QED_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case QED_LL2_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case QED_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	case QED_BDQ:
		mfw_res_id = RESOURCE_BDQ_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}

#define QED_RESC_ALLOC_VERSION_MAJOR	2
#define QED_RESC_ALLOC_VERSION_MINOR	0
#define QED_RESC_ALLOC_VERSION				     \
	((QED_RESC_ALLOC_VERSION_MAJOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
	 (QED_RESC_ALLOC_VERSION_MINOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))

struct qed_resc_alloc_in_params {
	u32 cmd;
	enum qed_resources res_id;
	u32 resc_max_val;
};

struct qed_resc_alloc_out_params {
	u32 mcp_resp;
	u32 mcp_param;
	u32 resc_num;
	u32 resc_start;
	u32 vf_resc_num;
	u32 vf_resc_start;
	u32 flags;
};

static int
qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct qed_resc_alloc_in_params *p_in_params,
			    struct qed_resc_alloc_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct resource_info mfw_resc_info;
	int rc;

	memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));

	mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       qed_hw_get_resc_name(p_in_params->res_id));
		return -EINVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		mfw_resc_info.size = p_in_params->resc_max_val;
		/* Fallthrough */
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return -EINVAL;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = QED_RESC_ALLOC_VERSION;
	mb_params.p_data_src = &mfw_resc_info;
	mb_params.data_src_size = sizeof(mfw_resc_info);
	mb_params.p_data_dst = mb_params.p_data_src;
	mb_params.data_dst_size = mb_params.data_src_size;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd,
		   p_in_params->res_id,
		   qed_hw_get_resc_name(p_in_params->res_id),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = mfw_resc_info.size;
	p_out_params->resc_start = mfw_resc_info.offset;
	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
	p_out_params->flags = mfw_resc_info.flags;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num,
		   p_out_params->resc_start,
		   p_out_params->vf_resc_num,
		   p_out_params->vf_resc_start, p_out_params->flags);

	return 0;
}

int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 enum qed_resources res_id,
			 u32 resc_max_val, u32 *p_mcp_resp)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
	in_params.res_id = res_id;
	in_params.resc_max_val = resc_max_val;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	return 0;
}

int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum qed_resources res_id,
		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	in_params.res_id = res_id;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		*p_resc_num = out_params.resc_num;
		*p_resc_start = out_params.resc_start;
	}

	return 0;
}

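/* Usage sketch (illustrative only): querying how many L2 queues the MFW
 * allocated to this PF and where the range starts.
 *
 *	u32 resp = 0, num = 0, start = 0;
 *
 *	rc = qed_mcp_get_resc_info(p_hwfn, p_ptt, QED_L2_QUEUE,
 *				   &resp, &num, &start);
 *	if (!rc && resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
 *		(num/start now describe this PF's L2 queue range)
 */
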
int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			   &mcp_resp, &mcp_param);
}

static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
{
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			 p_mcp_resp, p_mcp_param);
	if (rc)
		return rc;

	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The resource command is unsupported by the MFW\n");
		return -EINVAL;
	}

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
		return -EINVAL;
	}

	return rc;
}

int
__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_lock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	switch (p_params->timeout) {
	case QED_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		p_params->timeout = 0;
		break;
	case QED_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		p_params->timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, p_params->timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}

int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
{
	u32 retry_cnt = 0;
	int rc;

	do {
		/* No need for an interval before the first iteration */
		if (retry_cnt) {
			if (p_params->sleep_b4_retry) {
				u16 retry_interval_in_ms =
				    DIV_ROUND_UP(p_params->retry_interval,
						 1000);

				msleep(retry_interval_in_ms);
			} else {
				udelay(p_params->retry_interval);
			}
		}

		rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
		if (rc)
			return rc;

		if (p_params->b_granted)
			break;
	} while (retry_cnt++ < p_params->retry_num);

	return 0;
}

int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_unlock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
				   : RESOURCE_OPCODE_RELEASE;
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
		   param, opcode, p_params->resource);

	/* Attempt to release the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [%d]\n",
			p_params->resource);
		/* Fallthrough */
	case RESOURCE_OPCODE_RELEASED:
		p_params->b_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		p_params->b_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}

void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
				    struct qed_resc_unlock_params *p_unlock,
				    enum qed_resc_lock
				    resource, bool b_is_permanent)
{
	if (p_lock) {
		memset(p_lock, 0, sizeof(*p_lock));

		/* Permanent resources don't require aging, and there's no
		 * point in trying to acquire them more than once since
		 * another entity isn't expected to release them.
		 */
		if (b_is_permanent) {
			p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
		} else {
			p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
			p_lock->retry_interval =
			    QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
			p_lock->sleep_b4_retry = true;
		}

		p_lock->resource = resource;
	}

	if (p_unlock) {
		memset(p_unlock, 0, sizeof(*p_unlock));
		p_unlock->resource = resource;
	}
}

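/* Flow sketch (illustrative only): a typical guarded critical section
 * built from the helpers above. QED_RESC_LOCK_PTP_PORT0 stands in for
 * whichever enum qed_resc_lock value the caller actually owns.
 *
 *	struct qed_resc_lock_params lock;
 *	struct qed_resc_unlock_params unlock;
 *
 *	qed_mcp_resc_lock_default_init(&lock, &unlock,
 *				       QED_RESC_LOCK_PTP_PORT0, false);
 *	if (!qed_mcp_resc_lock(p_hwfn, p_ptt, &lock) && lock.b_granted) {
 *		(touch the MFW-arbitrated resource)
 *		qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 *	}
 */
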
int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
			 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
	if (!rc)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
			   "MFW supported features: %08x\n",
			   p_hwfn->mcp_info->capabilities);

	return rc;
}

int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param, features;

	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
			   features, &mcp_resp, &mcp_param);
}