/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
f25b119c
PR
22static struct be_cmd_priv_map cmd_priv_map[] = {
23 {
24 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
25 CMD_SUBSYSTEM_ETH,
26 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
27 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
28 },
29 {
30 OPCODE_COMMON_GET_FLOW_CONTROL,
31 CMD_SUBSYSTEM_COMMON,
32 BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
33 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
34 },
35 {
36 OPCODE_COMMON_SET_FLOW_CONTROL,
37 CMD_SUBSYSTEM_COMMON,
38 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
39 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
40 },
41 {
42 OPCODE_ETH_GET_PPORT_STATS,
43 CMD_SUBSYSTEM_ETH,
44 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
45 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
46 },
47 {
48 OPCODE_COMMON_GET_PHY_DETAILS,
49 CMD_SUBSYSTEM_COMMON,
50 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
51 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
52 }
53};
54
a2cc4e0b 55static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
f25b119c
PR
56{
57 int i;
58 int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
59 u32 cmd_privileges = adapter->cmd_privileges;
60
61 for (i = 0; i < num_entries; i++)
62 if (opcode == cmd_priv_map[i].opcode &&
63 subsystem == cmd_priv_map[i].subsystem)
64 if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
65 return false;
66
67 return true;
68}
69
3de09455
SK
70static inline void *embedded_payload(struct be_mcc_wrb *wrb)
71{
72 return wrb->payload.embedded_payload;
73}
609ff3bb 74
8788fdc2 75static void be_mcc_notify(struct be_adapter *adapter)
5fb379ee 76{
8788fdc2 77 struct be_queue_info *mccq = &adapter->mcc_obj.q;
5fb379ee
SP
78 u32 val = 0;
79
6589ade0 80 if (be_error(adapter))
7acc2087 81 return;
7acc2087 82
5fb379ee
SP
83 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
84 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
85
86 wmb();
8788fdc2 87 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
5fb379ee
SP
88}
89
90/* To check if valid bit is set, check the entire word as we don't know
91 * the endianness of the data (old entry is host endian while a new entry is
92 * little endian) */
efd2e40a 93static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
5fb379ee 94{
9e9ff4b7
SP
95 u32 flags;
96
5fb379ee 97 if (compl->flags != 0) {
9e9ff4b7
SP
98 flags = le32_to_cpu(compl->flags);
99 if (flags & CQE_FLAGS_VALID_MASK) {
100 compl->flags = flags;
101 return true;
102 }
5fb379ee 103 }
9e9ff4b7 104 return false;
5fb379ee
SP
105}
106
107/* Need to reset the entire word that houses the valid bit */
efd2e40a 108static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
5fb379ee
SP
109{
110 compl->flags = 0;
111}
112
652bf646
PR
113static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
114{
115 unsigned long addr;
116
117 addr = tag1;
118 addr = ((addr << 16) << 16) | tag0;
119 return (void *)addr;
120}
121
4c60005f
KA
122static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
123{
124 if (base_status == MCC_STATUS_NOT_SUPPORTED ||
125 base_status == MCC_STATUS_ILLEGAL_REQUEST ||
126 addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
127 (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
128 (base_status == MCC_STATUS_ILLEGAL_FIELD ||
129 addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
130 return true;
131 else
132 return false;
133}
134
559b633f
SP
135/* Place holder for all the async MCC cmds wherein the caller is not in a busy
136 * loop (has not issued be_mcc_notify_wait())
137 */
138static void be_async_cmd_process(struct be_adapter *adapter,
139 struct be_mcc_compl *compl,
140 struct be_cmd_resp_hdr *resp_hdr)
141{
142 enum mcc_base_status base_status = base_status(compl->status);
143 u8 opcode = 0, subsystem = 0;
144
145 if (resp_hdr) {
146 opcode = resp_hdr->opcode;
147 subsystem = resp_hdr->subsystem;
148 }
149
150 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
151 subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
152 complete(&adapter->et_cmd_compl);
153 return;
154 }
155
156 if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
157 opcode == OPCODE_COMMON_WRITE_OBJECT) &&
158 subsystem == CMD_SUBSYSTEM_COMMON) {
159 adapter->flash_status = compl->status;
160 complete(&adapter->et_cmd_compl);
161 return;
162 }
163
164 if ((opcode == OPCODE_ETH_GET_STATISTICS ||
165 opcode == OPCODE_ETH_GET_PPORT_STATS) &&
166 subsystem == CMD_SUBSYSTEM_ETH &&
167 base_status == MCC_STATUS_SUCCESS) {
168 be_parse_stats(adapter);
169 adapter->stats_cmd_sent = false;
170 return;
171 }
172
173 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
174 subsystem == CMD_SUBSYSTEM_COMMON) {
175 if (base_status == MCC_STATUS_SUCCESS) {
176 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
177 (void *)resp_hdr;
178 adapter->drv_stats.be_on_die_temperature =
179 resp->on_die_temperature;
180 } else {
181 adapter->be_get_temp_freq = 0;
182 }
183 return;
184 }
185}
186
8788fdc2 187static int be_mcc_compl_process(struct be_adapter *adapter,
652bf646 188 struct be_mcc_compl *compl)
5fb379ee 189{
4c60005f
KA
190 enum mcc_base_status base_status;
191 enum mcc_addl_status addl_status;
652bf646
PR
192 struct be_cmd_resp_hdr *resp_hdr;
193 u8 opcode = 0, subsystem = 0;
5fb379ee
SP
194
195 /* Just swap the status to host endian; mcc tag is opaquely copied
196 * from mcc_wrb */
197 be_dws_le_to_cpu(compl, 4);
198
4c60005f
KA
199 base_status = base_status(compl->status);
200 addl_status = addl_status(compl->status);
96c9b2e4 201
652bf646 202 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
652bf646
PR
203 if (resp_hdr) {
204 opcode = resp_hdr->opcode;
205 subsystem = resp_hdr->subsystem;
206 }
207
559b633f 208 be_async_cmd_process(adapter, compl, resp_hdr);
3de09455 209
559b633f
SP
210 if (base_status != MCC_STATUS_SUCCESS &&
211 !be_skip_err_log(opcode, base_status, addl_status)) {
96c9b2e4 212
4c60005f 213 if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
97f1d8cd 214 dev_warn(&adapter->pdev->dev,
522609f2 215 "VF is not privileged to issue opcode %d-%d\n",
97f1d8cd 216 opcode, subsystem);
2b3f291b 217 } else {
97f1d8cd
VV
218 dev_err(&adapter->pdev->dev,
219 "opcode %d-%d failed:status %d-%d\n",
4c60005f 220 opcode, subsystem, base_status, addl_status);
2b3f291b 221 }
5fb379ee 222 }
4c60005f 223 return compl->status;
5fb379ee
SP
224}
225
a8f447bd 226/* Link state evt is a string of bytes; no need for endian swapping */
8788fdc2 227static void be_async_link_state_process(struct be_adapter *adapter,
a2cc4e0b 228 struct be_async_event_link_state *evt)
a8f447bd 229{
b236916a 230 /* When link status changes, link speed must be re-queried from FW */
42f11cf2 231 adapter->phy.link_speed = -1;
b236916a 232
bdce2ad7
SR
233 /* On BEx the FW does not send a separate link status
234 * notification for physical and logical link.
235 * On other chips just process the logical link
236 * status notification
237 */
238 if (!BEx_chip(adapter) &&
2e177a5c
PR
239 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
240 return;
241
b236916a
AK
242 /* For the initial link status do not rely on the ASYNC event as
243 * it may not be received in some cases.
244 */
245 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
bdce2ad7
SR
246 be_link_status_update(adapter,
247 evt->port_link_status & LINK_STATUS_MASK);
a8f447bd
SP
248}
249
cc4ce020
SK
250/* Grp5 CoS Priority evt */
251static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
a2cc4e0b
SP
252 struct
253 be_async_event_grp5_cos_priority
254 *evt)
cc4ce020
SK
255{
256 if (evt->valid) {
257 adapter->vlan_prio_bmap = evt->available_priority_bmap;
60964dd7 258 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
cc4ce020
SK
259 adapter->recommended_prio =
260 evt->reco_default_priority << VLAN_PRIO_SHIFT;
261 }
262}
263
323ff71e 264/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
cc4ce020 265static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
a2cc4e0b
SP
266 struct
267 be_async_event_grp5_qos_link_speed
268 *evt)
cc4ce020 269{
323ff71e
SP
270 if (adapter->phy.link_speed >= 0 &&
271 evt->physical_port == adapter->port_num)
272 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
cc4ce020
SK
273}
274
3968fa1e
AK
275/*Grp5 PVID evt*/
276static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
a2cc4e0b
SP
277 struct
278 be_async_event_grp5_pvid_state
279 *evt)
3968fa1e 280{
bdac85b5 281 if (evt->enabled) {
939cf306 282 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
bdac85b5
RN
283 dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
284 } else {
3968fa1e 285 adapter->pvid = 0;
bdac85b5 286 }
3968fa1e
AK
287}
288
cc4ce020 289static void be_async_grp5_evt_process(struct be_adapter *adapter,
a2cc4e0b 290 u32 trailer, struct be_mcc_compl *evt)
cc4ce020
SK
291{
292 u8 event_type = 0;
293
294 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
295 ASYNC_TRAILER_EVENT_TYPE_MASK;
296
297 switch (event_type) {
298 case ASYNC_EVENT_COS_PRIORITY:
299 be_async_grp5_cos_priority_process(adapter,
300 (struct be_async_event_grp5_cos_priority *)evt);
301 break;
302 case ASYNC_EVENT_QOS_SPEED:
303 be_async_grp5_qos_speed_process(adapter,
304 (struct be_async_event_grp5_qos_link_speed *)evt);
305 break;
3968fa1e
AK
306 case ASYNC_EVENT_PVID_STATE:
307 be_async_grp5_pvid_state_process(adapter,
308 (struct be_async_event_grp5_pvid_state *)evt);
309 break;
cc4ce020 310 default:
05ccaa2b
VV
311 dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
312 event_type);
cc4ce020
SK
313 break;
314 }
315}
316
bc0c3405 317static void be_async_dbg_evt_process(struct be_adapter *adapter,
a2cc4e0b 318 u32 trailer, struct be_mcc_compl *cmp)
bc0c3405
AK
319{
320 u8 event_type = 0;
321 struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
322
323 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
324 ASYNC_TRAILER_EVENT_TYPE_MASK;
325
326 switch (event_type) {
327 case ASYNC_DEBUG_EVENT_TYPE_QNQ:
328 if (evt->valid)
329 adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
330 adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
331 break;
332 default:
05ccaa2b
VV
333 dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
334 event_type);
bc0c3405
AK
335 break;
336 }
337}
338
a8f447bd
SP
339static inline bool is_link_state_evt(u32 trailer)
340{
807540ba 341 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
a8f447bd 342 ASYNC_TRAILER_EVENT_CODE_MASK) ==
807540ba 343 ASYNC_EVENT_CODE_LINK_STATE;
a8f447bd 344}
5fb379ee 345
cc4ce020
SK
346static inline bool is_grp5_evt(u32 trailer)
347{
348 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
349 ASYNC_TRAILER_EVENT_CODE_MASK) ==
350 ASYNC_EVENT_CODE_GRP_5);
351}
352
bc0c3405
AK
353static inline bool is_dbg_evt(u32 trailer)
354{
355 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
356 ASYNC_TRAILER_EVENT_CODE_MASK) ==
357 ASYNC_EVENT_CODE_QNQ);
358}
359
efd2e40a 360static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
5fb379ee 361{
8788fdc2 362 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
efd2e40a 363 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
5fb379ee
SP
364
365 if (be_mcc_compl_is_new(compl)) {
366 queue_tail_inc(mcc_cq);
367 return compl;
368 }
369 return NULL;
370}
371
7a1e9b20
SP
372void be_async_mcc_enable(struct be_adapter *adapter)
373{
374 spin_lock_bh(&adapter->mcc_cq_lock);
375
376 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
377 adapter->mcc_obj.rearm_cq = true;
378
379 spin_unlock_bh(&adapter->mcc_cq_lock);
380}
381
382void be_async_mcc_disable(struct be_adapter *adapter)
383{
a323d9bf
SP
384 spin_lock_bh(&adapter->mcc_cq_lock);
385
7a1e9b20 386 adapter->mcc_obj.rearm_cq = false;
a323d9bf
SP
387 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
388
389 spin_unlock_bh(&adapter->mcc_cq_lock);
7a1e9b20
SP
390}
391
10ef9ab4 392int be_process_mcc(struct be_adapter *adapter)
5fb379ee 393{
efd2e40a 394 struct be_mcc_compl *compl;
10ef9ab4 395 int num = 0, status = 0;
7a1e9b20 396 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
5fb379ee 397
072a9c48 398 spin_lock(&adapter->mcc_cq_lock);
8788fdc2 399 while ((compl = be_mcc_compl_get(adapter))) {
a8f447bd
SP
400 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
401 /* Interpret flags as an async trailer */
323f30b3
AK
402 if (is_link_state_evt(compl->flags))
403 be_async_link_state_process(adapter,
a8f447bd 404 (struct be_async_event_link_state *) compl);
cc4ce020
SK
405 else if (is_grp5_evt(compl->flags))
406 be_async_grp5_evt_process(adapter,
a2cc4e0b 407 compl->flags, compl);
bc0c3405
AK
408 else if (is_dbg_evt(compl->flags))
409 be_async_dbg_evt_process(adapter,
a2cc4e0b 410 compl->flags, compl);
b31c50a7 411 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
10ef9ab4 412 status = be_mcc_compl_process(adapter, compl);
7a1e9b20 413 atomic_dec(&mcc_obj->q.used);
5fb379ee
SP
414 }
415 be_mcc_compl_use(compl);
416 num++;
417 }
b31c50a7 418
10ef9ab4
SP
419 if (num)
420 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
421
072a9c48 422 spin_unlock(&adapter->mcc_cq_lock);
10ef9ab4 423 return status;
5fb379ee
SP
424}
425
6ac7b687 426/* Wait till no more pending mcc requests are present */
b31c50a7 427static int be_mcc_wait_compl(struct be_adapter *adapter)
6ac7b687 428{
b31c50a7 429#define mcc_timeout 120000 /* 12s timeout */
10ef9ab4 430 int i, status = 0;
f31e50a8
SP
431 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
432
6ac7b687 433 for (i = 0; i < mcc_timeout; i++) {
6589ade0
SP
434 if (be_error(adapter))
435 return -EIO;
436
072a9c48 437 local_bh_disable();
10ef9ab4 438 status = be_process_mcc(adapter);
072a9c48 439 local_bh_enable();
b31c50a7 440
f31e50a8 441 if (atomic_read(&mcc_obj->q.used) == 0)
6ac7b687
SP
442 break;
443 udelay(100);
444 }
b31c50a7 445 if (i == mcc_timeout) {
6589ade0
SP
446 dev_err(&adapter->pdev->dev, "FW not responding\n");
447 adapter->fw_timeout = true;
652bf646 448 return -EIO;
b31c50a7 449 }
f31e50a8 450 return status;
6ac7b687
SP
451}
452
453/* Notify MCC requests and wait for completion */
b31c50a7 454static int be_mcc_notify_wait(struct be_adapter *adapter)
6ac7b687 455{
652bf646
PR
456 int status;
457 struct be_mcc_wrb *wrb;
458 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
459 u16 index = mcc_obj->q.head;
460 struct be_cmd_resp_hdr *resp;
461
462 index_dec(&index, mcc_obj->q.len);
463 wrb = queue_index_node(&mcc_obj->q, index);
464
465 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
466
8788fdc2 467 be_mcc_notify(adapter);
652bf646
PR
468
469 status = be_mcc_wait_compl(adapter);
470 if (status == -EIO)
471 goto out;
472
4c60005f
KA
473 status = (resp->base_status |
474 ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
475 CQE_ADDL_STATUS_SHIFT));
652bf646
PR
476out:
477 return status;
6ac7b687
SP
478}
479
5f0b849e 480static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
6b7c5b94 481{
f25b03a7 482 int msecs = 0;
6b7c5b94
SP
483 u32 ready;
484
485 do {
6589ade0
SP
486 if (be_error(adapter))
487 return -EIO;
488
cf588477 489 ready = ioread32(db);
434b3648 490 if (ready == 0xffffffff)
cf588477 491 return -1;
cf588477
SP
492
493 ready &= MPU_MAILBOX_DB_RDY_MASK;
6b7c5b94
SP
494 if (ready)
495 break;
496
f25b03a7 497 if (msecs > 4000) {
6589ade0
SP
498 dev_err(&adapter->pdev->dev, "FW not responding\n");
499 adapter->fw_timeout = true;
f67ef7ba 500 be_detect_error(adapter);
6b7c5b94
SP
501 return -1;
502 }
503
1dbf53a2 504 msleep(1);
f25b03a7 505 msecs++;
6b7c5b94
SP
506 } while (true);
507
508 return 0;
509}
510
511/*
512 * Insert the mailbox address into the doorbell in two steps
5fb379ee 513 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
6b7c5b94 514 */
b31c50a7 515static int be_mbox_notify_wait(struct be_adapter *adapter)
6b7c5b94
SP
516{
517 int status;
6b7c5b94 518 u32 val = 0;
8788fdc2
SP
519 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
520 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
6b7c5b94 521 struct be_mcc_mailbox *mbox = mbox_mem->va;
efd2e40a 522 struct be_mcc_compl *compl = &mbox->compl;
6b7c5b94 523
cf588477
SP
524 /* wait for ready to be set */
525 status = be_mbox_db_ready_wait(adapter, db);
526 if (status != 0)
527 return status;
528
6b7c5b94
SP
529 val |= MPU_MAILBOX_DB_HI_MASK;
530 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
531 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
532 iowrite32(val, db);
533
534 /* wait for ready to be set */
5f0b849e 535 status = be_mbox_db_ready_wait(adapter, db);
6b7c5b94
SP
536 if (status != 0)
537 return status;
538
539 val = 0;
6b7c5b94
SP
540 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
541 val |= (u32)(mbox_mem->dma >> 4) << 2;
542 iowrite32(val, db);
543
5f0b849e 544 status = be_mbox_db_ready_wait(adapter, db);
6b7c5b94
SP
545 if (status != 0)
546 return status;
547
5fb379ee 548 /* A cq entry has been made now */
efd2e40a
SP
549 if (be_mcc_compl_is_new(compl)) {
550 status = be_mcc_compl_process(adapter, &mbox->compl);
551 be_mcc_compl_use(compl);
5fb379ee
SP
552 if (status)
553 return status;
554 } else {
5f0b849e 555 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
6b7c5b94
SP
556 return -1;
557 }
5fb379ee 558 return 0;
6b7c5b94
SP
559}
560
c5b3ad4c 561static u16 be_POST_stage_get(struct be_adapter *adapter)
6b7c5b94 562{
fe6d2a38
SP
563 u32 sem;
564
c5b3ad4c
SP
565 if (BEx_chip(adapter))
566 sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
6b7c5b94 567 else
c5b3ad4c
SP
568 pci_read_config_dword(adapter->pdev,
569 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
570
571 return sem & POST_STAGE_MASK;
6b7c5b94
SP
572}
573
87f20c26 574static int lancer_wait_ready(struct be_adapter *adapter)
bf99e50d
PR
575{
576#define SLIPORT_READY_TIMEOUT 30
577 u32 sliport_status;
578 int status = 0, i;
579
580 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
581 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
582 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
583 break;
584
585 msleep(1000);
586 }
587
588 if (i == SLIPORT_READY_TIMEOUT)
589 status = -1;
590
591 return status;
592}
593
67297ad8
PR
594static bool lancer_provisioning_error(struct be_adapter *adapter)
595{
596 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
597 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
598 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
a2cc4e0b
SP
599 sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
600 sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
67297ad8
PR
601
602 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
603 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
604 return true;
605 }
606 return false;
607}
608
bf99e50d
PR
609int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
610{
611 int status;
612 u32 sliport_status, err, reset_needed;
67297ad8
PR
613 bool resource_error;
614
615 resource_error = lancer_provisioning_error(adapter);
616 if (resource_error)
01e5b2c4 617 return -EAGAIN;
67297ad8 618
bf99e50d
PR
619 status = lancer_wait_ready(adapter);
620 if (!status) {
621 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
622 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
623 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
624 if (err && reset_needed) {
625 iowrite32(SLI_PORT_CONTROL_IP_MASK,
626 adapter->db + SLIPORT_CONTROL_OFFSET);
627
628 /* check adapter has corrected the error */
629 status = lancer_wait_ready(adapter);
630 sliport_status = ioread32(adapter->db +
631 SLIPORT_STATUS_OFFSET);
632 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
633 SLIPORT_STATUS_RN_MASK);
634 if (status || sliport_status)
635 status = -1;
636 } else if (err || reset_needed) {
637 status = -1;
638 }
639 }
67297ad8
PR
640 /* Stop error recovery if error is not recoverable.
641 * No resource error is temporary errors and will go away
642 * when PF provisions resources.
643 */
644 resource_error = lancer_provisioning_error(adapter);
01e5b2c4
SK
645 if (resource_error)
646 status = -EAGAIN;
67297ad8 647
bf99e50d
PR
648 return status;
649}
650
651int be_fw_wait_ready(struct be_adapter *adapter)
6b7c5b94 652{
43a04fdc
SP
653 u16 stage;
654 int status, timeout = 0;
6ed35eea 655 struct device *dev = &adapter->pdev->dev;
6b7c5b94 656
bf99e50d
PR
657 if (lancer_chip(adapter)) {
658 status = lancer_wait_ready(adapter);
659 return status;
660 }
661
43a04fdc 662 do {
c5b3ad4c 663 stage = be_POST_stage_get(adapter);
66d29cbc 664 if (stage == POST_STAGE_ARMFW_RDY)
43a04fdc 665 return 0;
66d29cbc 666
a2cc4e0b 667 dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
66d29cbc
GS
668 if (msleep_interruptible(2000)) {
669 dev_err(dev, "Waiting for POST aborted\n");
670 return -EINTR;
43a04fdc 671 }
66d29cbc 672 timeout += 2;
3ab81b5f 673 } while (timeout < 60);
6b7c5b94 674
6ed35eea 675 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
43a04fdc 676 return -1;
6b7c5b94
SP
677}
678
6b7c5b94
SP
679
680static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
681{
682 return &wrb->payload.sgl[0];
683}
684
a2cc4e0b 685static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
bea50988
SP
686{
687 wrb->tag0 = addr & 0xFFFFFFFF;
688 wrb->tag1 = upper_32_bits(addr);
689}
6b7c5b94
SP
690
691/* Don't touch the hdr after it's prepared */
106df1e3
SK
692/* mem will be NULL for embedded commands */
693static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
a2cc4e0b
SP
694 u8 subsystem, u8 opcode, int cmd_len,
695 struct be_mcc_wrb *wrb,
696 struct be_dma_mem *mem)
6b7c5b94 697{
106df1e3
SK
698 struct be_sge *sge;
699
6b7c5b94
SP
700 req_hdr->opcode = opcode;
701 req_hdr->subsystem = subsystem;
702 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
07793d33 703 req_hdr->version = 0;
bea50988 704 fill_wrb_tags(wrb, (ulong) req_hdr);
106df1e3
SK
705 wrb->payload_length = cmd_len;
706 if (mem) {
707 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
708 MCC_WRB_SGE_CNT_SHIFT;
709 sge = nonembedded_sgl(wrb);
710 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
711 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
712 sge->len = cpu_to_le32(mem->size);
713 } else
714 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
715 be_dws_cpu_to_le(wrb, 8);
6b7c5b94
SP
716}
717
718static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
a2cc4e0b 719 struct be_dma_mem *mem)
6b7c5b94
SP
720{
721 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
722 u64 dma = (u64)mem->dma;
723
724 for (i = 0; i < buf_pages; i++) {
725 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
726 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
727 dma += PAGE_SIZE_4K;
728 }
729}
730
b31c50a7 731static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
6b7c5b94 732{
b31c50a7
SP
733 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
734 struct be_mcc_wrb *wrb
735 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
736 memset(wrb, 0, sizeof(*wrb));
737 return wrb;
6b7c5b94
SP
738}
739
b31c50a7 740static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
5fb379ee 741{
b31c50a7
SP
742 struct be_queue_info *mccq = &adapter->mcc_obj.q;
743 struct be_mcc_wrb *wrb;
744
aa790db9
PR
745 if (!mccq->created)
746 return NULL;
747
4d277125 748 if (atomic_read(&mccq->used) >= mccq->len)
713d0394 749 return NULL;
713d0394 750
b31c50a7
SP
751 wrb = queue_head_node(mccq);
752 queue_head_inc(mccq);
753 atomic_inc(&mccq->used);
754 memset(wrb, 0, sizeof(*wrb));
5fb379ee
SP
755 return wrb;
756}
757
bea50988
SP
758static bool use_mcc(struct be_adapter *adapter)
759{
760 return adapter->mcc_obj.q.created;
761}
762
763/* Must be used only in process context */
764static int be_cmd_lock(struct be_adapter *adapter)
765{
766 if (use_mcc(adapter)) {
767 spin_lock_bh(&adapter->mcc_lock);
768 return 0;
769 } else {
770 return mutex_lock_interruptible(&adapter->mbox_lock);
771 }
772}
773
774/* Must be used only in process context */
775static void be_cmd_unlock(struct be_adapter *adapter)
776{
777 if (use_mcc(adapter))
778 spin_unlock_bh(&adapter->mcc_lock);
779 else
780 return mutex_unlock(&adapter->mbox_lock);
781}
782
783static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
784 struct be_mcc_wrb *wrb)
785{
786 struct be_mcc_wrb *dest_wrb;
787
788 if (use_mcc(adapter)) {
789 dest_wrb = wrb_from_mccq(adapter);
790 if (!dest_wrb)
791 return NULL;
792 } else {
793 dest_wrb = wrb_from_mbox(adapter);
794 }
795
796 memcpy(dest_wrb, wrb, sizeof(*wrb));
797 if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
798 fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
799
800 return dest_wrb;
801}
802
803/* Must be used only in process context */
804static int be_cmd_notify_wait(struct be_adapter *adapter,
805 struct be_mcc_wrb *wrb)
806{
807 struct be_mcc_wrb *dest_wrb;
808 int status;
809
810 status = be_cmd_lock(adapter);
811 if (status)
812 return status;
813
814 dest_wrb = be_cmd_copy(adapter, wrb);
815 if (!dest_wrb)
816 return -EBUSY;
817
818 if (use_mcc(adapter))
819 status = be_mcc_notify_wait(adapter);
820 else
821 status = be_mbox_notify_wait(adapter);
822
823 if (!status)
824 memcpy(wrb, dest_wrb, sizeof(*wrb));
825
826 be_cmd_unlock(adapter);
827 return status;
828}
829
2243e2e9
SP
830/* Tell fw we're about to start firing cmds by writing a
831 * special pattern across the wrb hdr; uses mbox
832 */
833int be_cmd_fw_init(struct be_adapter *adapter)
834{
835 u8 *wrb;
836 int status;
837
bf99e50d
PR
838 if (lancer_chip(adapter))
839 return 0;
840
2984961c
IV
841 if (mutex_lock_interruptible(&adapter->mbox_lock))
842 return -1;
2243e2e9
SP
843
844 wrb = (u8 *)wrb_from_mbox(adapter);
359a972f
SP
845 *wrb++ = 0xFF;
846 *wrb++ = 0x12;
847 *wrb++ = 0x34;
848 *wrb++ = 0xFF;
849 *wrb++ = 0xFF;
850 *wrb++ = 0x56;
851 *wrb++ = 0x78;
852 *wrb = 0xFF;
2243e2e9
SP
853
854 status = be_mbox_notify_wait(adapter);
855
2984961c 856 mutex_unlock(&adapter->mbox_lock);
2243e2e9
SP
857 return status;
858}
859
860/* Tell fw we're done with firing cmds by writing a
861 * special pattern across the wrb hdr; uses mbox
862 */
863int be_cmd_fw_clean(struct be_adapter *adapter)
864{
865 u8 *wrb;
866 int status;
867
bf99e50d
PR
868 if (lancer_chip(adapter))
869 return 0;
870
2984961c
IV
871 if (mutex_lock_interruptible(&adapter->mbox_lock))
872 return -1;
2243e2e9
SP
873
874 wrb = (u8 *)wrb_from_mbox(adapter);
875 *wrb++ = 0xFF;
876 *wrb++ = 0xAA;
877 *wrb++ = 0xBB;
878 *wrb++ = 0xFF;
879 *wrb++ = 0xFF;
880 *wrb++ = 0xCC;
881 *wrb++ = 0xDD;
882 *wrb = 0xFF;
883
884 status = be_mbox_notify_wait(adapter);
885
2984961c 886 mutex_unlock(&adapter->mbox_lock);
2243e2e9
SP
887 return status;
888}
bf99e50d 889
f2f781a7 890int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
6b7c5b94 891{
b31c50a7
SP
892 struct be_mcc_wrb *wrb;
893 struct be_cmd_req_eq_create *req;
f2f781a7
SP
894 struct be_dma_mem *q_mem = &eqo->q.dma_mem;
895 int status, ver = 0;
6b7c5b94 896
2984961c
IV
897 if (mutex_lock_interruptible(&adapter->mbox_lock))
898 return -1;
b31c50a7
SP
899
900 wrb = wrb_from_mbox(adapter);
901 req = embedded_payload(wrb);
6b7c5b94 902
106df1e3 903 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
a2cc4e0b
SP
904 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
905 NULL);
6b7c5b94 906
f2f781a7
SP
907 /* Support for EQ_CREATEv2 available only SH-R onwards */
908 if (!(BEx_chip(adapter) || lancer_chip(adapter)))
909 ver = 2;
910
911 req->hdr.version = ver;
6b7c5b94
SP
912 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
913
6b7c5b94
SP
914 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
915 /* 4byte eqe*/
916 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
917 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
f2f781a7 918 __ilog2_u32(eqo->q.len / 256));
6b7c5b94
SP
919 be_dws_cpu_to_le(req->context, sizeof(req->context));
920
921 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
922
b31c50a7 923 status = be_mbox_notify_wait(adapter);
6b7c5b94 924 if (!status) {
b31c50a7 925 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
f2f781a7
SP
926 eqo->q.id = le16_to_cpu(resp->eq_id);
927 eqo->msix_idx =
928 (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
929 eqo->q.created = true;
6b7c5b94 930 }
b31c50a7 931
2984961c 932 mutex_unlock(&adapter->mbox_lock);
6b7c5b94
SP
933 return status;
934}
935
f9449ab7 936/* Use MCC */
8788fdc2 937int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
5ee4979b 938 bool permanent, u32 if_handle, u32 pmac_id)
6b7c5b94 939{
b31c50a7
SP
940 struct be_mcc_wrb *wrb;
941 struct be_cmd_req_mac_query *req;
6b7c5b94
SP
942 int status;
943
f9449ab7 944 spin_lock_bh(&adapter->mcc_lock);
b31c50a7 945
f9449ab7
SP
946 wrb = wrb_from_mccq(adapter);
947 if (!wrb) {
948 status = -EBUSY;
949 goto err;
950 }
b31c50a7 951 req = embedded_payload(wrb);
6b7c5b94 952
106df1e3 953 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
a2cc4e0b
SP
954 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
955 NULL);
5ee4979b 956 req->type = MAC_ADDRESS_TYPE_NETWORK;
6b7c5b94
SP
957 if (permanent) {
958 req->permanent = 1;
959 } else {
b31c50a7 960 req->if_id = cpu_to_le16((u16) if_handle);
590c391d 961 req->pmac_id = cpu_to_le32(pmac_id);
6b7c5b94
SP
962 req->permanent = 0;
963 }
964
f9449ab7 965 status = be_mcc_notify_wait(adapter);
b31c50a7
SP
966 if (!status) {
967 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
6b7c5b94 968 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
b31c50a7 969 }
6b7c5b94 970
f9449ab7
SP
971err:
972 spin_unlock_bh(&adapter->mcc_lock);
6b7c5b94
SP
973 return status;
974}
975
b31c50a7 976/* Uses synchronous MCCQ */
8788fdc2 977int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
a2cc4e0b 978 u32 if_id, u32 *pmac_id, u32 domain)
6b7c5b94 979{
b31c50a7
SP
980 struct be_mcc_wrb *wrb;
981 struct be_cmd_req_pmac_add *req;
6b7c5b94
SP
982 int status;
983
b31c50a7
SP
984 spin_lock_bh(&adapter->mcc_lock);
985
986 wrb = wrb_from_mccq(adapter);
713d0394
SP
987 if (!wrb) {
988 status = -EBUSY;
989 goto err;
990 }
b31c50a7 991 req = embedded_payload(wrb);
6b7c5b94 992
106df1e3 993 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
a2cc4e0b
SP
994 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
995 NULL);
6b7c5b94 996
f8617e08 997 req->hdr.domain = domain;
6b7c5b94
SP
998 req->if_id = cpu_to_le32(if_id);
999 memcpy(req->mac_address, mac_addr, ETH_ALEN);
1000
b31c50a7 1001 status = be_mcc_notify_wait(adapter);
6b7c5b94
SP
1002 if (!status) {
1003 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
1004 *pmac_id = le32_to_cpu(resp->pmac_id);
1005 }
1006
713d0394 1007err:
b31c50a7 1008 spin_unlock_bh(&adapter->mcc_lock);
e3a7ae2c
SK
1009
1010 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
1011 status = -EPERM;
1012
6b7c5b94
SP
1013 return status;
1014}
1015
b31c50a7 1016/* Uses synchronous MCCQ */
30128031 1017int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
6b7c5b94 1018{
b31c50a7
SP
1019 struct be_mcc_wrb *wrb;
1020 struct be_cmd_req_pmac_del *req;
6b7c5b94
SP
1021 int status;
1022
30128031
SP
1023 if (pmac_id == -1)
1024 return 0;
1025
b31c50a7
SP
1026 spin_lock_bh(&adapter->mcc_lock);
1027
1028 wrb = wrb_from_mccq(adapter);
713d0394
SP
1029 if (!wrb) {
1030 status = -EBUSY;
1031 goto err;
1032 }
b31c50a7 1033 req = embedded_payload(wrb);
6b7c5b94 1034
106df1e3
SK
1035 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1036 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
6b7c5b94 1037
f8617e08 1038 req->hdr.domain = dom;
6b7c5b94
SP
1039 req->if_id = cpu_to_le32(if_id);
1040 req->pmac_id = cpu_to_le32(pmac_id);
1041
b31c50a7
SP
1042 status = be_mcc_notify_wait(adapter);
1043
713d0394 1044err:
b31c50a7 1045 spin_unlock_bh(&adapter->mcc_lock);
6b7c5b94
SP
1046 return status;
1047}
1048
/* Uses Mbox */
/* Create a completion queue bound to event queue @eq. BEx chips use the
 * v0 request with the amap_cq_context_be layout; Skyhawk/Lancer use the
 * v2 request. On success cq->id and cq->created are populated.
 */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	/* Queue creation goes over the bootstrap mailbox; -1 if interrupted */
	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		/* v0 request: program the BE2/BE3 context layout */
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		/* ring size is encoded as log2(len/256) */
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
	}

	/* Context is built in CPU order; convert before handing to hardware */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
1116
1117static u32 be_encoded_q_len(int q_len)
1118{
1119 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
1120 if (len_encoded == 16)
1121 len_encoded = 0;
1122 return len_encoded;
1123}
1124
/* Create the MCC queue with the extended (MCC_CREATE_EXT) command, which
 * lets the driver subscribe to specific async event groups. BEx chips use
 * the v0 request/context; newer chips use v1 with an explicit async CQ id.
 * Uses the bootstrap mailbox since the MCCQ does not exist yet.
 */
static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	} else {
		/* v1 request: CQ id lives in the request, async CQ in ctxt */
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
			      ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
			      ctxt, 1);
	}

	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}
1182
/* Create the MCC queue with the original (pre-EXT) MCC_CREATE command.
 * Fallback used on BEx when the firmware is too old for MCC_CREATE_EXT;
 * no async-event subscription is possible with this variant.
 * Uses the bootstrap mailbox.
 */
static int be_cmd_mccq_org_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
1225
1226int be_cmd_mccq_create(struct be_adapter *adapter,
a2cc4e0b 1227 struct be_queue_info *mccq, struct be_queue_info *cq)
34b1ef04
SK
1228{
1229 int status;
1230
1231 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
666d39c7 1232 if (status && BEx_chip(adapter)) {
34b1ef04
SK
1233 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1234 "or newer to avoid conflicting priorities between NIC "
1235 "and FCoE traffic");
1236 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1237 }
1238 return status;
1239}
1240
/* Create a TX queue for @txo. The request version depends on the chip
 * (v1 for Lancer, v2 for Skyhawk and BEx super-NIC functions); v2
 * responses additionally carry the doorbell offset for this queue.
 * Uses either the MCCQ or the mailbox via be_cmd_notify_wait().
 */
int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	/* Stack-local WRB: be_cmd_notify_wait() copies it to MCCQ or mbox */
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_eth_tx_create *req;
	struct be_queue_info *txq = &txo->q;
	struct be_queue_info *cq = &txo->cq;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	int status, ver = 0;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
	} else if (BEx_chip(adapter)) {
		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
			req->hdr.version = 2;
	} else { /* For SH */
		req->hdr.version = 2;
	}

	/* v1+ requests carry the interface handle */
	if (req->hdr.version > 0)
		req->if_id = cpu_to_le16(adapter->if_handle);
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
	req->cq_id = cpu_to_le16(cq->id);
	req->queue_size = be_encoded_q_len(txq->len);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	/* Remember version before the response overwrites the WRB payload */
	ver = req->hdr.version;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
		txq->id = le16_to_cpu(resp->cid);
		if (ver == 2)
			txo->db_offset = le32_to_cpu(resp->db_offset);
		else
			txo->db_offset = DB_TXULP1_OFFSET;
		txq->created = true;
	}

	return status;
}
1286
/* Uses MCC */
/* Create an RX queue bound to completion queue @cq_id on interface
 * @if_id. @rss selects RSS ring membership; the firmware-assigned RSS id
 * is returned via @rss_id. On success rxq->id/created are populated.
 */
int be_cmd_rxq_create(struct be_adapter *adapter,
		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		      u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	/* Firmware wants log2 of the RX fragment size */
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1329
b31c50a7
SP
1330/* Generic destroyer function for all types of queues
1331 * Uses Mbox
1332 */
8788fdc2 1333int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
a2cc4e0b 1334 int queue_type)
6b7c5b94 1335{
b31c50a7
SP
1336 struct be_mcc_wrb *wrb;
1337 struct be_cmd_req_q_destroy *req;
6b7c5b94
SP
1338 u8 subsys = 0, opcode = 0;
1339 int status;
1340
2984961c
IV
1341 if (mutex_lock_interruptible(&adapter->mbox_lock))
1342 return -1;
6b7c5b94 1343
b31c50a7
SP
1344 wrb = wrb_from_mbox(adapter);
1345 req = embedded_payload(wrb);
1346
6b7c5b94
SP
1347 switch (queue_type) {
1348 case QTYPE_EQ:
1349 subsys = CMD_SUBSYSTEM_COMMON;
1350 opcode = OPCODE_COMMON_EQ_DESTROY;
1351 break;
1352 case QTYPE_CQ:
1353 subsys = CMD_SUBSYSTEM_COMMON;
1354 opcode = OPCODE_COMMON_CQ_DESTROY;
1355 break;
1356 case QTYPE_TXQ:
1357 subsys = CMD_SUBSYSTEM_ETH;
1358 opcode = OPCODE_ETH_TX_DESTROY;
1359 break;
1360 case QTYPE_RXQ:
1361 subsys = CMD_SUBSYSTEM_ETH;
1362 opcode = OPCODE_ETH_RX_DESTROY;
1363 break;
5fb379ee
SP
1364 case QTYPE_MCCQ:
1365 subsys = CMD_SUBSYSTEM_COMMON;
1366 opcode = OPCODE_COMMON_MCC_DESTROY;
1367 break;
6b7c5b94 1368 default:
5f0b849e 1369 BUG();
6b7c5b94 1370 }
d744b44e 1371
106df1e3 1372 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
a2cc4e0b 1373 NULL);
6b7c5b94
SP
1374 req->id = cpu_to_le16(q->id);
1375
b31c50a7 1376 status = be_mbox_notify_wait(adapter);
aa790db9 1377 q->created = false;
5f0b849e 1378
2984961c 1379 mutex_unlock(&adapter->mbox_lock);
482c9e79
SP
1380 return status;
1381}
6b7c5b94 1382
482c9e79
SP
1383/* Uses MCC */
1384int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1385{
1386 struct be_mcc_wrb *wrb;
1387 struct be_cmd_req_q_destroy *req;
1388 int status;
1389
1390 spin_lock_bh(&adapter->mcc_lock);
1391
1392 wrb = wrb_from_mccq(adapter);
1393 if (!wrb) {
1394 status = -EBUSY;
1395 goto err;
1396 }
1397 req = embedded_payload(wrb);
1398
106df1e3 1399 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
a2cc4e0b 1400 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
482c9e79
SP
1401 req->id = cpu_to_le16(q->id);
1402
1403 status = be_mcc_notify_wait(adapter);
aa790db9 1404 q->created = false;
482c9e79
SP
1405
1406err:
1407 spin_unlock_bh(&adapter->mcc_lock);
6b7c5b94
SP
1408 return status;
1409}
1410
/* Create an rx filtering policy configuration on an i/f
 * Will use MBOX only if MCCQ has not been created.
 */
/* @cap_flags/@en_flags are BE_IF_FLAGS_* capability/enable masks.
 * The new interface handle is returned via @if_handle. The WRB is
 * stack-local; be_cmd_notify_wait() routes it to mbox or MCCQ.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		     u32 *if_handle, u32 domain)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_if_create *req;
	int status;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
			       sizeof(*req), &wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	/* No MAC is programmed at create time; pmac_add does that later */
	req->pmac_invalid = true;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
		*if_handle = le32_to_cpu(resp->interface_id);

		/* Hack to retrieve VF's pmac-id on BE3 */
		if (BE3_chip(adapter) && !be_physfn(adapter))
			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
	}
	return status;
}
1441
/* Uses MCCQ */
/* Destroy interface @interface_id; -1 means "never created" and is a
 * no-op. Returns 0, -EBUSY, or a firmware status code.
 */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
			       sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1472
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
/* Fires the request and returns immediately; the completion handler
 * processes the stats when the MCC completion arrives. The request
 * version is chosen per chip (v0 BE2, v1 BE3/Lancer, v2 otherwise).
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
			       nonemb_cmd);

	/* version 1 of the cmd is not supported only by BE2 */
	if (BE2_chip(adapter))
		hdr->version = 0;
	if (BE3_chip(adapter) || lancer_chip(adapter))
		hdr->version = 1;
	else
		hdr->version = 2;

	/* Async: post only; completion processed in the MCC event path */
	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1511
005d5696
SX
1512/* Lancer Stats */
1513int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
a2cc4e0b 1514 struct be_dma_mem *nonemb_cmd)
005d5696
SX
1515{
1516
1517 struct be_mcc_wrb *wrb;
1518 struct lancer_cmd_req_pport_stats *req;
005d5696
SX
1519 int status = 0;
1520
f25b119c
PR
1521 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1522 CMD_SUBSYSTEM_ETH))
1523 return -EPERM;
1524
005d5696
SX
1525 spin_lock_bh(&adapter->mcc_lock);
1526
1527 wrb = wrb_from_mccq(adapter);
1528 if (!wrb) {
1529 status = -EBUSY;
1530 goto err;
1531 }
1532 req = nonemb_cmd->va;
005d5696 1533
106df1e3 1534 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
a2cc4e0b
SP
1535 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1536 wrb, nonemb_cmd);
005d5696 1537
d51ebd33 1538 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
005d5696
SX
1539 req->cmd_params.params.reset_stats = 0;
1540
005d5696
SX
1541 be_mcc_notify(adapter);
1542 adapter->stats_cmd_sent = true;
1543
1544err:
1545 spin_unlock_bh(&adapter->mcc_lock);
1546 return status;
1547}
1548
323ff71e
SP
1549static int be_mac_to_link_speed(int mac_speed)
1550{
1551 switch (mac_speed) {
1552 case PHY_LINK_SPEED_ZERO:
1553 return 0;
1554 case PHY_LINK_SPEED_10MBPS:
1555 return 10;
1556 case PHY_LINK_SPEED_100MBPS:
1557 return 100;
1558 case PHY_LINK_SPEED_1GBPS:
1559 return 1000;
1560 case PHY_LINK_SPEED_10GBPS:
1561 return 10000;
b971f847
VV
1562 case PHY_LINK_SPEED_20GBPS:
1563 return 20000;
1564 case PHY_LINK_SPEED_25GBPS:
1565 return 25000;
1566 case PHY_LINK_SPEED_40GBPS:
1567 return 40000;
323ff71e
SP
1568 }
1569 return 0;
1570}
1571
/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
/* Either output pointer may be NULL if the caller does not need it.
 * *link_status is pre-set to LINK_DOWN so a command failure reports a
 * safe default.
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
			     u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	if (link_status)
		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
			       sizeof(*req), wrb, NULL);

	/* version 1 of the cmd is not supported only by BE2 */
	if (!BE2_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (link_speed) {
			/* v1 responses report speed in 10 Mbps units; v0
			 * responses only carry a MAC-speed enum
			 */
			*link_speed = resp->link_speed ?
				      le16_to_cpu(resp->link_speed) * 10 :
				      be_mac_to_link_speed(resp->mac_speed);

			if (!resp->logical_link_status)
				*link_speed = 0;
		}
		if (link_status)
			*link_status = resp->logical_link_status;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1623
609ff3bb
AK
1624/* Uses synchronous mcc */
1625int be_cmd_get_die_temperature(struct be_adapter *adapter)
1626{
1627 struct be_mcc_wrb *wrb;
1628 struct be_cmd_req_get_cntl_addnl_attribs *req;
117affe3 1629 int status = 0;
609ff3bb
AK
1630
1631 spin_lock_bh(&adapter->mcc_lock);
1632
1633 wrb = wrb_from_mccq(adapter);
1634 if (!wrb) {
1635 status = -EBUSY;
1636 goto err;
1637 }
1638 req = embedded_payload(wrb);
1639
106df1e3 1640 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
a2cc4e0b
SP
1641 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1642 sizeof(*req), wrb, NULL);
609ff3bb 1643
3de09455 1644 be_mcc_notify(adapter);
609ff3bb
AK
1645
1646err:
1647 spin_unlock_bh(&adapter->mcc_lock);
1648 return status;
1649}
1650
311fddc7
SK
1651/* Uses synchronous mcc */
1652int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1653{
1654 struct be_mcc_wrb *wrb;
1655 struct be_cmd_req_get_fat *req;
1656 int status;
1657
1658 spin_lock_bh(&adapter->mcc_lock);
1659
1660 wrb = wrb_from_mccq(adapter);
1661 if (!wrb) {
1662 status = -EBUSY;
1663 goto err;
1664 }
1665 req = embedded_payload(wrb);
1666
106df1e3 1667 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
a2cc4e0b
SP
1668 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
1669 NULL);
311fddc7
SK
1670 req->fat_operation = cpu_to_le32(QUERY_FAT);
1671 status = be_mcc_notify_wait(adapter);
1672 if (!status) {
1673 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1674 if (log_size && resp->log_size)
fe2a70ee
SK
1675 *log_size = le32_to_cpu(resp->log_size) -
1676 sizeof(u32);
311fddc7
SK
1677 }
1678err:
1679 spin_unlock_bh(&adapter->mcc_lock);
1680 return status;
1681}
1682
/* Retrieve the firmware FAT log into @buf, @buf_len bytes, in chunks of
 * at most 60KB per MANAGE_FAT command. A single DMA buffer is allocated
 * for the non-embedded request/response and reused for every chunk.
 * Errors are logged but not returned (void); partial data may be copied.
 */
void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	u32 offset = 0, total_size, buf_size,
		log_offset = sizeof(u32), payload_len;
	int status;

	if (buf_len == 0)
		return;

	total_size = buf_len;

	/* DMA buffer sized for the request header plus one 60KB chunk */
	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
					      get_fat_cmd.size,
					      &get_fat_cmd.dma);
	if (!get_fat_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while retrieving FAT data\n");
		return;
	}

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60*1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = get_fat_cmd.va;

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				       OPCODE_COMMON_MANAGE_FAT, payload_len,
				       wrb, &get_fat_cmd);

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
		if (!status) {
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
			memcpy(buf + offset,
			       resp->data_buffer,
			       le32_to_cpu(resp->read_log_length));
		} else {
			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
			goto err;
		}
		offset += buf_size;
		log_offset += buf_size;
	}
err:
	/* Free the DMA buffer on all paths, then release the MCC lock */
	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
			    get_fat_cmd.va, get_fat_cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
}
1749
04b71175
SP
1750/* Uses synchronous mcc */
1751int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
a2cc4e0b 1752 char *fw_on_flash)
6b7c5b94 1753{
b31c50a7
SP
1754 struct be_mcc_wrb *wrb;
1755 struct be_cmd_req_get_fw_version *req;
6b7c5b94
SP
1756 int status;
1757
04b71175 1758 spin_lock_bh(&adapter->mcc_lock);
b31c50a7 1759
04b71175
SP
1760 wrb = wrb_from_mccq(adapter);
1761 if (!wrb) {
1762 status = -EBUSY;
1763 goto err;
1764 }
6b7c5b94 1765
04b71175 1766 req = embedded_payload(wrb);
6b7c5b94 1767
106df1e3 1768 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
a2cc4e0b
SP
1769 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1770 NULL);
04b71175 1771 status = be_mcc_notify_wait(adapter);
6b7c5b94
SP
1772 if (!status) {
1773 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
04b71175
SP
1774 strcpy(fw_ver, resp->firmware_version_string);
1775 if (fw_on_flash)
1776 strcpy(fw_on_flash, resp->fw_on_flash_version_string);
6b7c5b94 1777 }
04b71175
SP
1778err:
1779 spin_unlock_bh(&adapter->mcc_lock);
6b7c5b94
SP
1780 return status;
1781}
1782
b31c50a7
SP
1783/* set the EQ delay interval of an EQ to specified value
1784 * Uses async mcc
1785 */
2632bafd
SP
1786int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1787 int num)
6b7c5b94 1788{
b31c50a7
SP
1789 struct be_mcc_wrb *wrb;
1790 struct be_cmd_req_modify_eq_delay *req;
2632bafd 1791 int status = 0, i;
6b7c5b94 1792
b31c50a7
SP
1793 spin_lock_bh(&adapter->mcc_lock);
1794
1795 wrb = wrb_from_mccq(adapter);
713d0394
SP
1796 if (!wrb) {
1797 status = -EBUSY;
1798 goto err;
1799 }
b31c50a7 1800 req = embedded_payload(wrb);
6b7c5b94 1801
106df1e3 1802 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
a2cc4e0b
SP
1803 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1804 NULL);
6b7c5b94 1805
2632bafd
SP
1806 req->num_eq = cpu_to_le32(num);
1807 for (i = 0; i < num; i++) {
1808 req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1809 req->set_eqd[i].phase = 0;
1810 req->set_eqd[i].delay_multiplier =
1811 cpu_to_le32(set_eqd[i].delay_multiplier);
1812 }
6b7c5b94 1813
b31c50a7 1814 be_mcc_notify(adapter);
713d0394 1815err:
b31c50a7 1816 spin_unlock_bh(&adapter->mcc_lock);
713d0394 1817 return status;
6b7c5b94
SP
1818}
1819
/* Uses synchronous mcc */
/* Program the VLAN filter table of interface @if_id with @num VLAN tags
 * from @vtag_array. The untagged bit is set only if the interface
 * capability flags advertise untagged support.
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
		       u32 num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
			       wrb, NULL);

	req->interface_id = if_id;
	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
	req->num_vlan = num;
	memcpy(req->normal_vlan, vtag_array,
	       req->num_vlan * sizeof(vtag_array[0]));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1852
/* Program the RX filter of the adapter's interface.
 * @flags selects the mode: IFF_PROMISC (full promiscuous),
 * IFF_ALLMULTI (multicast promiscuous), BE_FLAGS_VLAN_PROMISC
 * (VLAN promiscuous), or — by default — an explicit multicast list
 * taken from the netdev. @value is ON/OFF for the promiscuous modes.
 * Flag bits not supported by the interface capabilities are warned
 * about and masked off before the command is issued.
 * Uses the pre-allocated non-embedded DMA buffer adapter->rx_filter.
 */
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	/* The DMA buffer is reused across calls; clear stale contents */
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
			       wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	if (flags & IFF_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
						 BE_IF_FLAGS_VLAN_PROMISCUOUS |
						 BE_IF_FLAGS_MCAST_PROMISCUOUS);
		if (value == ON)
			req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
					    BE_IF_FLAGS_VLAN_PROMISCUOUS |
					    BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else if (flags & IFF_ALLMULTI) {
		req->if_flags_mask = req->if_flags =
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else if (flags & BE_FLAGS_VLAN_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);

		if (value == ON)
			req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
	} else {
		struct netdev_hw_addr *ha;
		int i = 0;

		req->if_flags_mask = req->if_flags =
			cpu_to_le32(BE_IF_FLAGS_MULTICAST);

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    be_if_cap_flags(adapter));
		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	/* Warn about, then strip, flag bits the interface cannot support */
	if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
	    req->if_flags_mask) {
		dev_warn(&adapter->pdev->dev,
			 "Cannot set rx filter flags 0x%x\n",
			 req->if_flags_mask);
		dev_warn(&adapter->pdev->dev,
			 "Interface is capable of 0x%x flags only\n",
			 be_if_cap_flags(adapter));
	}
	req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter));

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1926
/* Uses synchronous mcc */
/* Set TX/RX pause (flow control) on the port. Requires link-management
 * privilege; returns -EPERM if the function is not allowed to issue it.
 */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1960
/* Uses synchronous mcc */
/* Read the current TX/RX pause (flow control) settings of the port into
 * *tx_fc and *rx_fc. Requires link-query privilege (-EPERM otherwise).
 */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1997
b31c50a7 1998/* Uses mbox */
3abcdeda 1999int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
0ad3157e 2000 u32 *mode, u32 *caps, u16 *asic_rev)
6b7c5b94 2001{
b31c50a7
SP
2002 struct be_mcc_wrb *wrb;
2003 struct be_cmd_req_query_fw_cfg *req;
6b7c5b94
SP
2004 int status;
2005
2984961c
IV
2006 if (mutex_lock_interruptible(&adapter->mbox_lock))
2007 return -1;
6b7c5b94 2008
b31c50a7
SP
2009 wrb = wrb_from_mbox(adapter);
2010 req = embedded_payload(wrb);
6b7c5b94 2011
106df1e3 2012 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
a2cc4e0b
SP
2013 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2014 sizeof(*req), wrb, NULL);
6b7c5b94 2015
b31c50a7 2016 status = be_mbox_notify_wait(adapter);
6b7c5b94
SP
2017 if (!status) {
2018 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
2019 *port_num = le32_to_cpu(resp->phys_port);
3486be29 2020 *mode = le32_to_cpu(resp->function_mode);
3abcdeda 2021 *caps = le32_to_cpu(resp->function_caps);
0ad3157e 2022 *asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
6b7c5b94
SP
2023 }
2024
2984961c 2025 mutex_unlock(&adapter->mbox_lock);
6b7c5b94
SP
2026 return status;
2027}
14074eab 2028
b31c50a7 2029/* Uses mbox */
14074eab 2030int be_cmd_reset_function(struct be_adapter *adapter)
2031{
b31c50a7
SP
2032 struct be_mcc_wrb *wrb;
2033 struct be_cmd_req_hdr *req;
14074eab 2034 int status;
2035
bf99e50d
PR
2036 if (lancer_chip(adapter)) {
2037 status = lancer_wait_ready(adapter);
2038 if (!status) {
2039 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2040 adapter->db + SLIPORT_CONTROL_OFFSET);
2041 status = lancer_test_and_set_rdy_state(adapter);
2042 }
2043 if (status) {
2044 dev_err(&adapter->pdev->dev,
2045 "Adapter in non recoverable error\n");
2046 }
2047 return status;
2048 }
2049
2984961c
IV
2050 if (mutex_lock_interruptible(&adapter->mbox_lock))
2051 return -1;
14074eab 2052
b31c50a7
SP
2053 wrb = wrb_from_mbox(adapter);
2054 req = embedded_payload(wrb);
14074eab 2055
106df1e3 2056 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
a2cc4e0b
SP
2057 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2058 NULL);
14074eab 2059
b31c50a7 2060 status = be_mbox_notify_wait(adapter);
14074eab 2061
2984961c 2062 mutex_unlock(&adapter->mbox_lock);
14074eab 2063 return status;
2064}
84517482 2065
594ad54a 2066int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
a2cc4e0b 2067 u32 rss_hash_opts, u16 table_size, u8 *rss_hkey)
3abcdeda
SP
2068{
2069 struct be_mcc_wrb *wrb;
2070 struct be_cmd_req_rss_config *req;
3abcdeda
SP
2071 int status;
2072
da1388d6
VV
2073 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2074 return 0;
2075
b51aa367 2076 spin_lock_bh(&adapter->mcc_lock);
3abcdeda 2077
b51aa367
KA
2078 wrb = wrb_from_mccq(adapter);
2079 if (!wrb) {
2080 status = -EBUSY;
2081 goto err;
2082 }
3abcdeda
SP
2083 req = embedded_payload(wrb);
2084
106df1e3 2085 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
a2cc4e0b 2086 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
3abcdeda
SP
2087
2088 req->if_id = cpu_to_le32(adapter->if_handle);
594ad54a
SR
2089 req->enable_rss = cpu_to_le16(rss_hash_opts);
2090 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
d3bd3a5e 2091
b51aa367 2092 if (!BEx_chip(adapter))
d3bd3a5e 2093 req->hdr.version = 1;
d3bd3a5e 2094
3abcdeda 2095 memcpy(req->cpu_table, rsstable, table_size);
e2557877 2096 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
3abcdeda
SP
2097 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2098
b51aa367
KA
2099 status = be_mcc_notify_wait(adapter);
2100err:
2101 spin_unlock_bh(&adapter->mcc_lock);
3abcdeda
SP
2102 return status;
2103}
2104
/* Uses sync mcc */
/* Enable/disable the port identification beacon LED.
 * bcn/sts are the beacon and status durations; state turns the beacon
 * on or off.  Returns 0 on success, -EBUSY if no WRB, or firmware status.
 */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			    u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_BEACON,
			       sizeof(*req), wrb, NULL);

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2137
/* Uses sync mcc */
/* Query the current beacon LED state of a port into *state.
 * NOTE(review): *state is copied raw from the response (no le_to_cpu) —
 * beacon_state appears to be a single byte; confirm against be_cmds.h.
 * Returns 0 on success, -EBUSY if no WRB, or firmware status.
 */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
			       wrb, NULL);

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2171
485bf569 2172int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
f67ef7ba
PR
2173 u32 data_size, u32 data_offset,
2174 const char *obj_name, u32 *data_written,
2175 u8 *change_status, u8 *addn_status)
485bf569
SN
2176{
2177 struct be_mcc_wrb *wrb;
2178 struct lancer_cmd_req_write_object *req;
2179 struct lancer_cmd_resp_write_object *resp;
2180 void *ctxt = NULL;
2181 int status;
2182
2183 spin_lock_bh(&adapter->mcc_lock);
2184 adapter->flash_status = 0;
2185
2186 wrb = wrb_from_mccq(adapter);
2187 if (!wrb) {
2188 status = -EBUSY;
2189 goto err_unlock;
2190 }
2191
2192 req = embedded_payload(wrb);
2193
106df1e3 2194 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
a2cc4e0b
SP
2195 OPCODE_COMMON_WRITE_OBJECT,
2196 sizeof(struct lancer_cmd_req_write_object), wrb,
2197 NULL);
485bf569
SN
2198
2199 ctxt = &req->context;
2200 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
a2cc4e0b 2201 write_length, ctxt, data_size);
485bf569
SN
2202
2203 if (data_size == 0)
2204 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
a2cc4e0b 2205 eof, ctxt, 1);
485bf569
SN
2206 else
2207 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
a2cc4e0b 2208 eof, ctxt, 0);
485bf569
SN
2209
2210 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2211 req->write_offset = cpu_to_le32(data_offset);
2212 strcpy(req->object_name, obj_name);
2213 req->descriptor_count = cpu_to_le32(1);
2214 req->buf_len = cpu_to_le32(data_size);
2215 req->addr_low = cpu_to_le32((cmd->dma +
a2cc4e0b
SP
2216 sizeof(struct lancer_cmd_req_write_object))
2217 & 0xFFFFFFFF);
485bf569
SN
2218 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2219 sizeof(struct lancer_cmd_req_write_object)));
2220
2221 be_mcc_notify(adapter);
2222 spin_unlock_bh(&adapter->mcc_lock);
2223
5eeff635 2224 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
701962d0 2225 msecs_to_jiffies(60000)))
485bf569
SN
2226 status = -1;
2227 else
2228 status = adapter->flash_status;
2229
2230 resp = embedded_payload(wrb);
f67ef7ba 2231 if (!status) {
485bf569 2232 *data_written = le32_to_cpu(resp->actual_write_len);
f67ef7ba
PR
2233 *change_status = resp->change_status;
2234 } else {
485bf569 2235 *addn_status = resp->additional_status;
f67ef7ba 2236 }
485bf569
SN
2237
2238 return status;
2239
2240err_unlock:
2241 spin_unlock_bh(&adapter->mcc_lock);
2242 return status;
2243}
2244
de49bd5a 2245int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
a2cc4e0b
SP
2246 u32 data_size, u32 data_offset, const char *obj_name,
2247 u32 *data_read, u32 *eof, u8 *addn_status)
de49bd5a
PR
2248{
2249 struct be_mcc_wrb *wrb;
2250 struct lancer_cmd_req_read_object *req;
2251 struct lancer_cmd_resp_read_object *resp;
2252 int status;
2253
2254 spin_lock_bh(&adapter->mcc_lock);
2255
2256 wrb = wrb_from_mccq(adapter);
2257 if (!wrb) {
2258 status = -EBUSY;
2259 goto err_unlock;
2260 }
2261
2262 req = embedded_payload(wrb);
2263
2264 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
a2cc4e0b
SP
2265 OPCODE_COMMON_READ_OBJECT,
2266 sizeof(struct lancer_cmd_req_read_object), wrb,
2267 NULL);
de49bd5a
PR
2268
2269 req->desired_read_len = cpu_to_le32(data_size);
2270 req->read_offset = cpu_to_le32(data_offset);
2271 strcpy(req->object_name, obj_name);
2272 req->descriptor_count = cpu_to_le32(1);
2273 req->buf_len = cpu_to_le32(data_size);
2274 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2275 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2276
2277 status = be_mcc_notify_wait(adapter);
2278
2279 resp = embedded_payload(wrb);
2280 if (!status) {
2281 *data_read = le32_to_cpu(resp->actual_read_len);
2282 *eof = le32_to_cpu(resp->eof);
2283 } else {
2284 *addn_status = resp->additional_status;
2285 }
2286
2287err_unlock:
2288 spin_unlock_bh(&adapter->mcc_lock);
2289 return status;
2290}
2291
/* Write a region of the adapter's flash ROM (BE2/BE3-style flashing).
 * The request (including data) lives in @cmd's DMA buffer.  The command is
 * issued asynchronously; the MCC completion handler stores the result in
 * adapter->flash_status and signals et_cmd_compl.
 * Returns 0 on success, -EBUSY if no WRB, -1 on a 40s completion timeout,
 * or the firmware flash status.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			  u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	/* request header + flash payload are in the non-embedded DMA buf */
	req = cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
			       cmd);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	/* notify now, drop the lock, then wait for the async completion */
	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
					 msecs_to_jiffies(40000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
fa9a6fed 2332
3f0d4560 2333int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
96c9b2e4 2334 u16 optype, int offset)
fa9a6fed
SB
2335{
2336 struct be_mcc_wrb *wrb;
be716446 2337 struct be_cmd_read_flash_crc *req;
fa9a6fed
SB
2338 int status;
2339
2340 spin_lock_bh(&adapter->mcc_lock);
2341
2342 wrb = wrb_from_mccq(adapter);
713d0394
SP
2343 if (!wrb) {
2344 status = -EBUSY;
2345 goto err;
2346 }
fa9a6fed
SB
2347 req = embedded_payload(wrb);
2348
106df1e3 2349 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
be716446
PR
2350 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2351 wrb, NULL);
fa9a6fed 2352
96c9b2e4 2353 req->params.op_type = cpu_to_le32(optype);
fa9a6fed 2354 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
8b93b710
AK
2355 req->params.offset = cpu_to_le32(offset);
2356 req->params.data_buf_size = cpu_to_le32(0x4);
fa9a6fed
SB
2357
2358 status = be_mcc_notify_wait(adapter);
2359 if (!status)
be716446 2360 memcpy(flashed_crc, req->crc, 4);
fa9a6fed 2361
713d0394 2362err:
fa9a6fed
SB
2363 spin_unlock_bh(&adapter->mcc_lock);
2364 return status;
2365}
71d8d1b5 2366
c196b02c 2367int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
a2cc4e0b 2368 struct be_dma_mem *nonemb_cmd)
71d8d1b5
AK
2369{
2370 struct be_mcc_wrb *wrb;
2371 struct be_cmd_req_acpi_wol_magic_config *req;
71d8d1b5
AK
2372 int status;
2373
2374 spin_lock_bh(&adapter->mcc_lock);
2375
2376 wrb = wrb_from_mccq(adapter);
2377 if (!wrb) {
2378 status = -EBUSY;
2379 goto err;
2380 }
2381 req = nonemb_cmd->va;
71d8d1b5 2382
106df1e3 2383 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
a2cc4e0b
SP
2384 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
2385 wrb, nonemb_cmd);
71d8d1b5
AK
2386 memcpy(req->magic_mac, mac, ETH_ALEN);
2387
71d8d1b5
AK
2388 status = be_mcc_notify_wait(adapter);
2389
2390err:
2391 spin_unlock_bh(&adapter->mcc_lock);
2392 return status;
2393}
ff33a6e2 2394
/* Enable or disable a loopback mode on a port (used by the ethtool
 * self-test).  Source and destination are the same port.
 * Returns 0 on success, -EBUSY if no WRB, or firmware status.
 */
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
			       wrb, NULL);

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2426
ff33a6e2 2427int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
a2cc4e0b
SP
2428 u32 loopback_type, u32 pkt_size, u32 num_pkts,
2429 u64 pattern)
ff33a6e2
S
2430{
2431 struct be_mcc_wrb *wrb;
2432 struct be_cmd_req_loopback_test *req;
5eeff635 2433 struct be_cmd_resp_loopback_test *resp;
ff33a6e2
S
2434 int status;
2435
2436 spin_lock_bh(&adapter->mcc_lock);
2437
2438 wrb = wrb_from_mccq(adapter);
2439 if (!wrb) {
2440 status = -EBUSY;
2441 goto err;
2442 }
2443
2444 req = embedded_payload(wrb);
2445
106df1e3 2446 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
a2cc4e0b
SP
2447 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
2448 NULL);
ff33a6e2 2449
5eeff635 2450 req->hdr.timeout = cpu_to_le32(15);
ff33a6e2
S
2451 req->pattern = cpu_to_le64(pattern);
2452 req->src_port = cpu_to_le32(port_num);
2453 req->dest_port = cpu_to_le32(port_num);
2454 req->pkt_size = cpu_to_le32(pkt_size);
2455 req->num_pkts = cpu_to_le32(num_pkts);
2456 req->loopback_type = cpu_to_le32(loopback_type);
2457
5eeff635
SR
2458 be_mcc_notify(adapter);
2459
2460 spin_unlock_bh(&adapter->mcc_lock);
ff33a6e2 2461
5eeff635
SR
2462 wait_for_completion(&adapter->et_cmd_compl);
2463 resp = embedded_payload(wrb);
2464 status = le32_to_cpu(resp->status);
2465
2466 return status;
ff33a6e2
S
2467err:
2468 spin_unlock_bh(&adapter->mcc_lock);
2469 return status;
2470}
2471
/* DDR DMA self-test: fill the send buffer with @pattern repeated over
 * byte_cnt bytes, have the firmware DMA it through adapter DDR, and compare
 * the received buffer against what was sent.
 * Returns 0 on success, -EBUSY if no WRB, -1 on data mismatch or send
 * error, or a firmware status.
 */
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
			u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
			       cmd);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	/* replicate the 8-byte pattern byte-by-byte across the buffer */
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;
		resp = cmd->va;
		/* fail if the echoed data differs or firmware saw an error */
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
		    resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
368c0ca2 2516
c196b02c 2517int be_cmd_get_seeprom_data(struct be_adapter *adapter,
a2cc4e0b 2518 struct be_dma_mem *nonemb_cmd)
368c0ca2
SB
2519{
2520 struct be_mcc_wrb *wrb;
2521 struct be_cmd_req_seeprom_read *req;
368c0ca2
SB
2522 int status;
2523
2524 spin_lock_bh(&adapter->mcc_lock);
2525
2526 wrb = wrb_from_mccq(adapter);
e45ff01d
AK
2527 if (!wrb) {
2528 status = -EBUSY;
2529 goto err;
2530 }
368c0ca2 2531 req = nonemb_cmd->va;
368c0ca2 2532
106df1e3 2533 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
a2cc4e0b
SP
2534 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2535 nonemb_cmd);
368c0ca2
SB
2536
2537 status = be_mcc_notify_wait(adapter);
2538
e45ff01d 2539err:
368c0ca2
SB
2540 spin_unlock_bh(&adapter->mcc_lock);
2541 return status;
2542}
ee3cb629 2543
42f11cf2 2544int be_cmd_get_phy_info(struct be_adapter *adapter)
ee3cb629
AK
2545{
2546 struct be_mcc_wrb *wrb;
2547 struct be_cmd_req_get_phy_info *req;
306f1348 2548 struct be_dma_mem cmd;
ee3cb629
AK
2549 int status;
2550
f25b119c
PR
2551 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2552 CMD_SUBSYSTEM_COMMON))
2553 return -EPERM;
2554
ee3cb629
AK
2555 spin_lock_bh(&adapter->mcc_lock);
2556
2557 wrb = wrb_from_mccq(adapter);
2558 if (!wrb) {
2559 status = -EBUSY;
2560 goto err;
2561 }
306f1348 2562 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
a2cc4e0b 2563 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
306f1348
SP
2564 if (!cmd.va) {
2565 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2566 status = -ENOMEM;
2567 goto err;
2568 }
ee3cb629 2569
306f1348 2570 req = cmd.va;
ee3cb629 2571
106df1e3 2572 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
a2cc4e0b
SP
2573 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2574 wrb, &cmd);
ee3cb629
AK
2575
2576 status = be_mcc_notify_wait(adapter);
306f1348
SP
2577 if (!status) {
2578 struct be_phy_info *resp_phy_info =
2579 cmd.va + sizeof(struct be_cmd_req_hdr);
42f11cf2
AK
2580 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2581 adapter->phy.interface_type =
306f1348 2582 le16_to_cpu(resp_phy_info->interface_type);
42f11cf2
AK
2583 adapter->phy.auto_speeds_supported =
2584 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2585 adapter->phy.fixed_speeds_supported =
2586 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2587 adapter->phy.misc_params =
2588 le32_to_cpu(resp_phy_info->misc_params);
68cb7e47
VV
2589
2590 if (BE2_chip(adapter)) {
2591 adapter->phy.fixed_speeds_supported =
2592 BE_SUPPORTED_SPEED_10GBPS |
2593 BE_SUPPORTED_SPEED_1GBPS;
2594 }
306f1348 2595 }
a2cc4e0b 2596 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
ee3cb629
AK
2597err:
2598 spin_unlock_bh(&adapter->mcc_lock);
2599 return status;
2600}
e1d18735
AK
2601
/* Set the NIC QoS maximum bandwidth (bps) for a VF/PF domain.
 * Returns 0 on success, -EBUSY if no WRB, or firmware status.
 */
int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	/* only the NIC max-bps field is being set */
	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
	req->max_bps_nic = cpu_to_le32(bps);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
9e1453c5
AK
2631
/* Query controller attributes via the mailbox and cache the HBA physical
 * port number in adapter->hba_port_num.  A temporary DMA buffer large
 * enough for the response is allocated for the exchange.
 * Returns 0 on success, -1 if interrupted acquiring the mbox lock,
 * -ENOMEM on allocation failure, -EBUSY if no WRB, or firmware status.
 */
int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cntl_attribs *req;
	struct be_cmd_resp_cntl_attribs *resp;
	int status;
	/* payload must cover whichever of request/response is larger */
	int payload_len = max(sizeof(*req), sizeof(*resp));
	struct mgmt_controller_attrib *attribs;
	struct be_dma_mem attribs_cmd;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	/* zeroed so the error path can test attribs_cmd.va safely */
	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
					      &attribs_cmd.dma);
	if (!attribs_cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = attribs_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
			       wrb, &attribs_cmd);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		/* attributes follow the response header in the buffer */
		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
		adapter->hba_port_num = attribs->hba_attribs.phy_port;
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	if (attribs_cmd.va)
		pci_free_consistent(adapter->pdev, attribs_cmd.size,
				    attribs_cmd.va, attribs_cmd.dma);
	return status;
}
2e588f84
SP
2679
/* Uses mbox */
/* Request BE3-native ERX (advanced) mode from the firmware and record
 * whether it was granted in adapter->be3_native.  Warns if the adapter
 * stays in legacy mode.  Returns 0 on success, -1 if interrupted taking
 * the mbox lock, -EBUSY if no WRB, or firmware status.
 */
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_func_cap *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
			       sizeof(*req), wrb, NULL);

	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
					   CAPABILITY_BE3_NATIVE_ERX_API);
	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
		/* firmware echoes back the capabilities it actually granted */
		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
				      CAPABILITY_BE3_NATIVE_ERX_API;
		if (!adapter->be3_native)
			dev_warn(&adapter->pdev->dev,
				 "adapter not in advanced mode\n");
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}
590c391d 2719
f25b119c
PR
/* Get privilege(s) for a function */
/* On success *privilege holds the privilege bitmask for @domain.  In UMC
 * mode on BEx PFs the firmware-reported mask is overridden with full
 * privileges.  Returns 0 on success, -EBUSY if no WRB, or firmware status.
 */
int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
			     u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fn_privileges *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fn_privileges *resp =
						embedded_payload(wrb);
		*privilege = le32_to_cpu(resp->privilege_mask);

		/* In UMC mode FW does not return right privileges.
		 * Override with correct privilege equivalent to PF.
		 */
		if (BEx_chip(adapter) && be_is_mc(adapter) &&
		    be_physfn(adapter))
			*privilege = MAX_PRIVILEGES;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2762
04a06028
SP
/* Set privilege(s) for a function */
/* Program the privilege bitmask for @domain.  Lancer uses a separate
 * request field from BE/Skyhawk.  Returns 0 on success, -EBUSY if no WRB,
 * or firmware status.
 */
int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
			     u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_fn_privileges *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
			       wrb, NULL);
	req->hdr.domain = domain;
	/* request layout differs between Lancer and BE/Skyhawk */
	if (lancer_chip(adapter))
		req->privileges_lancer = cpu_to_le32(privileges);
	else
		req->privileges = cpu_to_le32(privileges);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2794
5a712c13
SP
/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
 * pmac_id_valid: false => pmac_id or MAC address is requested.
 * If pmac_id is returned, pmac_id_valid is returned as true
 */
/* Issue GET_MAC_LIST and decode the result as described above.  A temporary
 * DMA buffer holds the request/response.  Returns 0 on success, -ENOMEM on
 * allocation failure, -EBUSY if no WRB, or firmware status.
 */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
			     bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
			     u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_mac_list *req;
	int status;
	int mac_count;
	struct be_dma_mem get_mac_list_cmd;
	int i;

	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
						   get_mac_list_cmd.size,
						   &get_mac_list_cmd.dma);

	if (!get_mac_list_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure during GET_MAC_LIST\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto out;
	}

	req = get_mac_list_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_MAC_LIST,
			       get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
	req->hdr.domain = domain;
	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
	if (*pmac_id_valid) {
		/* caller supplied a pmac_id: look up its MAC address */
		req->mac_id = cpu_to_le32(*pmac_id);
		req->iface_id = cpu_to_le16(if_handle);
		req->perm_override = 0;
	} else {
		req->perm_override = 1;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_mac_list *resp =
						get_mac_list_cmd.va;

		if (*pmac_id_valid) {
			memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
			       ETH_ALEN);
			goto out;
		}

		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
		/* Mac list returned could contain one or more active mac_ids
		 * or one or more true or pseudo permanent mac addresses.
		 * If an active mac_id is present, return first active mac_id
		 * found.
		 */
		for (i = 0; i < mac_count; i++) {
			struct get_list_macaddr *mac_entry;
			u16 mac_addr_size;
			u32 mac_id;

			mac_entry = &resp->macaddr_list[i];
			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
			/* mac_id is a 32 bit value and mac_addr size
			 * is 6 bytes
			 */
			if (mac_addr_size == sizeof(u32)) {
				*pmac_id_valid = true;
				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
				*pmac_id = le32_to_cpu(mac_id);
				goto out;
			}
		}
		/* If no active mac_id found, return first mac addr */
		*pmac_id_valid = false;
		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
		       ETH_ALEN);
	}

out:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
			    get_mac_list_cmd.va, get_mac_list_cmd.dma);
	return status;
}
2891
a2cc4e0b
SP
2892int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
2893 u8 *mac, u32 if_handle, bool active, u32 domain)
5a712c13 2894{
5a712c13 2895
b188f090
SR
2896 if (!active)
2897 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
2898 if_handle, domain);
3175d8c2 2899 if (BEx_chip(adapter))
5a712c13 2900 return be_cmd_mac_addr_query(adapter, mac, false,
b188f090 2901 if_handle, curr_pmac_id);
3175d8c2
SP
2902 else
2903 /* Fetch the MAC address using pmac_id */
2904 return be_cmd_get_mac_from_list(adapter, mac, &active,
b188f090
SR
2905 &curr_pmac_id,
2906 if_handle, domain);
5a712c13
SP
2907}
2908
95046b92
SP
2909int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
2910{
2911 int status;
2912 bool pmac_valid = false;
2913
2914 memset(mac, 0, ETH_ALEN);
2915
3175d8c2
SP
2916 if (BEx_chip(adapter)) {
2917 if (be_physfn(adapter))
2918 status = be_cmd_mac_addr_query(adapter, mac, true, 0,
2919 0);
2920 else
2921 status = be_cmd_mac_addr_query(adapter, mac, false,
2922 adapter->if_handle, 0);
2923 } else {
95046b92 2924 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
b188f090 2925 NULL, adapter->if_handle, 0);
3175d8c2
SP
2926 }
2927
95046b92
SP
2928 return status;
2929}
2930
590c391d
PR
/* Uses synchronous MCCQ */
/* Replace the MAC address list for @domain with @mac_count addresses from
 * @mac_array (mac_count may be 0 to clear the list).  A temporary DMA
 * buffer holds the request.  Returns 0 on success, -ENOMEM on allocation
 * failure, -EBUSY if no WRB, or firmware status.
 */
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
			u8 mac_count, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_mac_list *req;
	int status;
	struct be_dma_mem cmd;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
				    &cmd.dma, GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
			       wrb, &cmd);

	req->hdr.domain = domain;
	req->mac_count = mac_count;
	if (mac_count)
		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);

	status = be_mcc_notify_wait(adapter);

err:
	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
4762f6ce 2972
3175d8c2
SP
2973/* Wrapper to delete any active MACs and provision the new mac.
2974 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
2975 * current list are active.
2976 */
2977int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
2978{
2979 bool active_mac = false;
2980 u8 old_mac[ETH_ALEN];
2981 u32 pmac_id;
2982 int status;
2983
2984 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
b188f090
SR
2985 &pmac_id, if_id, dom);
2986
3175d8c2
SP
2987 if (!status && active_mac)
2988 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
2989
2990 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
2991}
2992
f1f3ee1b 2993int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
a77dcb8c 2994 u32 domain, u16 intf_id, u16 hsw_mode)
f1f3ee1b
AK
2995{
2996 struct be_mcc_wrb *wrb;
2997 struct be_cmd_req_set_hsw_config *req;
2998 void *ctxt;
2999 int status;
3000
3001 spin_lock_bh(&adapter->mcc_lock);
3002
3003 wrb = wrb_from_mccq(adapter);
3004 if (!wrb) {
3005 status = -EBUSY;
3006 goto err;
3007 }
3008
3009 req = embedded_payload(wrb);
3010 ctxt = &req->context;
3011
3012 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
a2cc4e0b
SP
3013 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3014 NULL);
f1f3ee1b
AK
3015
3016 req->hdr.domain = domain;
3017 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3018 if (pvid) {
3019 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3020 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3021 }
a77dcb8c
AK
3022 if (!BEx_chip(adapter) && hsw_mode) {
3023 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3024 ctxt, adapter->hba_port_num);
3025 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3026 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3027 ctxt, hsw_mode);
3028 }
f1f3ee1b
AK
3029
3030 be_dws_cpu_to_le(req->context, sizeof(req->context));
3031 status = be_mcc_notify_wait(adapter);
3032
3033err:
3034 spin_unlock_bh(&adapter->mcc_lock);
3035 return status;
3036}
3037
/* Get Hyper switch config: reads back the PVID and (non-BEx only) the
 * port-forwarding mode.  @pvid and @mode are optional out-params and may
 * be NULL when the caller is not interested in that value.
 */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
			  u32 domain, u16 intf_id, u8 *mode)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_hsw_config *req;
	void *ctxt;
	int status;
	u16 vid;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
		      ctxt, intf_id);
	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);

	/* Fwd-mode is per physical port on Skyhawk/Lancer: query the port */
	if (!BEx_chip(adapter) && mode) {
		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
			      ctxt, adapter->hba_port_num);
		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
	}
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		/* Response overlays the request in the same embedded WRB */
		struct be_cmd_resp_get_hsw_config *resp =
						embedded_payload(wrb);
		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
				    pvid, &resp->context);
		if (pvid)
			*pvid = le16_to_cpu(vid);
		if (mode)
			*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
					      port_fwd_type, &resp->context);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
3093
/* Query the adapter's ACPI Wake-on-LAN capability (uses the mbox since it
 * runs at probe time, before the MCC queue exists).  On success caches
 * the capability in adapter->wol_cap / adapter->wol_en.
 * Returns 0 on success / when WOL is excluded, negative errno otherwise.
 */
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
	int status = 0;
	struct be_dma_mem cmd;

	/* Function may lack the privilege to issue this command */
	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	/* Not an error: this SKU simply has no WOL support */
	if (be_is_wol_excluded(adapter))
		return status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			       sizeof(*req), wrb, &cmd);

	req->hdr.version = 1;
	req->query_options = BE_GET_WOL_CAP;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;

		adapter->wol_cap = resp->wol_settings;
		if (adapter->wol_cap & BE_WOL_CAP)
			adapter->wol_en = true;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	/* cmd.va may be NULL if the DMA allocation failed above */
	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;

}
/* Set the firmware UART trace level to @level in every module that has a
 * UART-mode trace entry, by reading the extended FAT capabilities,
 * patching them in place, and writing them back.
 */
int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	int i, j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);
	if (!extfat_cmd.va)
		return -ENOMEM;

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (status)
		goto err;

	/* The FAT config params follow the generic response header */
	cfgs = (struct be_fat_conf_params *)
	       (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
	for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
		u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
		for (j = 0; j < num_modes; j++) {
			/* Only UART-mode trace levels are updated */
			if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
				cfgs->module[i].trace_lvl[j].dbg_lvl =
							cpu_to_le32(level);
		}
	}

	status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
err:
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
	return status;
}
3187
/* Read the current firmware UART trace level from module 0 of the
 * extended FAT capabilities.  Returns the level, or 0 if it could not be
 * determined (allocation or command failure) — errors are not propagated.
 */
int be_cmd_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status, j;
	int level = 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* Only module 0 is consulted; last UART entry wins */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
3220
/* Fetch the extended FAT (firmware assisted trace) capabilities into the
 * caller-supplied DMA buffer @cmd.  Uses the mbox.
 */
int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_ext_fat_caps *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);
	req->parameter_type = cpu_to_le32(1);

	status = be_mbox_notify_wait(adapter);
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}
3248
/* Write back (possibly modified) extended FAT capabilities @configs via a
 * sync MCC command; @cmd is the DMA buffer used for the non-embedded WRB.
 */
int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd,
				   struct be_fat_conf_params *configs)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ext_fat_caps *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	/* Copy params in before the header is stamped over the buffer */
	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
6a4ab669 3276
/* Return the adapter's port name character in *port_name.  Only Lancer
 * supports the GET_PORT_NAME command; other chips (and a failed command)
 * fall back to the ASCII digit of the HBA port number.
 */
int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_port_name *req;
	int status;

	if (!lancer_chip(adapter)) {
		*port_name = adapter->hba_port_num + '0';
		return 0;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
			       NULL);
	req->hdr.version = 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
		*port_name = resp->port_name[adapter->hba_port_num];
	} else {
		/* Fall back to the numeric name on command failure */
		*port_name = adapter->hba_port_num + '0';
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
3314
150d58c7 3315static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
abb93951 3316{
150d58c7 3317 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
abb93951
PR
3318 int i;
3319
3320 for (i = 0; i < desc_count; i++) {
150d58c7
VV
3321 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3322 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
3323 return (struct be_nic_res_desc *)hdr;
abb93951 3324
150d58c7
VV
3325 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3326 hdr = (void *)hdr + hdr->desc_len;
abb93951 3327 }
150d58c7
VV
3328 return NULL;
3329}
3330
3331static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3332 u32 desc_count)
3333{
3334 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3335 struct be_pcie_res_desc *pcie;
3336 int i;
3337
3338 for (i = 0; i < desc_count; i++) {
3339 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3340 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3341 pcie = (struct be_pcie_res_desc *)hdr;
3342 if (pcie->pf_num == devfn)
3343 return pcie;
3344 }
abb93951 3345
150d58c7
VV
3346 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3347 hdr = (void *)hdr + hdr->desc_len;
3348 }
950e2958 3349 return NULL;
abb93951
PR
3350}
3351
f93f160b
VV
3352static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
3353{
3354 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3355 int i;
3356
3357 for (i = 0; i < desc_count; i++) {
3358 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
3359 return (struct be_port_res_desc *)hdr;
3360
3361 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3362 hdr = (void *)hdr + hdr->desc_len;
3363 }
3364 return NULL;
3365}
3366
/* Translate a firmware NIC resource descriptor (little-endian counts)
 * into the driver's be_resources accounting structure.
 */
static void be_copy_nic_desc(struct be_resources *res,
			     struct be_nic_res_desc *desc)
{
	res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
	res->max_vlans = le16_to_cpu(desc->vlan_count);
	res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
	res->max_tx_qs = le16_to_cpu(desc->txq_count);
	res->max_rss_qs = le16_to_cpu(desc->rssq_count);
	res->max_rx_qs = le16_to_cpu(desc->rq_count);
	res->max_evt_qs = le16_to_cpu(desc->eq_count);
	/* Clear flags that driver is not interested in */
	res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
			    BE_IF_CAP_FLAGS_WANT;
	/* Need 1 RXQ as the default RXQ */
	if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
		res->max_rss_qs -= 1;
}
3384
/* Uses Mbox */
/* Query this PCI function's resource limits and copy the NIC descriptor
 * values into @res; also caches pf_number on the adapter.
 * Returns 0 on success, -ENOMEM/-EBUSY/-EINVAL or a cmd status otherwise.
 */
int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_func_config *req;
	int status;
	struct be_dma_mem cmd;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FUNC_CONFIG,
			       cmd.size, wrb, &cmd);

	/* Skyhawk firmware reports extra fields with the v1 request */
	if (skyhawk_chip(adapter))
		req->hdr.version = 1;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_func_config *resp = cmd.va;
		u32 desc_count = le32_to_cpu(resp->desc_count);
		struct be_nic_res_desc *desc;

		desc = be_get_nic_desc(resp->func_param, desc_count);
		if (!desc) {
			status = -EINVAL;
			goto err;
		}

		adapter->pf_number = desc->pf_num;
		be_copy_nic_desc(res, desc);
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	/* cmd.va may be NULL if the DMA allocation failed above */
	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3441
/* Uses mbox */
/* GET_PROFILE_CONFIG via the mailbox — used before the MCC queue is
 * created.  The response lands in the caller's DMA buffer @cmd.
 */
static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
					  u8 domain, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_profile_config *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;
	wrb = wrb_from_mbox(adapter);

	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PROFILE_CONFIG,
			       cmd->size, wrb, cmd);

	req->type = ACTIVE_PROFILE_TYPE;
	req->hdr.domain = domain;
	/* Lancer supports only the v0 request */
	if (!lancer_chip(adapter))
		req->hdr.version = 1;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
3469
/* Uses sync mcc */
/* GET_PROFILE_CONFIG via the MCC queue — used once the MCCQ exists.
 * Mirrors be_cmd_get_profile_config_mbox(); response lands in @cmd.
 */
static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
					  u8 domain, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_profile_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PROFILE_CONFIG,
			       cmd->size, wrb, cmd);

	req->type = ACTIVE_PROFILE_TYPE;
	req->hdr.domain = domain;
	/* Lancer supports only the v0 request */
	if (!lancer_chip(adapter))
		req->hdr.version = 1;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
3502
/* Uses sync mcc, if MCCQ is already created otherwise mbox */
/* Read the active profile's resource descriptors and extract the PCIe
 * (max VFs), port (multi-channel type) and NIC (queue/MAC/VLAN limits)
 * values into @res / the adapter.
 */
int be_cmd_get_profile_config(struct be_adapter *adapter,
			      struct be_resources *res, u8 domain)
{
	struct be_cmd_resp_get_profile_config *resp;
	struct be_pcie_res_desc *pcie;
	struct be_port_res_desc *port;
	struct be_nic_res_desc *nic;
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_dma_mem cmd;
	u32 desc_count;
	int status;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va)
		return -ENOMEM;

	/* Pick the transport that is available at this point of init */
	if (!mccq->created)
		status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
	else
		status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
	if (status)
		goto err;

	resp = cmd.va;
	desc_count = le32_to_cpu(resp->desc_count);

	/* Each descriptor type is optional in the response */
	pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
				desc_count);
	if (pcie)
		res->max_vfs = le16_to_cpu(pcie->num_vfs);

	port = be_get_port_desc(resp->func_param, desc_count);
	if (port)
		adapter->mc_type = port->mc_type;

	nic = be_get_nic_desc(resp->func_param, desc_count);
	if (nic)
		be_copy_nic_desc(res, nic);

err:
	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3550
/* Send a single resource descriptor (@desc of @size bytes) to firmware
 * via SET_PROFILE_CONFIG on the MCC queue.  @version selects the request
 * format; @domain targets a VF domain (0 = PF).
 */
int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
			      int size, u8 version, u8 domain)
{
	struct be_cmd_req_set_profile_config *req;
	struct be_mcc_wrb *wrb;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
			       wrb, NULL);
	req->hdr.version = version;
	req->hdr.domain = domain;
	/* Only one descriptor is ever sent per request */
	req->desc_count = cpu_to_le32(1);
	memcpy(req->desc, desc, size);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
3580
/* Mark all fields invalid */
/* All-ones (and the magic channel_id 0xF000) tell firmware "leave this
 * field unchanged"; callers then set only the fields they want to apply.
 */
void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
	memset(nic, 0, sizeof(*nic));
	nic->unicast_mac_count = 0xFFFF;
	nic->mcc_count = 0xFFFF;
	nic->vlan_count = 0xFFFF;
	nic->mcast_mac_count = 0xFFFF;
	nic->txq_count = 0xFFFF;
	nic->rq_count = 0xFFFF;
	nic->rssq_count = 0xFFFF;
	nic->lro_count = 0xFFFF;
	nic->cq_count = 0xFFFF;
	nic->toe_conn_count = 0xFFFF;
	nic->eq_count = 0xFFFF;
	nic->iface_count = 0xFFFF;
	nic->link_param = 0xFF;
	nic->channel_id_param = cpu_to_le16(0xF000);
	nic->acpi_params = 0xFF;
	nic->wol_param = 0x0F;
	nic->tunnel_iface_count = 0xFFFF;
	nic->direct_tenant_iface_count = 0xFFFF;
	nic->bw_max = 0xFFFFFFFF;
}
3605
/* Program a TX rate limit for domain @domain.  BE3 uses the legacy
 * SET_QOS command; Lancer takes an absolute rate (in 10 Mbps units) and
 * Skyhawk a percentage of @link_speed, both via SET_PROFILE_CONFIG.
 * @max_rate == 0 means "no limit" (100%).
 */
int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
		      u8 domain)
{
	struct be_nic_res_desc nic_desc;
	u32 bw_percent;
	u16 version = 0;

	if (BE3_chip(adapter))
		return be_cmd_set_qos(adapter, max_rate / 10, domain);

	be_reset_nic_desc(&nic_desc);
	nic_desc.pf_num = adapter->pf_number;
	nic_desc.vf_num = domain;
	if (lancer_chip(adapter)) {
		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
				 (1 << NOSV_SHIFT);
		nic_desc.bw_max = cpu_to_le32(max_rate / 10);
	} else {
		version = 1;
		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
		bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
		nic_desc.bw_max = cpu_to_le32(bw_percent);
	}

	return be_cmd_set_profile_config(adapter, &nic_desc,
					 nic_desc.hdr.desc_len,
					 version, domain);
}
3638
/* Apply operation @op (e.g. enable/disable tunnel offload filters) to
 * interface @iface via MANAGE_IFACE_FILTERS.  0xFFFFFFFF is the driver's
 * "no interface" sentinel and is rejected up front.
 */
int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_manage_iface_filters *req;
	int status;

	if (iface == 0xFFFFFFFF)
		return -1;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
			       wrb, NULL);
	req->op = op;
	req->target_iface_id = cpu_to_le32(iface);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
3668
3669int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
3670{
3671 struct be_port_res_desc port_desc;
3672
3673 memset(&port_desc, 0, sizeof(port_desc));
3674 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
3675 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3676 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3677 port_desc.link_num = adapter->hba_port_num;
3678 if (port) {
3679 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
3680 (1 << RCVID_SHIFT);
3681 port_desc.nv_port = swab16(port);
3682 } else {
3683 port_desc.nv_flags = NV_TYPE_DISABLED;
3684 port_desc.nv_port = 0;
3685 }
3686
3687 return be_cmd_set_profile_config(adapter, &port_desc,
3688 RESOURCE_DESC_SIZE_V1, 1, 0);
3689}
3690
/* Query the interface handle of VF @vf_num (firmware domains are
 * 1-based, hence vf_num + 1) and store it in vf_cfg->if_handle.
 */
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
		     int vf_num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_iface_list *req;
	struct be_cmd_resp_get_iface_list *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
			       wrb, NULL);
	req->hdr.domain = vf_num + 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		/* Response overlays the request in the embedded WRB */
		resp = (struct be_cmd_resp_get_iface_list *)req;
		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
3723
5c510811
SK
3724static int lancer_wait_idle(struct be_adapter *adapter)
3725{
3726#define SLIPORT_IDLE_TIMEOUT 30
3727 u32 reg_val;
3728 int status = 0, i;
3729
3730 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3731 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3732 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3733 break;
3734
3735 ssleep(1);
3736 }
3737
3738 if (i == SLIPORT_IDLE_TIMEOUT)
3739 status = -1;
3740
3741 return status;
3742}
3743
3744int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
3745{
3746 int status = 0;
3747
3748 status = lancer_wait_idle(adapter);
3749 if (status)
3750 return status;
3751
3752 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
3753
3754 return status;
3755}
3756
3757/* Routine to check whether dump image is present or not */
3758bool dump_present(struct be_adapter *adapter)
3759{
3760 u32 sliport_status = 0;
3761
3762 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3763 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
3764}
3765
/* Trigger a Lancer firmware reset with diagnostic dump, wait for the
 * port to go idle again, and verify a dump image is actually present.
 * Returns 0 on success, negative on failure.
 */
int lancer_initiate_dump(struct be_adapter *adapter)
{
	int status;

	/* give firmware reset and diagnostic dump */
	status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
				     PHYSDEV_CONTROL_DD_MASK);
	if (status < 0) {
		dev_err(&adapter->pdev->dev, "Firmware reset failed\n");
		return status;
	}

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	if (!dump_present(adapter)) {
		dev_err(&adapter->pdev->dev, "Dump image not present\n");
		return -1;
	}

	return 0;
}
3789
/* Uses sync mcc */
/* Enable the VF in domain @domain (Lancer/Skyhawk only; a no-op on BEx,
 * where the command does not exist).
 */
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_enable_disable_vf *req;
	int status;

	if (BEx_chip(adapter))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;
	req->enable = 1;
	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
3821
/* Globally enable or disable interrupt delivery for this function via
 * the mailbox (used around init/teardown before MCC is available).
 */
int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_intr_set *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
			       wrb, NULL);

	req->intr_enabled = intr_enable;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
3846
/* Uses MBOX */
/* Fetch the id of the currently active firmware profile into
 * *profile_id.  Only written on success.
 */
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
{
	struct be_cmd_req_get_active_profile *req;
	struct be_mcc_wrb *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
			       wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_active_profile *resp =
							embedded_payload(wrb);
		*profile_id = le16_to_cpu(resp->active_profile_id);
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}
3880
/* Set a VF's logical link state (enable/disable/auto-track-physical).
 * Supported on Skyhawk only; silently succeeds on BEx and Lancer.
 */
int be_cmd_set_logical_link_config(struct be_adapter *adapter,
				   int link_state, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ll_link *req;
	int status;

	if (BEx_chip(adapter) || lancer_chip(adapter))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
			       sizeof(*req), wrb, NULL);

	req->hdr.version = 1;
	req->hdr.domain = domain;

	/* Bit 0: link enabled; PLINK_TRACK bit: follow the physical link */
	if (link_state == IFLA_VF_LINK_STATE_ENABLE)
		req->link_config |= 1;

	if (link_state == IFLA_VF_LINK_STATE_AUTO)
		req->link_config |= 1 << PLINK_TRACK_SHIFT;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
3919
/* Pass-through MCC command service for the RoCE driver: copies the
 * caller-built request (@wrb_payload, header included) into an embedded
 * WRB, issues it synchronously, and copies the response back over
 * @wrb_payload.  *cmd_status gets the low 16 bits of the completion
 * status; *ext_status is currently always reported as 0.
 */
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	/* req and resp share the embedded payload area of the WRB */
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			       hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);
	if (cmd_status)
		*cmd_status = (status & 0xffff);
	if (ext_status)
		*ext_status = 0;
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);