drivers/net/ethernet/emulex/benet/be_cmds.c
1 /*
2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/module.h>
19 #include "be.h"
20 #include "be_cmds.h"
21
22 static struct be_cmd_priv_map cmd_priv_map[] = {
23 {
24 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
25 CMD_SUBSYSTEM_ETH,
26 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
27 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
28 },
29 {
30 OPCODE_COMMON_GET_FLOW_CONTROL,
31 CMD_SUBSYSTEM_COMMON,
32 BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
33 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
34 },
35 {
36 OPCODE_COMMON_SET_FLOW_CONTROL,
37 CMD_SUBSYSTEM_COMMON,
38 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
39 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
40 },
41 {
42 OPCODE_ETH_GET_PPORT_STATS,
43 CMD_SUBSYSTEM_ETH,
44 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
45 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
46 },
47 {
48 OPCODE_COMMON_GET_PHY_DETAILS,
49 CMD_SUBSYSTEM_COMMON,
50 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
51 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
52 }
53 };
54
55 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
56 u8 subsystem)
57 {
58 int i;
59 	int num_entries = ARRAY_SIZE(cmd_priv_map);
60 u32 cmd_privileges = adapter->cmd_privileges;
61
62 for (i = 0; i < num_entries; i++)
63 if (opcode == cmd_priv_map[i].opcode &&
64 subsystem == cmd_priv_map[i].subsystem)
65 if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
66 return false;
67
68 return true;
69 }
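/*
 * Usage note: opcodes that do not appear in cmd_priv_map are always
 * allowed; a mapped opcode is refused only when the function holds none
 * of the listed privileges. Callers such as be_cmd_get_flow_control()
 * further down translate a refusal into -EPERM.
 */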
70
71 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
72 {
73 return wrb->payload.embedded_payload;
74 }
75
76 static void be_mcc_notify(struct be_adapter *adapter)
77 {
78 struct be_queue_info *mccq = &adapter->mcc_obj.q;
79 u32 val = 0;
80
81 if (be_error(adapter))
82 return;
83
84 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
85 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
86
87 wmb();
88 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
89 }
90
91 /* To check if valid bit is set, check the entire word as we don't know
92 * the endianness of the data (old entry is host endian while a new entry is
93 * little endian) */
94 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
95 {
96 u32 flags;
97
98 if (compl->flags != 0) {
99 flags = le32_to_cpu(compl->flags);
100 if (flags & CQE_FLAGS_VALID_MASK) {
101 compl->flags = flags;
102 return true;
103 }
104 }
105 return false;
106 }
107
108 /* Need to reset the entire word that houses the valid bit */
109 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
110 {
111 compl->flags = 0;
112 }
113
114 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
115 {
116 unsigned long addr;
117
118 addr = tag1;
119 addr = ((addr << 16) << 16) | tag0;
120 return (void *)addr;
121 }
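/*
 * Note: tag0/tag1 are filled in by be_wrb_cmd_hdr_prepare() with the low
 * and high 32 bits of the request header's address, so this function just
 * reassembles that pointer. The double shift ((addr << 16) << 16) rather
 * than (addr << 32) avoids undefined behaviour on 32-bit builds, where
 * shifting an unsigned long by its full width is undefined; there the
 * expression correctly yields 0 and the address reduces to tag0.
 */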
122
123 static int be_mcc_compl_process(struct be_adapter *adapter,
124 struct be_mcc_compl *compl)
125 {
126 u16 compl_status, extd_status;
127 struct be_cmd_resp_hdr *resp_hdr;
128 u8 opcode = 0, subsystem = 0;
129
130 /* Just swap the status to host endian; mcc tag is opaquely copied
131 * from mcc_wrb */
132 be_dws_le_to_cpu(compl, 4);
133
134 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
135 CQE_STATUS_COMPL_MASK;
136
137 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
138
139 if (resp_hdr) {
140 opcode = resp_hdr->opcode;
141 subsystem = resp_hdr->subsystem;
142 }
143
144 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
145 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
146 (subsystem == CMD_SUBSYSTEM_COMMON)) {
147 adapter->flash_status = compl_status;
148 complete(&adapter->flash_compl);
149 }
150
151 if (compl_status == MCC_STATUS_SUCCESS) {
152 if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
153 (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
154 (subsystem == CMD_SUBSYSTEM_ETH)) {
155 be_parse_stats(adapter);
156 adapter->stats_cmd_sent = false;
157 }
158 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
159 subsystem == CMD_SUBSYSTEM_COMMON) {
160 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
161 (void *)resp_hdr;
162 adapter->drv_stats.be_on_die_temperature =
163 resp->on_die_temperature;
164 }
165 } else {
166 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
167 adapter->be_get_temp_freq = 0;
168
169 if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
170 compl_status == MCC_STATUS_ILLEGAL_REQUEST)
171 goto done;
172
173 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
174 dev_warn(&adapter->pdev->dev,
175 "VF is not privileged to issue opcode %d-%d\n",
176 opcode, subsystem);
177 } else {
178 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
179 CQE_STATUS_EXTD_MASK;
180 dev_err(&adapter->pdev->dev,
181 "opcode %d-%d failed:status %d-%d\n",
182 opcode, subsystem, compl_status, extd_status);
183 }
184 }
185 done:
186 return compl_status;
187 }
188
189 /* Link state evt is a string of bytes; no need for endian swapping */
190 static void be_async_link_state_process(struct be_adapter *adapter,
191 struct be_async_event_link_state *evt)
192 {
193 /* When link status changes, link speed must be re-queried from FW */
194 adapter->phy.link_speed = -1;
195
196 /* Ignore physical link event */
197 if (lancer_chip(adapter) &&
198 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
199 return;
200
201 /* For the initial link status do not rely on the ASYNC event as
202 * it may not be received in some cases.
203 */
204 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
205 be_link_status_update(adapter, evt->port_link_status);
206 }
207
208 /* Grp5 CoS Priority evt */
209 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
210 struct be_async_event_grp5_cos_priority *evt)
211 {
212 if (evt->valid) {
213 adapter->vlan_prio_bmap = evt->available_priority_bmap;
214 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
215 adapter->recommended_prio =
216 evt->reco_default_priority << VLAN_PRIO_SHIFT;
217 }
218 }
219
220 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
221 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
222 struct be_async_event_grp5_qos_link_speed *evt)
223 {
224 if (adapter->phy.link_speed >= 0 &&
225 evt->physical_port == adapter->port_num)
226 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
227 }
228
229 /* Grp5 PVID evt */
230 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
231 struct be_async_event_grp5_pvid_state *evt)
232 {
233 if (evt->enabled)
234 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
235 else
236 adapter->pvid = 0;
237 }
238
239 static void be_async_grp5_evt_process(struct be_adapter *adapter,
240 u32 trailer, struct be_mcc_compl *evt)
241 {
242 u8 event_type = 0;
243
244 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
245 ASYNC_TRAILER_EVENT_TYPE_MASK;
246
247 switch (event_type) {
248 case ASYNC_EVENT_COS_PRIORITY:
249 be_async_grp5_cos_priority_process(adapter,
250 (struct be_async_event_grp5_cos_priority *)evt);
251 break;
252 case ASYNC_EVENT_QOS_SPEED:
253 be_async_grp5_qos_speed_process(adapter,
254 (struct be_async_event_grp5_qos_link_speed *)evt);
255 break;
256 case ASYNC_EVENT_PVID_STATE:
257 be_async_grp5_pvid_state_process(adapter,
258 (struct be_async_event_grp5_pvid_state *)evt);
259 break;
260 default:
261 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
262 break;
263 }
264 }
265
266 static inline bool is_link_state_evt(u32 trailer)
267 {
268 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
269 ASYNC_TRAILER_EVENT_CODE_MASK) ==
270 ASYNC_EVENT_CODE_LINK_STATE;
271 }
272
273 static inline bool is_grp5_evt(u32 trailer)
274 {
275 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
276 ASYNC_TRAILER_EVENT_CODE_MASK) ==
277 ASYNC_EVENT_CODE_GRP_5);
278 }
279
280 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
281 {
282 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
283 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
284
285 if (be_mcc_compl_is_new(compl)) {
286 queue_tail_inc(mcc_cq);
287 return compl;
288 }
289 return NULL;
290 }
291
292 void be_async_mcc_enable(struct be_adapter *adapter)
293 {
294 spin_lock_bh(&adapter->mcc_cq_lock);
295
296 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
297 adapter->mcc_obj.rearm_cq = true;
298
299 spin_unlock_bh(&adapter->mcc_cq_lock);
300 }
301
302 void be_async_mcc_disable(struct be_adapter *adapter)
303 {
304 spin_lock_bh(&adapter->mcc_cq_lock);
305
306 adapter->mcc_obj.rearm_cq = false;
307 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
308
309 spin_unlock_bh(&adapter->mcc_cq_lock);
310 }
311
312 int be_process_mcc(struct be_adapter *adapter)
313 {
314 struct be_mcc_compl *compl;
315 int num = 0, status = 0;
316 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
317
318 spin_lock(&adapter->mcc_cq_lock);
319 while ((compl = be_mcc_compl_get(adapter))) {
320 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
321 /* Interpret flags as an async trailer */
322 if (is_link_state_evt(compl->flags))
323 be_async_link_state_process(adapter,
324 (struct be_async_event_link_state *) compl);
325 else if (is_grp5_evt(compl->flags))
326 be_async_grp5_evt_process(adapter,
327 compl->flags, compl);
328 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
329 status = be_mcc_compl_process(adapter, compl);
330 atomic_dec(&mcc_obj->q.used);
331 }
332 be_mcc_compl_use(compl);
333 num++;
334 }
335
336 if (num)
337 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
338
339 spin_unlock(&adapter->mcc_cq_lock);
340 return status;
341 }
342
343 /* Wait till no more pending mcc requests are present */
344 static int be_mcc_wait_compl(struct be_adapter *adapter)
345 {
346 #define mcc_timeout 120000 /* 12s timeout */
347 int i, status = 0;
348 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
349
350 for (i = 0; i < mcc_timeout; i++) {
351 if (be_error(adapter))
352 return -EIO;
353
354 local_bh_disable();
355 status = be_process_mcc(adapter);
356 local_bh_enable();
357
358 if (atomic_read(&mcc_obj->q.used) == 0)
359 break;
360 udelay(100);
361 }
362 if (i == mcc_timeout) {
363 dev_err(&adapter->pdev->dev, "FW not responding\n");
364 adapter->fw_timeout = true;
365 return -EIO;
366 }
367 return status;
368 }
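/* The 12s figure above follows directly from 120000 polls of 100us each. */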
369
370 /* Notify MCC requests and wait for completion */
371 static int be_mcc_notify_wait(struct be_adapter *adapter)
372 {
373 int status;
374 struct be_mcc_wrb *wrb;
375 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
376 u16 index = mcc_obj->q.head;
377 struct be_cmd_resp_hdr *resp;
378
379 index_dec(&index, mcc_obj->q.len);
380 wrb = queue_index_node(&mcc_obj->q, index);
381
382 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
383
384 be_mcc_notify(adapter);
385
386 status = be_mcc_wait_compl(adapter);
387 if (status == -EIO)
388 goto out;
389
390 status = resp->status;
391 out:
392 return status;
393 }
394
395 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
396 {
397 int msecs = 0;
398 u32 ready;
399
400 do {
401 if (be_error(adapter))
402 return -EIO;
403
404 ready = ioread32(db);
405 if (ready == 0xffffffff)
406 return -1;
407
408 ready &= MPU_MAILBOX_DB_RDY_MASK;
409 if (ready)
410 break;
411
412 if (msecs > 4000) {
413 dev_err(&adapter->pdev->dev, "FW not responding\n");
414 adapter->fw_timeout = true;
415 be_detect_error(adapter);
416 return -1;
417 }
418
419 msleep(1);
420 msecs++;
421 } while (true);
422
423 return 0;
424 }
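/*
 * An all-ones read (0xffffffff) from a memory-mapped register typically
 * means the device has dropped off the PCI bus (e.g. surprise removal or
 * EEH isolation), so the wait is abandoned at once instead of after the
 * ~4s timeout.
 */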
425
426 /*
427  * Insert the mailbox address into the doorbell in two steps
428  * and poll on the mbox doorbell till a command completion (or a timeout) occurs
429 */
430 static int be_mbox_notify_wait(struct be_adapter *adapter)
431 {
432 int status;
433 u32 val = 0;
434 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
435 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
436 struct be_mcc_mailbox *mbox = mbox_mem->va;
437 struct be_mcc_compl *compl = &mbox->compl;
438
439 /* wait for ready to be set */
440 status = be_mbox_db_ready_wait(adapter, db);
441 if (status != 0)
442 return status;
443
444 val |= MPU_MAILBOX_DB_HI_MASK;
445 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
446 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
447 iowrite32(val, db);
448
449 /* wait for ready to be set */
450 status = be_mbox_db_ready_wait(adapter, db);
451 if (status != 0)
452 return status;
453
454 val = 0;
455 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
456 val |= (u32)(mbox_mem->dma >> 4) << 2;
457 iowrite32(val, db);
458
459 status = be_mbox_db_ready_wait(adapter, db);
460 if (status != 0)
461 return status;
462
463 /* A cq entry has been made now */
464 if (be_mcc_compl_is_new(compl)) {
465 status = be_mcc_compl_process(adapter, &mbox->compl);
466 be_mcc_compl_use(compl);
467 if (status)
468 return status;
469 } else {
470 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
471 return -1;
472 }
473 return 0;
474 }
475
476 static u16 be_POST_stage_get(struct be_adapter *adapter)
477 {
478 u32 sem;
479
480 if (BEx_chip(adapter))
481 sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
482 else
483 pci_read_config_dword(adapter->pdev,
484 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
485
486 return sem & POST_STAGE_MASK;
487 }
488
489 int lancer_wait_ready(struct be_adapter *adapter)
490 {
491 #define SLIPORT_READY_TIMEOUT 30
492 u32 sliport_status;
493 int status = 0, i;
494
495 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
496 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
497 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
498 break;
499
500 msleep(1000);
501 }
502
503 if (i == SLIPORT_READY_TIMEOUT)
504 status = -1;
505
506 return status;
507 }
508
509 static bool lancer_provisioning_error(struct be_adapter *adapter)
510 {
511 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
512 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
513 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
514 sliport_err1 = ioread32(adapter->db +
515 SLIPORT_ERROR1_OFFSET);
516 sliport_err2 = ioread32(adapter->db +
517 SLIPORT_ERROR2_OFFSET);
518
519 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
520 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
521 return true;
522 }
523 return false;
524 }
525
526 int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
527 {
528 int status;
529 u32 sliport_status, err, reset_needed;
530 bool resource_error;
531
532 resource_error = lancer_provisioning_error(adapter);
533 if (resource_error)
534 return -1;
535
536 status = lancer_wait_ready(adapter);
537 if (!status) {
538 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
539 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
540 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
541 if (err && reset_needed) {
542 iowrite32(SLI_PORT_CONTROL_IP_MASK,
543 adapter->db + SLIPORT_CONTROL_OFFSET);
544
545 /* check adapter has corrected the error */
546 status = lancer_wait_ready(adapter);
547 sliport_status = ioread32(adapter->db +
548 SLIPORT_STATUS_OFFSET);
549 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
550 SLIPORT_STATUS_RN_MASK);
551 if (status || sliport_status)
552 status = -1;
553 } else if (err || reset_needed) {
554 status = -1;
555 }
556 }
557 	/* Stop error recovery if the error is not recoverable.
558 	 * A resource error is temporary and will go away
559 	 * when the PF provisions resources.
560 */
561 resource_error = lancer_provisioning_error(adapter);
562 if (status == -1 && !resource_error)
563 adapter->eeh_error = true;
564
565 return status;
566 }
567
568 int be_fw_wait_ready(struct be_adapter *adapter)
569 {
570 u16 stage;
571 int status, timeout = 0;
572 struct device *dev = &adapter->pdev->dev;
573
574 if (lancer_chip(adapter)) {
575 status = lancer_wait_ready(adapter);
576 return status;
577 }
578
579 do {
580 stage = be_POST_stage_get(adapter);
581 if (stage == POST_STAGE_ARMFW_RDY)
582 return 0;
583
584 dev_info(dev, "Waiting for POST, %ds elapsed\n",
585 timeout);
586 if (msleep_interruptible(2000)) {
587 dev_err(dev, "Waiting for POST aborted\n");
588 return -EINTR;
589 }
590 timeout += 2;
591 } while (timeout < 60);
592
593 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
594 return -1;
595 }
596
597
598 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
599 {
600 return &wrb->payload.sgl[0];
601 }
602
603
604 /* Don't touch the hdr after it's prepared */
605 /* mem will be NULL for embedded commands */
606 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
607 u8 subsystem, u8 opcode, int cmd_len,
608 struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
609 {
610 struct be_sge *sge;
611 unsigned long addr = (unsigned long)req_hdr;
612 u64 req_addr = addr;
613
614 req_hdr->opcode = opcode;
615 req_hdr->subsystem = subsystem;
616 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
617 req_hdr->version = 0;
618
619 wrb->tag0 = req_addr & 0xFFFFFFFF;
620 wrb->tag1 = upper_32_bits(req_addr);
621
622 wrb->payload_length = cmd_len;
623 if (mem) {
624 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
625 MCC_WRB_SGE_CNT_SHIFT;
626 sge = nonembedded_sgl(wrb);
627 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
628 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
629 sge->len = cpu_to_le32(mem->size);
630 } else
631 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
632 be_dws_cpu_to_le(wrb, 8);
633 }
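/*
 * Example with hypothetical sizes: for an embedded 64-byte command the
 * header advertises request_length = 64 - sizeof(struct be_cmd_req_hdr)
 * and MCC_WRB_EMBEDDED_MASK is set; for a non-embedded command a single
 * SGE instead points at the external DMA buffer described by @mem.
 */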
634
635 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
636 struct be_dma_mem *mem)
637 {
638 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
639 u64 dma = (u64)mem->dma;
640
641 for (i = 0; i < buf_pages; i++) {
642 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
643 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
644 dma += PAGE_SIZE_4K;
645 }
646 }
647
648 /* Converts interrupt delay in microseconds to multiplier value */
649 static u32 eq_delay_to_mult(u32 usec_delay)
650 {
651 #define MAX_INTR_RATE 651042
652 const u32 round = 10;
653 u32 multiplier;
654
655 if (usec_delay == 0)
656 multiplier = 0;
657 else {
658 u32 interrupt_rate = 1000000 / usec_delay;
659 /* Max delay, corresponding to the lowest interrupt rate */
660 if (interrupt_rate == 0)
661 multiplier = 1023;
662 else {
663 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
664 multiplier /= interrupt_rate;
665 			/* Round the multiplier to the closest value. */
666 multiplier = (multiplier + round/2) / round;
667 multiplier = min(multiplier, (u32)1023);
668 }
669 }
670 return multiplier;
671 }
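/*
 * Worked example (illustrative values): usec_delay = 96 gives
 * interrupt_rate = 1000000 / 96 = 10416, so
 * multiplier = (651042 - 10416) * 10 / 10416 = 615, which rounds to
 * (615 + 5) / 10 = 62, well under the 1023 cap.
 */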
672
673 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
674 {
675 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
676 struct be_mcc_wrb *wrb
677 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
678 memset(wrb, 0, sizeof(*wrb));
679 return wrb;
680 }
681
682 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
683 {
684 struct be_queue_info *mccq = &adapter->mcc_obj.q;
685 struct be_mcc_wrb *wrb;
686
687 if (!mccq->created)
688 return NULL;
689
690 if (atomic_read(&mccq->used) >= mccq->len) {
691 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
692 return NULL;
693 }
694
695 wrb = queue_head_node(mccq);
696 queue_head_inc(mccq);
697 atomic_inc(&mccq->used);
698 memset(wrb, 0, sizeof(*wrb));
699 return wrb;
700 }
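/*
 * The MCCQ-based commands below all follow the same sequence; sketched
 * here for reference only:
 *
 *	spin_lock_bh(&adapter->mcc_lock);
 *	wrb = wrb_from_mccq(adapter);		// NULL means -EBUSY
 *	req = embedded_payload(wrb);
 *	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req),
 *			       wrb, NULL);
 *	// ... fill in request fields ...
 *	status = be_mcc_notify_wait(adapter);	// or be_mcc_notify() async
 *	spin_unlock_bh(&adapter->mcc_lock);
 */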
701
702 /* Tell fw we're about to start firing cmds by writing a
703 * special pattern across the wrb hdr; uses mbox
704 */
705 int be_cmd_fw_init(struct be_adapter *adapter)
706 {
707 u8 *wrb;
708 int status;
709
710 if (lancer_chip(adapter))
711 return 0;
712
713 if (mutex_lock_interruptible(&adapter->mbox_lock))
714 return -1;
715
716 wrb = (u8 *)wrb_from_mbox(adapter);
717 *wrb++ = 0xFF;
718 *wrb++ = 0x12;
719 *wrb++ = 0x34;
720 *wrb++ = 0xFF;
721 *wrb++ = 0xFF;
722 *wrb++ = 0x56;
723 *wrb++ = 0x78;
724 *wrb = 0xFF;
725
726 status = be_mbox_notify_wait(adapter);
727
728 mutex_unlock(&adapter->mbox_lock);
729 return status;
730 }
731
732 /* Tell fw we're done with firing cmds by writing a
733 * special pattern across the wrb hdr; uses mbox
734 */
735 int be_cmd_fw_clean(struct be_adapter *adapter)
736 {
737 u8 *wrb;
738 int status;
739
740 if (lancer_chip(adapter))
741 return 0;
742
743 if (mutex_lock_interruptible(&adapter->mbox_lock))
744 return -1;
745
746 wrb = (u8 *)wrb_from_mbox(adapter);
747 *wrb++ = 0xFF;
748 *wrb++ = 0xAA;
749 *wrb++ = 0xBB;
750 *wrb++ = 0xFF;
751 *wrb++ = 0xFF;
752 *wrb++ = 0xCC;
753 *wrb++ = 0xDD;
754 *wrb = 0xFF;
755
756 status = be_mbox_notify_wait(adapter);
757
758 mutex_unlock(&adapter->mbox_lock);
759 return status;
760 }
761
762 int be_cmd_eq_create(struct be_adapter *adapter,
763 struct be_queue_info *eq, int eq_delay)
764 {
765 struct be_mcc_wrb *wrb;
766 struct be_cmd_req_eq_create *req;
767 struct be_dma_mem *q_mem = &eq->dma_mem;
768 int status;
769
770 if (mutex_lock_interruptible(&adapter->mbox_lock))
771 return -1;
772
773 wrb = wrb_from_mbox(adapter);
774 req = embedded_payload(wrb);
775
776 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
777 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
778
779 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
780
781 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
782 	/* 4-byte eqe */
783 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
784 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
785 __ilog2_u32(eq->len/256));
786 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
787 eq_delay_to_mult(eq_delay));
788 be_dws_cpu_to_le(req->context, sizeof(req->context));
789
790 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
791
792 status = be_mbox_notify_wait(adapter);
793 if (!status) {
794 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
795 eq->id = le16_to_cpu(resp->eq_id);
796 eq->created = true;
797 }
798
799 mutex_unlock(&adapter->mbox_lock);
800 return status;
801 }
802
803 /* Use MCC */
804 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
805 bool permanent, u32 if_handle, u32 pmac_id)
806 {
807 struct be_mcc_wrb *wrb;
808 struct be_cmd_req_mac_query *req;
809 int status;
810
811 spin_lock_bh(&adapter->mcc_lock);
812
813 wrb = wrb_from_mccq(adapter);
814 if (!wrb) {
815 status = -EBUSY;
816 goto err;
817 }
818 req = embedded_payload(wrb);
819
820 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
821 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
822 req->type = MAC_ADDRESS_TYPE_NETWORK;
823 if (permanent) {
824 req->permanent = 1;
825 } else {
826 req->if_id = cpu_to_le16((u16) if_handle);
827 req->pmac_id = cpu_to_le32(pmac_id);
828 req->permanent = 0;
829 }
830
831 status = be_mcc_notify_wait(adapter);
832 if (!status) {
833 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
834 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
835 }
836
837 err:
838 spin_unlock_bh(&adapter->mcc_lock);
839 return status;
840 }
841
842 /* Uses synchronous MCCQ */
843 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
844 u32 if_id, u32 *pmac_id, u32 domain)
845 {
846 struct be_mcc_wrb *wrb;
847 struct be_cmd_req_pmac_add *req;
848 int status;
849
850 spin_lock_bh(&adapter->mcc_lock);
851
852 wrb = wrb_from_mccq(adapter);
853 if (!wrb) {
854 status = -EBUSY;
855 goto err;
856 }
857 req = embedded_payload(wrb);
858
859 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
860 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
861
862 req->hdr.domain = domain;
863 req->if_id = cpu_to_le32(if_id);
864 memcpy(req->mac_address, mac_addr, ETH_ALEN);
865
866 status = be_mcc_notify_wait(adapter);
867 if (!status) {
868 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
869 *pmac_id = le32_to_cpu(resp->pmac_id);
870 }
871
872 err:
873 spin_unlock_bh(&adapter->mcc_lock);
874
875 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
876 status = -EPERM;
877
878 return status;
879 }
880
881 /* Uses synchronous MCCQ */
882 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
883 {
884 struct be_mcc_wrb *wrb;
885 struct be_cmd_req_pmac_del *req;
886 int status;
887
888 if (pmac_id == -1)
889 return 0;
890
891 spin_lock_bh(&adapter->mcc_lock);
892
893 wrb = wrb_from_mccq(adapter);
894 if (!wrb) {
895 status = -EBUSY;
896 goto err;
897 }
898 req = embedded_payload(wrb);
899
900 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
901 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
902
903 req->hdr.domain = dom;
904 req->if_id = cpu_to_le32(if_id);
905 req->pmac_id = cpu_to_le32(pmac_id);
906
907 status = be_mcc_notify_wait(adapter);
908
909 err:
910 spin_unlock_bh(&adapter->mcc_lock);
911 return status;
912 }
913
914 /* Uses Mbox */
915 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
916 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
917 {
918 struct be_mcc_wrb *wrb;
919 struct be_cmd_req_cq_create *req;
920 struct be_dma_mem *q_mem = &cq->dma_mem;
921 void *ctxt;
922 int status;
923
924 if (mutex_lock_interruptible(&adapter->mbox_lock))
925 return -1;
926
927 wrb = wrb_from_mbox(adapter);
928 req = embedded_payload(wrb);
929 ctxt = &req->context;
930
931 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
932 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
933
934 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
935 if (lancer_chip(adapter)) {
936 req->hdr.version = 2;
937 req->page_size = 1; /* 1 for 4K */
938 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
939 no_delay);
940 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
941 __ilog2_u32(cq->len/256));
942 AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
943 AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
944 ctxt, 1);
945 AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
946 ctxt, eq->id);
947 } else {
948 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
949 coalesce_wm);
950 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
951 ctxt, no_delay);
952 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
953 __ilog2_u32(cq->len/256));
954 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
955 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
956 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
957 }
958
959 be_dws_cpu_to_le(ctxt, sizeof(req->context));
960
961 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
962
963 status = be_mbox_notify_wait(adapter);
964 if (!status) {
965 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
966 cq->id = le16_to_cpu(resp->cq_id);
967 cq->created = true;
968 }
969
970 mutex_unlock(&adapter->mbox_lock);
971
972 return status;
973 }
974
975 static u32 be_encoded_q_len(int q_len)
976 {
977 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
978 if (len_encoded == 16)
979 len_encoded = 0;
980 return len_encoded;
981 }
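/*
 * Example: for a power-of-two q_len such as 256, fls(256) = log2(256) + 1
 * = 9, so the ring size is encoded as 9; an fls() result of 16
 * (q_len >= 32K) is folded to 0 by the special case above.
 */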
982
983 int be_cmd_mccq_ext_create(struct be_adapter *adapter,
984 struct be_queue_info *mccq,
985 struct be_queue_info *cq)
986 {
987 struct be_mcc_wrb *wrb;
988 struct be_cmd_req_mcc_ext_create *req;
989 struct be_dma_mem *q_mem = &mccq->dma_mem;
990 void *ctxt;
991 int status;
992
993 if (mutex_lock_interruptible(&adapter->mbox_lock))
994 return -1;
995
996 wrb = wrb_from_mbox(adapter);
997 req = embedded_payload(wrb);
998 ctxt = &req->context;
999
1000 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1001 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
1002
1003 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1004 if (lancer_chip(adapter)) {
1005 req->hdr.version = 1;
1006 req->cq_id = cpu_to_le16(cq->id);
1007
1008 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
1009 be_encoded_q_len(mccq->len));
1010 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
1011 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
1012 ctxt, cq->id);
1013 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
1014 ctxt, 1);
1015
1016 } else {
1017 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1018 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1019 be_encoded_q_len(mccq->len));
1020 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1021 }
1022
1023 	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
1024 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
1025 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1026
1027 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1028
1029 status = be_mbox_notify_wait(adapter);
1030 if (!status) {
1031 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1032 mccq->id = le16_to_cpu(resp->id);
1033 mccq->created = true;
1034 }
1035 mutex_unlock(&adapter->mbox_lock);
1036
1037 return status;
1038 }
1039
1040 int be_cmd_mccq_org_create(struct be_adapter *adapter,
1041 struct be_queue_info *mccq,
1042 struct be_queue_info *cq)
1043 {
1044 struct be_mcc_wrb *wrb;
1045 struct be_cmd_req_mcc_create *req;
1046 struct be_dma_mem *q_mem = &mccq->dma_mem;
1047 void *ctxt;
1048 int status;
1049
1050 if (mutex_lock_interruptible(&adapter->mbox_lock))
1051 return -1;
1052
1053 wrb = wrb_from_mbox(adapter);
1054 req = embedded_payload(wrb);
1055 ctxt = &req->context;
1056
1057 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1058 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
1059
1060 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1061
1062 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1063 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1064 be_encoded_q_len(mccq->len));
1065 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1066
1067 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1068
1069 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1070
1071 status = be_mbox_notify_wait(adapter);
1072 if (!status) {
1073 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1074 mccq->id = le16_to_cpu(resp->id);
1075 mccq->created = true;
1076 }
1077
1078 mutex_unlock(&adapter->mbox_lock);
1079 return status;
1080 }
1081
1082 int be_cmd_mccq_create(struct be_adapter *adapter,
1083 struct be_queue_info *mccq,
1084 struct be_queue_info *cq)
1085 {
1086 int status;
1087
1088 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1089 if (status && !lancer_chip(adapter)) {
1090 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1091 "or newer to avoid conflicting priorities between NIC "
1092 "and FCoE traffic");
1093 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1094 }
1095 return status;
1096 }
1097
1098 int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1099 {
1100 struct be_mcc_wrb *wrb;
1101 struct be_cmd_req_eth_tx_create *req;
1102 struct be_queue_info *txq = &txo->q;
1103 struct be_queue_info *cq = &txo->cq;
1104 struct be_dma_mem *q_mem = &txq->dma_mem;
1105 int status, ver = 0;
1106
1107 spin_lock_bh(&adapter->mcc_lock);
1108
1109 wrb = wrb_from_mccq(adapter);
1110 if (!wrb) {
1111 status = -EBUSY;
1112 goto err;
1113 }
1114
1115 req = embedded_payload(wrb);
1116
1117 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1118 OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
1119
1120 if (lancer_chip(adapter)) {
1121 req->hdr.version = 1;
1122 req->if_id = cpu_to_le16(adapter->if_handle);
1123 } else if (BEx_chip(adapter)) {
1124 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1125 req->hdr.version = 2;
1126 } else { /* For SH */
1127 req->hdr.version = 2;
1128 }
1129
1130 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1131 req->ulp_num = BE_ULP1_NUM;
1132 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1133 req->cq_id = cpu_to_le16(cq->id);
1134 req->queue_size = be_encoded_q_len(txq->len);
1135 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1136
1137 ver = req->hdr.version;
1138
1139 status = be_mcc_notify_wait(adapter);
1140 if (!status) {
1141 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
1142 txq->id = le16_to_cpu(resp->cid);
1143 if (ver == 2)
1144 txo->db_offset = le32_to_cpu(resp->db_offset);
1145 else
1146 txo->db_offset = DB_TXULP1_OFFSET;
1147 txq->created = true;
1148 }
1149
1150 err:
1151 spin_unlock_bh(&adapter->mcc_lock);
1152
1153 return status;
1154 }
1155
1156 /* Uses MCC */
1157 int be_cmd_rxq_create(struct be_adapter *adapter,
1158 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1159 u32 if_id, u32 rss, u8 *rss_id)
1160 {
1161 struct be_mcc_wrb *wrb;
1162 struct be_cmd_req_eth_rx_create *req;
1163 struct be_dma_mem *q_mem = &rxq->dma_mem;
1164 int status;
1165
1166 spin_lock_bh(&adapter->mcc_lock);
1167
1168 wrb = wrb_from_mccq(adapter);
1169 if (!wrb) {
1170 status = -EBUSY;
1171 goto err;
1172 }
1173 req = embedded_payload(wrb);
1174
1175 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1176 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1177
1178 req->cq_id = cpu_to_le16(cq_id);
1179 req->frag_size = fls(frag_size) - 1;
1180 req->num_pages = 2;
1181 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1182 req->interface_id = cpu_to_le32(if_id);
1183 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1184 req->rss_queue = cpu_to_le32(rss);
1185
1186 status = be_mcc_notify_wait(adapter);
1187 if (!status) {
1188 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1189 rxq->id = le16_to_cpu(resp->id);
1190 rxq->created = true;
1191 *rss_id = resp->rss_id;
1192 }
1193
1194 err:
1195 spin_unlock_bh(&adapter->mcc_lock);
1196 return status;
1197 }
1198
1199 /* Generic destroyer function for all types of queues
1200 * Uses Mbox
1201 */
1202 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1203 int queue_type)
1204 {
1205 struct be_mcc_wrb *wrb;
1206 struct be_cmd_req_q_destroy *req;
1207 u8 subsys = 0, opcode = 0;
1208 int status;
1209
1210 if (mutex_lock_interruptible(&adapter->mbox_lock))
1211 return -1;
1212
1213 wrb = wrb_from_mbox(adapter);
1214 req = embedded_payload(wrb);
1215
1216 switch (queue_type) {
1217 case QTYPE_EQ:
1218 subsys = CMD_SUBSYSTEM_COMMON;
1219 opcode = OPCODE_COMMON_EQ_DESTROY;
1220 break;
1221 case QTYPE_CQ:
1222 subsys = CMD_SUBSYSTEM_COMMON;
1223 opcode = OPCODE_COMMON_CQ_DESTROY;
1224 break;
1225 case QTYPE_TXQ:
1226 subsys = CMD_SUBSYSTEM_ETH;
1227 opcode = OPCODE_ETH_TX_DESTROY;
1228 break;
1229 case QTYPE_RXQ:
1230 subsys = CMD_SUBSYSTEM_ETH;
1231 opcode = OPCODE_ETH_RX_DESTROY;
1232 break;
1233 case QTYPE_MCCQ:
1234 subsys = CMD_SUBSYSTEM_COMMON;
1235 opcode = OPCODE_COMMON_MCC_DESTROY;
1236 break;
1237 default:
1238 BUG();
1239 }
1240
1241 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1242 NULL);
1243 req->id = cpu_to_le16(q->id);
1244
1245 status = be_mbox_notify_wait(adapter);
1246 q->created = false;
1247
1248 mutex_unlock(&adapter->mbox_lock);
1249 return status;
1250 }
1251
1252 /* Uses MCC */
1253 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1254 {
1255 struct be_mcc_wrb *wrb;
1256 struct be_cmd_req_q_destroy *req;
1257 int status;
1258
1259 spin_lock_bh(&adapter->mcc_lock);
1260
1261 wrb = wrb_from_mccq(adapter);
1262 if (!wrb) {
1263 status = -EBUSY;
1264 goto err;
1265 }
1266 req = embedded_payload(wrb);
1267
1268 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1269 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1270 req->id = cpu_to_le16(q->id);
1271
1272 status = be_mcc_notify_wait(adapter);
1273 q->created = false;
1274
1275 err:
1276 spin_unlock_bh(&adapter->mcc_lock);
1277 return status;
1278 }
1279
1280 /* Create an rx filtering policy configuration on an i/f
1281 * Uses MCCQ
1282 */
1283 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1284 u32 *if_handle, u32 domain)
1285 {
1286 struct be_mcc_wrb *wrb;
1287 struct be_cmd_req_if_create *req;
1288 int status;
1289
1290 spin_lock_bh(&adapter->mcc_lock);
1291
1292 wrb = wrb_from_mccq(adapter);
1293 if (!wrb) {
1294 status = -EBUSY;
1295 goto err;
1296 }
1297 req = embedded_payload(wrb);
1298
1299 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1300 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
1301 req->hdr.domain = domain;
1302 req->capability_flags = cpu_to_le32(cap_flags);
1303 req->enable_flags = cpu_to_le32(en_flags);
1304
1305 req->pmac_invalid = true;
1306
1307 status = be_mcc_notify_wait(adapter);
1308 if (!status) {
1309 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1310 *if_handle = le32_to_cpu(resp->interface_id);
1311 }
1312
1313 err:
1314 spin_unlock_bh(&adapter->mcc_lock);
1315 return status;
1316 }
1317
1318 /* Uses MCCQ */
1319 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1320 {
1321 struct be_mcc_wrb *wrb;
1322 struct be_cmd_req_if_destroy *req;
1323 int status;
1324
1325 if (interface_id == -1)
1326 return 0;
1327
1328 spin_lock_bh(&adapter->mcc_lock);
1329
1330 wrb = wrb_from_mccq(adapter);
1331 if (!wrb) {
1332 status = -EBUSY;
1333 goto err;
1334 }
1335 req = embedded_payload(wrb);
1336
1337 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1338 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
1339 req->hdr.domain = domain;
1340 req->interface_id = cpu_to_le32(interface_id);
1341
1342 status = be_mcc_notify_wait(adapter);
1343 err:
1344 spin_unlock_bh(&adapter->mcc_lock);
1345 return status;
1346 }
1347
1348 /* Get stats is a non-embedded command: the request is not embedded inside
1349 * WRB but is a separate dma memory block
1350 * Uses asynchronous MCC
1351 */
1352 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1353 {
1354 struct be_mcc_wrb *wrb;
1355 struct be_cmd_req_hdr *hdr;
1356 int status = 0;
1357
1358 spin_lock_bh(&adapter->mcc_lock);
1359
1360 wrb = wrb_from_mccq(adapter);
1361 if (!wrb) {
1362 status = -EBUSY;
1363 goto err;
1364 }
1365 hdr = nonemb_cmd->va;
1366
1367 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1368 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1369
1370 	/* version 1 of the cmd is supported on all chips except BE2 */
1371 if (!BE2_chip(adapter))
1372 hdr->version = 1;
1373
1374 be_mcc_notify(adapter);
1375 adapter->stats_cmd_sent = true;
1376
1377 err:
1378 spin_unlock_bh(&adapter->mcc_lock);
1379 return status;
1380 }
1381
1382 /* Lancer Stats */
1383 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1384 struct be_dma_mem *nonemb_cmd)
1385 {
1386
1387 struct be_mcc_wrb *wrb;
1388 struct lancer_cmd_req_pport_stats *req;
1389 int status = 0;
1390
1391 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1392 CMD_SUBSYSTEM_ETH))
1393 return -EPERM;
1394
1395 spin_lock_bh(&adapter->mcc_lock);
1396
1397 wrb = wrb_from_mccq(adapter);
1398 if (!wrb) {
1399 status = -EBUSY;
1400 goto err;
1401 }
1402 req = nonemb_cmd->va;
1403
1404 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1405 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1406 nonemb_cmd);
1407
1408 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1409 req->cmd_params.params.reset_stats = 0;
1410
1411 be_mcc_notify(adapter);
1412 adapter->stats_cmd_sent = true;
1413
1414 err:
1415 spin_unlock_bh(&adapter->mcc_lock);
1416 return status;
1417 }
1418
1419 static int be_mac_to_link_speed(int mac_speed)
1420 {
1421 switch (mac_speed) {
1422 case PHY_LINK_SPEED_ZERO:
1423 return 0;
1424 case PHY_LINK_SPEED_10MBPS:
1425 return 10;
1426 case PHY_LINK_SPEED_100MBPS:
1427 return 100;
1428 case PHY_LINK_SPEED_1GBPS:
1429 return 1000;
1430 case PHY_LINK_SPEED_10GBPS:
1431 return 10000;
1432 }
1433 return 0;
1434 }
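/*
 * be_cmd_link_status_query() below prefers the response's link_speed
 * field (in 10 Mbps units) and falls back to this mac_speed mapping,
 * e.g. PHY_LINK_SPEED_10GBPS -> 10000 Mbps, only when link_speed is 0.
 */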
1435
1436 /* Uses synchronous mcc
1437 * Returns link_speed in Mbps
1438 */
1439 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1440 u8 *link_status, u32 dom)
1441 {
1442 struct be_mcc_wrb *wrb;
1443 struct be_cmd_req_link_status *req;
1444 int status;
1445
1446 spin_lock_bh(&adapter->mcc_lock);
1447
1448 if (link_status)
1449 *link_status = LINK_DOWN;
1450
1451 wrb = wrb_from_mccq(adapter);
1452 if (!wrb) {
1453 status = -EBUSY;
1454 goto err;
1455 }
1456 req = embedded_payload(wrb);
1457
1458 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1459 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1460
1461 	/* version 1 of the cmd is supported on all chips except BE2 */
1462 if (!BE2_chip(adapter))
1463 req->hdr.version = 1;
1464
1465 req->hdr.domain = dom;
1466
1467 status = be_mcc_notify_wait(adapter);
1468 if (!status) {
1469 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1470 if (link_speed) {
1471 *link_speed = resp->link_speed ?
1472 le16_to_cpu(resp->link_speed) * 10 :
1473 be_mac_to_link_speed(resp->mac_speed);
1474
1475 if (!resp->logical_link_status)
1476 *link_speed = 0;
1477 }
1478 if (link_status)
1479 *link_status = resp->logical_link_status;
1480 }
1481
1482 err:
1483 spin_unlock_bh(&adapter->mcc_lock);
1484 return status;
1485 }
1486
1487 /* Uses asynchronous mcc */
1488 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1489 {
1490 struct be_mcc_wrb *wrb;
1491 struct be_cmd_req_get_cntl_addnl_attribs *req;
1492 	int status = 0;
1493
1494 spin_lock_bh(&adapter->mcc_lock);
1495
1496 wrb = wrb_from_mccq(adapter);
1497 if (!wrb) {
1498 status = -EBUSY;
1499 goto err;
1500 }
1501 req = embedded_payload(wrb);
1502
1503 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1504 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1505 wrb, NULL);
1506
1507 be_mcc_notify(adapter);
1508
1509 err:
1510 spin_unlock_bh(&adapter->mcc_lock);
1511 return status;
1512 }
1513
1514 /* Uses synchronous mcc */
1515 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1516 {
1517 struct be_mcc_wrb *wrb;
1518 struct be_cmd_req_get_fat *req;
1519 int status;
1520
1521 spin_lock_bh(&adapter->mcc_lock);
1522
1523 wrb = wrb_from_mccq(adapter);
1524 if (!wrb) {
1525 status = -EBUSY;
1526 goto err;
1527 }
1528 req = embedded_payload(wrb);
1529
1530 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1531 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1532 req->fat_operation = cpu_to_le32(QUERY_FAT);
1533 status = be_mcc_notify_wait(adapter);
1534 if (!status) {
1535 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1536 if (log_size && resp->log_size)
1537 *log_size = le32_to_cpu(resp->log_size) -
1538 sizeof(u32);
1539 }
1540 err:
1541 spin_unlock_bh(&adapter->mcc_lock);
1542 return status;
1543 }
1544
1545 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1546 {
1547 struct be_dma_mem get_fat_cmd;
1548 struct be_mcc_wrb *wrb;
1549 struct be_cmd_req_get_fat *req;
1550 u32 offset = 0, total_size, buf_size,
1551 log_offset = sizeof(u32), payload_len;
1552 int status;
1553
1554 if (buf_len == 0)
1555 return;
1556
1557 total_size = buf_len;
1558
1559 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1560 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1561 get_fat_cmd.size,
1562 &get_fat_cmd.dma);
1563 if (!get_fat_cmd.va) {
1564 status = -ENOMEM;
1565 dev_err(&adapter->pdev->dev,
1566 "Memory allocation failure while retrieving FAT data\n");
1567 return;
1568 }
1569
1570 spin_lock_bh(&adapter->mcc_lock);
1571
1572 while (total_size) {
1573 buf_size = min(total_size, (u32)60*1024);
1574 total_size -= buf_size;
1575
1576 wrb = wrb_from_mccq(adapter);
1577 if (!wrb) {
1578 status = -EBUSY;
1579 goto err;
1580 }
1581 req = get_fat_cmd.va;
1582
1583 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1584 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1585 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1586 &get_fat_cmd);
1587
1588 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1589 req->read_log_offset = cpu_to_le32(log_offset);
1590 req->read_log_length = cpu_to_le32(buf_size);
1591 req->data_buffer_size = cpu_to_le32(buf_size);
1592
1593 status = be_mcc_notify_wait(adapter);
1594 if (!status) {
1595 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1596 memcpy(buf + offset,
1597 resp->data_buffer,
1598 le32_to_cpu(resp->read_log_length));
1599 } else {
1600 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1601 goto err;
1602 }
1603 offset += buf_size;
1604 log_offset += buf_size;
1605 }
1606 err:
1607 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1608 get_fat_cmd.va,
1609 get_fat_cmd.dma);
1610 spin_unlock_bh(&adapter->mcc_lock);
1611 }
1612
1613 /* Uses synchronous mcc */
1614 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1615 char *fw_on_flash)
1616 {
1617 struct be_mcc_wrb *wrb;
1618 struct be_cmd_req_get_fw_version *req;
1619 int status;
1620
1621 spin_lock_bh(&adapter->mcc_lock);
1622
1623 wrb = wrb_from_mccq(adapter);
1624 if (!wrb) {
1625 status = -EBUSY;
1626 goto err;
1627 }
1628
1629 req = embedded_payload(wrb);
1630
1631 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1632 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
1633 status = be_mcc_notify_wait(adapter);
1634 if (!status) {
1635 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1636 strcpy(fw_ver, resp->firmware_version_string);
1637 if (fw_on_flash)
1638 strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1639 }
1640 err:
1641 spin_unlock_bh(&adapter->mcc_lock);
1642 return status;
1643 }
1644
1645 /* Set the EQ delay interval of an EQ to the specified value
1646 * Uses async mcc
1647 */
1648 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1649 {
1650 struct be_mcc_wrb *wrb;
1651 struct be_cmd_req_modify_eq_delay *req;
1652 int status = 0;
1653
1654 spin_lock_bh(&adapter->mcc_lock);
1655
1656 wrb = wrb_from_mccq(adapter);
1657 if (!wrb) {
1658 status = -EBUSY;
1659 goto err;
1660 }
1661 req = embedded_payload(wrb);
1662
1663 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1664 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1665
1666 req->num_eq = cpu_to_le32(1);
1667 req->delay[0].eq_id = cpu_to_le32(eq_id);
1668 req->delay[0].phase = 0;
1669 req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1670
1671 be_mcc_notify(adapter);
1672
1673 err:
1674 spin_unlock_bh(&adapter->mcc_lock);
1675 return status;
1676 }
1677
1678 /* Uses synchronous mcc */
1679 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1680 u32 num, bool untagged, bool promiscuous)
1681 {
1682 struct be_mcc_wrb *wrb;
1683 struct be_cmd_req_vlan_config *req;
1684 int status;
1685
1686 spin_lock_bh(&adapter->mcc_lock);
1687
1688 wrb = wrb_from_mccq(adapter);
1689 if (!wrb) {
1690 status = -EBUSY;
1691 goto err;
1692 }
1693 req = embedded_payload(wrb);
1694
1695 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1696 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
1697
1698 req->interface_id = if_id;
1699 req->promiscuous = promiscuous;
1700 req->untagged = untagged;
1701 req->num_vlan = num;
1702 if (!promiscuous) {
1703 memcpy(req->normal_vlan, vtag_array,
1704 req->num_vlan * sizeof(vtag_array[0]));
1705 }
1706
1707 status = be_mcc_notify_wait(adapter);
1708
1709 err:
1710 spin_unlock_bh(&adapter->mcc_lock);
1711 return status;
1712 }
1713
1714 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1715 {
1716 struct be_mcc_wrb *wrb;
1717 struct be_dma_mem *mem = &adapter->rx_filter;
1718 struct be_cmd_req_rx_filter *req = mem->va;
1719 int status;
1720
1721 spin_lock_bh(&adapter->mcc_lock);
1722
1723 wrb = wrb_from_mccq(adapter);
1724 if (!wrb) {
1725 status = -EBUSY;
1726 goto err;
1727 }
1728 memset(req, 0, sizeof(*req));
1729 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1730 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1731 wrb, mem);
1732
1733 req->if_id = cpu_to_le32(adapter->if_handle);
1734 if (flags & IFF_PROMISC) {
1735 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1736 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1737 if (value == ON)
1738 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1739 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1740 } else if (flags & IFF_ALLMULTI) {
1741 req->if_flags_mask = req->if_flags =
1742 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1743 } else {
1744 struct netdev_hw_addr *ha;
1745 int i = 0;
1746
1747 req->if_flags_mask = req->if_flags =
1748 cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1749
1750 /* Reset mcast promisc mode if already set by setting mask
1751 * and not setting flags field
1752 */
1753 req->if_flags_mask |=
1754 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1755 adapter->if_cap_flags);
1756
1757 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1758 netdev_for_each_mc_addr(ha, adapter->netdev)
1759 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1760 }
1761
1762 status = be_mcc_notify_wait(adapter);
1763 err:
1764 spin_unlock_bh(&adapter->mcc_lock);
1765 return status;
1766 }
1767
1768 /* Uses synchronous mcc */
1769 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1770 {
1771 struct be_mcc_wrb *wrb;
1772 struct be_cmd_req_set_flow_control *req;
1773 int status;
1774
1775 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1776 CMD_SUBSYSTEM_COMMON))
1777 return -EPERM;
1778
1779 spin_lock_bh(&adapter->mcc_lock);
1780
1781 wrb = wrb_from_mccq(adapter);
1782 if (!wrb) {
1783 status = -EBUSY;
1784 goto err;
1785 }
1786 req = embedded_payload(wrb);
1787
1788 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1789 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1790
1791 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1792 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1793
1794 status = be_mcc_notify_wait(adapter);
1795
1796 err:
1797 spin_unlock_bh(&adapter->mcc_lock);
1798 return status;
1799 }
1800
1801 /* Uses sync mcc */
1802 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1803 {
1804 struct be_mcc_wrb *wrb;
1805 struct be_cmd_req_get_flow_control *req;
1806 int status;
1807
1808 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
1809 CMD_SUBSYSTEM_COMMON))
1810 return -EPERM;
1811
1812 spin_lock_bh(&adapter->mcc_lock);
1813
1814 wrb = wrb_from_mccq(adapter);
1815 if (!wrb) {
1816 status = -EBUSY;
1817 goto err;
1818 }
1819 req = embedded_payload(wrb);
1820
1821 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1822 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1823
1824 status = be_mcc_notify_wait(adapter);
1825 if (!status) {
1826 struct be_cmd_resp_get_flow_control *resp =
1827 embedded_payload(wrb);
1828 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1829 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1830 }
1831
1832 err:
1833 spin_unlock_bh(&adapter->mcc_lock);
1834 return status;
1835 }
1836
1837 /* Uses mbox */
1838 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1839 u32 *mode, u32 *caps)
1840 {
1841 struct be_mcc_wrb *wrb;
1842 struct be_cmd_req_query_fw_cfg *req;
1843 int status;
1844
1845 if (mutex_lock_interruptible(&adapter->mbox_lock))
1846 return -1;
1847
1848 wrb = wrb_from_mbox(adapter);
1849 req = embedded_payload(wrb);
1850
1851 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1852 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
1853
1854 status = be_mbox_notify_wait(adapter);
1855 if (!status) {
1856 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1857 *port_num = le32_to_cpu(resp->phys_port);
1858 *mode = le32_to_cpu(resp->function_mode);
1859 *caps = le32_to_cpu(resp->function_caps);
1860 }
1861
1862 mutex_unlock(&adapter->mbox_lock);
1863 return status;
1864 }
1865
1866 /* Uses mbox */
1867 int be_cmd_reset_function(struct be_adapter *adapter)
1868 {
1869 struct be_mcc_wrb *wrb;
1870 struct be_cmd_req_hdr *req;
1871 int status;
1872
1873 if (lancer_chip(adapter)) {
1874 status = lancer_wait_ready(adapter);
1875 if (!status) {
1876 iowrite32(SLI_PORT_CONTROL_IP_MASK,
1877 adapter->db + SLIPORT_CONTROL_OFFSET);
1878 status = lancer_test_and_set_rdy_state(adapter);
1879 }
1880 if (status) {
1881 dev_err(&adapter->pdev->dev,
1882 "Adapter in non recoverable error\n");
1883 }
1884 return status;
1885 }
1886
1887 if (mutex_lock_interruptible(&adapter->mbox_lock))
1888 return -1;
1889
1890 wrb = wrb_from_mbox(adapter);
1891 req = embedded_payload(wrb);
1892
1893 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1894 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
1895
1896 status = be_mbox_notify_wait(adapter);
1897
1898 mutex_unlock(&adapter->mbox_lock);
1899 return status;
1900 }
1901
1902 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1903 {
1904 struct be_mcc_wrb *wrb;
1905 struct be_cmd_req_rss_config *req;
1906 u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
1907 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
1908 0x3ea83c02, 0x4a110304};
1909 int status;
1910
1911 if (mutex_lock_interruptible(&adapter->mbox_lock))
1912 return -1;
1913
1914 wrb = wrb_from_mbox(adapter);
1915 req = embedded_payload(wrb);
1916
1917 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1918 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
1919
1920 req->if_id = cpu_to_le32(adapter->if_handle);
1921 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
1922 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
1923
1924 if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
1925 req->hdr.version = 1;
1926 req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
1927 RSS_ENABLE_UDP_IPV6);
1928 }
1929
1930 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1931 memcpy(req->cpu_table, rsstable, table_size);
1932 memcpy(req->hash, myhash, sizeof(myhash));
1933 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1934
1935 status = be_mbox_notify_wait(adapter);
1936
1937 mutex_unlock(&adapter->mbox_lock);
1938 return status;
1939 }
1940
1941 /* Uses sync mcc */
1942 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1943 u8 bcn, u8 sts, u8 state)
1944 {
1945 struct be_mcc_wrb *wrb;
1946 struct be_cmd_req_enable_disable_beacon *req;
1947 int status;
1948
1949 spin_lock_bh(&adapter->mcc_lock);
1950
1951 wrb = wrb_from_mccq(adapter);
1952 if (!wrb) {
1953 status = -EBUSY;
1954 goto err;
1955 }
1956 req = embedded_payload(wrb);
1957
1958 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1959 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
1960
1961 req->port_num = port_num;
1962 req->beacon_state = state;
1963 req->beacon_duration = bcn;
1964 req->status_duration = sts;
1965
1966 status = be_mcc_notify_wait(adapter);
1967
1968 err:
1969 spin_unlock_bh(&adapter->mcc_lock);
1970 return status;
1971 }
1972
1973 /* Uses sync mcc */
1974 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1975 {
1976 struct be_mcc_wrb *wrb;
1977 struct be_cmd_req_get_beacon_state *req;
1978 int status;
1979
1980 spin_lock_bh(&adapter->mcc_lock);
1981
1982 wrb = wrb_from_mccq(adapter);
1983 if (!wrb) {
1984 status = -EBUSY;
1985 goto err;
1986 }
1987 req = embedded_payload(wrb);
1988
1989 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1990 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
1991
1992 req->port_num = port_num;
1993
1994 status = be_mcc_notify_wait(adapter);
1995 if (!status) {
1996 struct be_cmd_resp_get_beacon_state *resp =
1997 embedded_payload(wrb);
1998 *state = resp->beacon_state;
1999 }
2000
2001 err:
2002 spin_unlock_bh(&adapter->mcc_lock);
2003 return status;
2004 }
2005
2006 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2007 u32 data_size, u32 data_offset,
2008 const char *obj_name, u32 *data_written,
2009 u8 *change_status, u8 *addn_status)
2010 {
2011 struct be_mcc_wrb *wrb;
2012 struct lancer_cmd_req_write_object *req;
2013 struct lancer_cmd_resp_write_object *resp;
2014 void *ctxt = NULL;
2015 int status;
2016
2017 spin_lock_bh(&adapter->mcc_lock);
2018 adapter->flash_status = 0;
2019
2020 wrb = wrb_from_mccq(adapter);
2021 if (!wrb) {
2022 status = -EBUSY;
2023 goto err_unlock;
2024 }
2025
2026 req = embedded_payload(wrb);
2027
2028 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2029 OPCODE_COMMON_WRITE_OBJECT,
2030 sizeof(struct lancer_cmd_req_write_object), wrb,
2031 NULL);
2032
2033 ctxt = &req->context;
2034 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2035 write_length, ctxt, data_size);
2036
2037 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2038 eof, ctxt, data_size == 0);
2043
2044 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2045 req->write_offset = cpu_to_le32(data_offset);
2046 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2047 req->descriptor_count = cpu_to_le32(1);
2048 req->buf_len = cpu_to_le32(data_size);
2049 req->addr_low = cpu_to_le32((cmd->dma +
2050 sizeof(struct lancer_cmd_req_write_object))
2051 & 0xFFFFFFFF);
2052 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2053 sizeof(struct lancer_cmd_req_write_object)));
2054
2055 be_mcc_notify(adapter);
2056 spin_unlock_bh(&adapter->mcc_lock);
2057
2058 if (!wait_for_completion_timeout(&adapter->flash_compl,
2059 msecs_to_jiffies(30000)))
2060 status = -ETIMEDOUT;
2061 else
2062 status = adapter->flash_status;
2063
2064 resp = embedded_payload(wrb);
2065 if (!status) {
2066 *data_written = le32_to_cpu(resp->actual_write_len);
2067 *change_status = resp->change_status;
2068 } else {
2069 *addn_status = resp->additional_status;
2070 }
2071
2072 return status;
2073
2074 err_unlock:
2075 spin_unlock_bh(&adapter->mcc_lock);
2076 return status;
2077 }
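
/* Example (hypothetical caller; a sketch): images larger than the DMA buffer
 * are written through this command in chunks, advancing write_offset each
 * round. Note the payload must sit after the request header in the buffer,
 * matching the addr_low/addr_high computation above. CHUNK_SIZE and "/prg"
 * are assumptions:
 *
 *	void *img = flash_cmd.va + sizeof(struct lancer_cmd_req_write_object);
 *	u32 offset = 0, written = 0;
 *	u8 change, addn;
 *
 *	while (total_len && !status) {
 *		u32 chunk = min(total_len, (u32)CHUNK_SIZE);
 *
 *		memcpy(img, fw_data + offset, chunk);
 *		status = lancer_cmd_write_object(adapter, &flash_cmd, chunk,
 *						 offset, "/prg", &written,
 *						 &change, &addn);
 *		offset += written;
 *		total_len -= written;
 *	}
 */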
2078
2079 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2080 u32 data_size, u32 data_offset, const char *obj_name,
2081 u32 *data_read, u32 *eof, u8 *addn_status)
2082 {
2083 struct be_mcc_wrb *wrb;
2084 struct lancer_cmd_req_read_object *req;
2085 struct lancer_cmd_resp_read_object *resp;
2086 int status;
2087
2088 spin_lock_bh(&adapter->mcc_lock);
2089
2090 wrb = wrb_from_mccq(adapter);
2091 if (!wrb) {
2092 status = -EBUSY;
2093 goto err_unlock;
2094 }
2095
2096 req = embedded_payload(wrb);
2097
2098 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2099 OPCODE_COMMON_READ_OBJECT,
2100 sizeof(struct lancer_cmd_req_read_object), wrb,
2101 NULL);
2102
2103 req->desired_read_len = cpu_to_le32(data_size);
2104 req->read_offset = cpu_to_le32(data_offset);
2105 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2106 req->descriptor_count = cpu_to_le32(1);
2107 req->buf_len = cpu_to_le32(data_size);
2108 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2109 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2110
2111 status = be_mcc_notify_wait(adapter);
2112
2113 resp = embedded_payload(wrb);
2114 if (!status) {
2115 *data_read = le32_to_cpu(resp->actual_read_len);
2116 *eof = le32_to_cpu(resp->eof);
2117 } else {
2118 *addn_status = resp->additional_status;
2119 }
2120
2121 err_unlock:
2122 spin_unlock_bh(&adapter->mcc_lock);
2123 return status;
2124 }
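
/* Example (hypothetical caller; a sketch): an object (e.g. a firmware dump)
 * is read back one buffer at a time until the firmware reports EOF; the data
 * lands at the start of the DMA buffer, matching addr_low/addr_high above:
 *
 *	u32 offset = 0, read = 0, eof = 0;
 *	u8 addn;
 *
 *	do {
 *		status = lancer_cmd_read_object(adapter, &read_cmd, buf_size,
 *						offset, obj_name, &read,
 *						&eof, &addn);
 *		if (status)
 *			break;
 *		(consume 'read' bytes from read_cmd.va)
 *		offset += read;
 *	} while (!eof);
 */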
2125
2126 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2127 u32 flash_type, u32 flash_opcode, u32 buf_size)
2128 {
2129 struct be_mcc_wrb *wrb;
2130 struct be_cmd_write_flashrom *req;
2131 int status;
2132
2133 spin_lock_bh(&adapter->mcc_lock);
2134 adapter->flash_status = 0;
2135
2136 wrb = wrb_from_mccq(adapter);
2137 if (!wrb) {
2138 status = -EBUSY;
2139 goto err_unlock;
2140 }
2141 req = cmd->va;
2142
2143 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2144 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
2145
2146 req->params.op_type = cpu_to_le32(flash_type);
2147 req->params.op_code = cpu_to_le32(flash_opcode);
2148 req->params.data_buf_size = cpu_to_le32(buf_size);
2149
2150 be_mcc_notify(adapter);
2151 spin_unlock_bh(&adapter->mcc_lock);
2152
2153 if (!wait_for_completion_timeout(&adapter->flash_compl,
2154 msecs_to_jiffies(40000)))
2155 status = -ETIMEDOUT;
2156 else
2157 status = adapter->flash_status;
2158
2159 return status;
2160
2161 err_unlock:
2162 spin_unlock_bh(&adapter->mcc_lock);
2163 return status;
2164 }
2165
2166 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2167 int offset)
2168 {
2169 struct be_mcc_wrb *wrb;
2170 struct be_cmd_read_flash_crc *req;
2171 int status;
2172
2173 spin_lock_bh(&adapter->mcc_lock);
2174
2175 wrb = wrb_from_mccq(adapter);
2176 if (!wrb) {
2177 status = -EBUSY;
2178 goto err;
2179 }
2180 req = embedded_payload(wrb);
2181
2182 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2183 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2184 wrb, NULL);
2185
2186 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
2187 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2188 req->params.offset = cpu_to_le32(offset);
2189 req->params.data_buf_size = cpu_to_le32(0x4);
2190
2191 status = be_mcc_notify_wait(adapter);
2192 if (!status)
2193 memcpy(flashed_crc, req->crc, 4);
2194
2195 err:
2196 spin_unlock_bh(&adapter->mcc_lock);
2197 return status;
2198 }
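
/* Example (hypothetical caller; a sketch): the stored redboot CRC can be
 * compared with the last 4 bytes of a new image to skip sections that are
 * already up to date. img and img_size are assumed:
 *
 *	u8 flashed_crc[4];
 *
 *	status = be_cmd_get_flash_crc(adapter, flashed_crc, img_size - 4);
 *	if (!status && !memcmp(flashed_crc, img + img_size - 4, 4))
 *		return 0;	(section unchanged; nothing to flash)
 */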
2199
2200 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2201 struct be_dma_mem *nonemb_cmd)
2202 {
2203 struct be_mcc_wrb *wrb;
2204 struct be_cmd_req_acpi_wol_magic_config *req;
2205 int status;
2206
2207 spin_lock_bh(&adapter->mcc_lock);
2208
2209 wrb = wrb_from_mccq(adapter);
2210 if (!wrb) {
2211 status = -EBUSY;
2212 goto err;
2213 }
2214 req = nonemb_cmd->va;
2215
2216 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2217 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
2218 nonemb_cmd);
2219 memcpy(req->magic_mac, mac, ETH_ALEN);
2220
2221 status = be_mcc_notify_wait(adapter);
2222
2223 err:
2224 spin_unlock_bh(&adapter->mcc_lock);
2225 return status;
2226 }
2227
2228 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2229 u8 loopback_type, u8 enable)
2230 {
2231 struct be_mcc_wrb *wrb;
2232 struct be_cmd_req_set_lmode *req;
2233 int status;
2234
2235 spin_lock_bh(&adapter->mcc_lock);
2236
2237 wrb = wrb_from_mccq(adapter);
2238 if (!wrb) {
2239 status = -EBUSY;
2240 goto err;
2241 }
2242
2243 req = embedded_payload(wrb);
2244
2245 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2246 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2247 NULL);
2248
2249 req->src_port = port_num;
2250 req->dest_port = port_num;
2251 req->loopback_type = loopback_type;
2252 req->loopback_state = enable;
2253
2254 status = be_mcc_notify_wait(adapter);
2255 err:
2256 spin_unlock_bh(&adapter->mcc_lock);
2257 return status;
2258 }
2259
2260 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2261 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2262 {
2263 struct be_mcc_wrb *wrb;
2264 struct be_cmd_req_loopback_test *req;
2265 int status;
2266
2267 spin_lock_bh(&adapter->mcc_lock);
2268
2269 wrb = wrb_from_mccq(adapter);
2270 if (!wrb) {
2271 status = -EBUSY;
2272 goto err;
2273 }
2274
2275 req = embedded_payload(wrb);
2276
2277 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2278 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2279 req->hdr.timeout = cpu_to_le32(4);
2280
2281 req->pattern = cpu_to_le64(pattern);
2282 req->src_port = cpu_to_le32(port_num);
2283 req->dest_port = cpu_to_le32(port_num);
2284 req->pkt_size = cpu_to_le32(pkt_size);
2285 req->num_pkts = cpu_to_le32(num_pkts);
2286 req->loopback_type = cpu_to_le32(loopback_type);
2287
2288 status = be_mcc_notify_wait(adapter);
2289 if (!status) {
2290 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2291 status = le32_to_cpu(resp->status);
2292 }
2293
2294 err:
2295 spin_unlock_bh(&adapter->mcc_lock);
2296 return status;
2297 }
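
/* Example (hypothetical caller; a sketch): a self-test can combine the two
 * commands above: enable a loopback mode, fire a small burst of test frames,
 * then restore normal operation. The loopback-type encodings below are
 * assumptions (the real constants live with the ethtool self-test code):
 *
 *	be_cmd_set_loopback(adapter, adapter->hba_port_num,
 *			    MAC_LOOPBACK, 1);	(assumed encoding 0x0)
 *	status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
 *				      MAC_LOOPBACK, 1500, 2, 0xabc);
 *	be_cmd_set_loopback(adapter, adapter->hba_port_num,
 *			    NO_LOOPBACK, 1);	(assumed encoding 0xff)
 */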
2298
2299 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2300 u32 byte_cnt, struct be_dma_mem *cmd)
2301 {
2302 struct be_mcc_wrb *wrb;
2303 struct be_cmd_req_ddrdma_test *req;
2304 int status;
2305 int i, j = 0;
2306
2307 spin_lock_bh(&adapter->mcc_lock);
2308
2309 wrb = wrb_from_mccq(adapter);
2310 if (!wrb) {
2311 status = -EBUSY;
2312 goto err;
2313 }
2314 req = cmd->va;
2315 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2316 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
2317
2318 req->pattern = cpu_to_le64(pattern);
2319 req->byte_count = cpu_to_le32(byte_cnt);
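/* Replicate the 64-bit pattern byte-by-byte across the whole send buffer;
 * on success the firmware DMAs the buffer back and the echo is compared
 * against snd_buff below.
 */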
2320 for (i = 0; i < byte_cnt; i++) {
2321 req->snd_buff[i] = (u8)(pattern >> (j*8));
2322 j++;
2323 if (j > 7)
2324 j = 0;
2325 }
2326
2327 status = be_mcc_notify_wait(adapter);
2328
2329 if (!status) {
2330 struct be_cmd_resp_ddrdma_test *resp;
2331 resp = cmd->va;
2332 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2333 resp->snd_err) {
2334 status = -1;
2335 }
2336 }
2337
2338 err:
2339 spin_unlock_bh(&adapter->mcc_lock);
2340 return status;
2341 }
2342
2343 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2344 struct be_dma_mem *nonemb_cmd)
2345 {
2346 struct be_mcc_wrb *wrb;
2347 struct be_cmd_req_seeprom_read *req;
2348 int status;
2349
2350 spin_lock_bh(&adapter->mcc_lock);
2351
2352 wrb = wrb_from_mccq(adapter);
2353 if (!wrb) {
2354 status = -EBUSY;
2355 goto err;
2356 }
2357 req = nonemb_cmd->va;
2358
2359 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2360 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2361 nonemb_cmd);
2362
2363 status = be_mcc_notify_wait(adapter);
2364
2365 err:
2366 spin_unlock_bh(&adapter->mcc_lock);
2367 return status;
2368 }
2369
2370 int be_cmd_get_phy_info(struct be_adapter *adapter)
2371 {
2372 struct be_mcc_wrb *wrb;
2373 struct be_cmd_req_get_phy_info *req;
2374 struct be_dma_mem cmd;
2375 int status;
2376
2377 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2378 CMD_SUBSYSTEM_COMMON))
2379 return -EPERM;
2380
2381 spin_lock_bh(&adapter->mcc_lock);
2382
2383 wrb = wrb_from_mccq(adapter);
2384 if (!wrb) {
2385 status = -EBUSY;
2386 goto err;
2387 }
2388 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2389 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2390 &cmd.dma);
2391 if (!cmd.va) {
2392 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2393 status = -ENOMEM;
2394 goto err;
2395 }
2396
2397 req = cmd.va;
2398
2399 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2400 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2401 wrb, &cmd);
2402
2403 status = be_mcc_notify_wait(adapter);
2404 if (!status) {
2405 struct be_phy_info *resp_phy_info =
2406 cmd.va + sizeof(struct be_cmd_req_hdr);
2407 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2408 adapter->phy.interface_type =
2409 le16_to_cpu(resp_phy_info->interface_type);
2410 adapter->phy.auto_speeds_supported =
2411 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2412 adapter->phy.fixed_speeds_supported =
2413 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2414 adapter->phy.misc_params =
2415 le32_to_cpu(resp_phy_info->misc_params);
2416 }
2417 pci_free_consistent(adapter->pdev, cmd.size,
2418 cmd.va, cmd.dma);
2419 err:
2420 spin_unlock_bh(&adapter->mcc_lock);
2421 return status;
2422 }
2423
2424 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2425 {
2426 struct be_mcc_wrb *wrb;
2427 struct be_cmd_req_set_qos *req;
2428 int status;
2429
2430 spin_lock_bh(&adapter->mcc_lock);
2431
2432 wrb = wrb_from_mccq(adapter);
2433 if (!wrb) {
2434 status = -EBUSY;
2435 goto err;
2436 }
2437
2438 req = embedded_payload(wrb);
2439
2440 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2441 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2442
2443 req->hdr.domain = domain;
2444 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2445 req->max_bps_nic = cpu_to_le32(bps);
2446
2447 status = be_mcc_notify_wait(adapter);
2448
2449 err:
2450 spin_unlock_bh(&adapter->mcc_lock);
2451 return status;
2452 }
2453
2454 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2455 {
2456 struct be_mcc_wrb *wrb;
2457 struct be_cmd_req_cntl_attribs *req;
2458 struct be_cmd_resp_cntl_attribs *resp;
2459 int status;
2460 int payload_len = max(sizeof(*req), sizeof(*resp));
2461 struct mgmt_controller_attrib *attribs;
2462 struct be_dma_mem attribs_cmd;
2463
2464 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2465 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2466 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2467 &attribs_cmd.dma);
2468 if (!attribs_cmd.va) {
2469 dev_err(&adapter->pdev->dev,
2470 "Memory allocation failure\n");
2471 return -ENOMEM;
2472 }
2473
2474 if (mutex_lock_interruptible(&adapter->mbox_lock)) {
2475 pci_free_consistent(adapter->pdev, attribs_cmd.size,
attribs_cmd.va, attribs_cmd.dma);
return -1;
}
2476
2477 wrb = wrb_from_mbox(adapter);
2478 if (!wrb) {
2479 status = -EBUSY;
2480 goto err;
2481 }
2482 req = attribs_cmd.va;
2483
2484 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2485 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2486 &attribs_cmd);
2487
2488 status = be_mbox_notify_wait(adapter);
2489 if (!status) {
2490 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2491 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2492 }
2493
2494 err:
2495 mutex_unlock(&adapter->mbox_lock);
2496 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2497 attribs_cmd.dma);
2498 return status;
2499 }
2500
2501 /* Uses mbox */
2502 int be_cmd_req_native_mode(struct be_adapter *adapter)
2503 {
2504 struct be_mcc_wrb *wrb;
2505 struct be_cmd_req_set_func_cap *req;
2506 int status;
2507
2508 if (mutex_lock_interruptible(&adapter->mbox_lock))
2509 return -1;
2510
2511 wrb = wrb_from_mbox(adapter);
2512 if (!wrb) {
2513 status = -EBUSY;
2514 goto err;
2515 }
2516
2517 req = embedded_payload(wrb);
2518
2519 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2520 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
2521
2522 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2523 CAPABILITY_BE3_NATIVE_ERX_API);
2524 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2525
2526 status = be_mbox_notify_wait(adapter);
2527 if (!status) {
2528 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2529 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2530 CAPABILITY_BE3_NATIVE_ERX_API;
2531 if (!adapter->be3_native)
2532 dev_warn(&adapter->pdev->dev,
2533 "adapter not in advanced mode\n");
2534 }
2535 err:
2536 mutex_unlock(&adapter->mbox_lock);
2537 return status;
2538 }
2539
2540 /* Get privilege(s) for a function */
2541 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2542 u32 domain)
2543 {
2544 struct be_mcc_wrb *wrb;
2545 struct be_cmd_req_get_fn_privileges *req;
2546 int status;
2547
2548 spin_lock_bh(&adapter->mcc_lock);
2549
2550 wrb = wrb_from_mccq(adapter);
2551 if (!wrb) {
2552 status = -EBUSY;
2553 goto err;
2554 }
2555
2556 req = embedded_payload(wrb);
2557
2558 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2559 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2560 wrb, NULL);
2561
2562 req->hdr.domain = domain;
2563
2564 status = be_mcc_notify_wait(adapter);
2565 if (!status) {
2566 struct be_cmd_resp_get_fn_privileges *resp =
2567 embedded_payload(wrb);
2568 *privilege = le32_to_cpu(resp->privilege_mask);
2569 }
2570
2571 err:
2572 spin_unlock_bh(&adapter->mcc_lock);
2573 return status;
2574 }
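
/* Example (hypothetical caller; a sketch): a function can cache the
 * privileges it learns here in adapter->cmd_privileges, which is what the
 * per-opcode permission check in this file consults before issuing
 * privileged commands:
 *
 *	u32 privs = 0;
 *
 *	if (!be_cmd_get_fn_privileges(adapter, &privs, 0))
 *		adapter->cmd_privileges = privs;
 */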
2575
2576 /* Uses synchronous MCCQ */
2577 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2578 bool *pmac_id_active, u32 *pmac_id, u8 domain)
2579 {
2580 struct be_mcc_wrb *wrb;
2581 struct be_cmd_req_get_mac_list *req;
2582 int status;
2583 int mac_count;
2584 struct be_dma_mem get_mac_list_cmd;
2585 int i;
2586
2587 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2588 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2589 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2590 get_mac_list_cmd.size,
2591 &get_mac_list_cmd.dma);
2592
2593 if (!get_mac_list_cmd.va) {
2594 dev_err(&adapter->pdev->dev,
2595 "Memory allocation failure during GET_MAC_LIST\n");
2596 return -ENOMEM;
2597 }
2598
2599 spin_lock_bh(&adapter->mcc_lock);
2600
2601 wrb = wrb_from_mccq(adapter);
2602 if (!wrb) {
2603 status = -EBUSY;
2604 goto out;
2605 }
2606
2607 req = get_mac_list_cmd.va;
2608
2609 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2610 OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
2611 wrb, &get_mac_list_cmd);
2612
2613 req->hdr.domain = domain;
2614 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2615 req->perm_override = 1;
2616
2617 status = be_mcc_notify_wait(adapter);
2618 if (!status) {
2619 struct be_cmd_resp_get_mac_list *resp =
2620 get_mac_list_cmd.va;
2621 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2622 /* The MAC list returned could contain one or more active mac_ids
2623 * or one or more true or pseudo permanent MAC addresses.
2624 * If an active mac_id is present, return the first active mac_id
2625 * found.
2626 */
2627 for (i = 0; i < mac_count; i++) {
2628 struct get_list_macaddr *mac_entry;
2629 u16 mac_addr_size;
2630 u32 mac_id;
2631
2632 mac_entry = &resp->macaddr_list[i];
2633 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2634 /* mac_id is a 32 bit value and mac_addr size
2635 * is 6 bytes
2636 */
2637 if (mac_addr_size == sizeof(u32)) {
2638 *pmac_id_active = true;
2639 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2640 *pmac_id = le32_to_cpu(mac_id);
2641 goto out;
2642 }
2643 }
2644 /* If no active mac_id found, return first mac addr */
2645 *pmac_id_active = false;
2646 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2647 ETH_ALEN);
2648 }
2649
2650 out:
2651 spin_unlock_bh(&adapter->mcc_lock);
2652 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2653 get_mac_list_cmd.va, get_mac_list_cmd.dma);
2654 return status;
2655 }
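
/* Example (hypothetical caller; a sketch): probe code that needs the
 * currently programmed address can prefer an active mac_id and fall back to
 * the permanent MAC copied out above; resolving a mac_id to a MAC address
 * uses a separate query command and is only sketched here:
 *
 *	bool active = false;
 *	u32 pmac_id = 0;
 *	u8 mac[ETH_ALEN];
 *
 *	status = be_cmd_get_mac_from_list(adapter, mac, &active, &pmac_id, 0);
 *	if (!status && active)
 *		(resolve pmac_id to a MAC address via a mac-addr query)
 */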
2656
2657 /* Uses synchronous MCCQ */
2658 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2659 u8 mac_count, u32 domain)
2660 {
2661 struct be_mcc_wrb *wrb;
2662 struct be_cmd_req_set_mac_list *req;
2663 int status;
2664 struct be_dma_mem cmd;
2665
2666 memset(&cmd, 0, sizeof(struct be_dma_mem));
2667 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2668 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2669 &cmd.dma, GFP_KERNEL);
2670 if (!cmd.va)
2671 return -ENOMEM;
2672
2673 spin_lock_bh(&adapter->mcc_lock);
2674
2675 wrb = wrb_from_mccq(adapter);
2676 if (!wrb) {
2677 status = -EBUSY;
2678 goto err;
2679 }
2680
2681 req = cmd.va;
2682 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2683 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2684 wrb, &cmd);
2685
2686 req->hdr.domain = domain;
2687 req->mac_count = mac_count;
2688 if (mac_count)
2689 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2690
2691 status = be_mcc_notify_wait(adapter);
2692
2693 err:
2694 dma_free_coherent(&adapter->pdev->dev, cmd.size,
2695 cmd.va, cmd.dma);
2696 spin_unlock_bh(&adapter->mcc_lock);
2697 return status;
2698 }
2699
2700 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2701 u32 domain, u16 intf_id)
2702 {
2703 struct be_mcc_wrb *wrb;
2704 struct be_cmd_req_set_hsw_config *req;
2705 void *ctxt;
2706 int status;
2707
2708 spin_lock_bh(&adapter->mcc_lock);
2709
2710 wrb = wrb_from_mccq(adapter);
2711 if (!wrb) {
2712 status = -EBUSY;
2713 goto err;
2714 }
2715
2716 req = embedded_payload(wrb);
2717 ctxt = &req->context;
2718
2719 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2720 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2721
2722 req->hdr.domain = domain;
2723 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2724 if (pvid) {
2725 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2726 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2727 }
2728
2729 be_dws_cpu_to_le(req->context, sizeof(req->context));
2730 status = be_mcc_notify_wait(adapter);
2731
2732 err:
2733 spin_unlock_bh(&adapter->mcc_lock);
2734 return status;
2735 }
2736
2737 /* Get hypervisor switch configuration */
2738 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2739 u32 domain, u16 intf_id)
2740 {
2741 struct be_mcc_wrb *wrb;
2742 struct be_cmd_req_get_hsw_config *req;
2743 void *ctxt;
2744 int status;
2745 u16 vid;
2746
2747 spin_lock_bh(&adapter->mcc_lock);
2748
2749 wrb = wrb_from_mccq(adapter);
2750 if (!wrb) {
2751 status = -EBUSY;
2752 goto err;
2753 }
2754
2755 req = embedded_payload(wrb);
2756 ctxt = &req->context;
2757
2758 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2759 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2760
2761 req->hdr.domain = domain;
2762 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
2763 intf_id);
2764 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2765 be_dws_cpu_to_le(req->context, sizeof(req->context));
2766
2767 status = be_mcc_notify_wait(adapter);
2768 if (!status) {
2769 struct be_cmd_resp_get_hsw_config *resp =
2770 embedded_payload(wrb);
2771 be_dws_le_to_cpu(&resp->context,
2772 sizeof(resp->context));
2773 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2774 pvid, &resp->context);
2775 *pvid = le16_to_cpu(vid);
2776 }
2777
2778 err:
2779 spin_unlock_bh(&adapter->mcc_lock);
2780 return status;
2781 }
2782
2783 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2784 {
2785 struct be_mcc_wrb *wrb;
2786 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
2787 int status;
2788 int payload_len = sizeof(*req);
2789 struct be_dma_mem cmd;
2790
2791 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2792 CMD_SUBSYSTEM_ETH))
2793 return -EPERM;
2794
2795 memset(&cmd, 0, sizeof(struct be_dma_mem));
2796 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2797 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2798 &cmd.dma);
2799 if (!cmd.va) {
2800 dev_err(&adapter->pdev->dev,
2801 "Memory allocation failure\n");
2802 return -ENOMEM;
2803 }
2804
2805 if (mutex_lock_interruptible(&adapter->mbox_lock)) {
2806 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
return -1;
}
2807
2808 wrb = wrb_from_mbox(adapter);
2809 if (!wrb) {
2810 status = -EBUSY;
2811 goto err;
2812 }
2813
2814 req = cmd.va;
2815
2816 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2817 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2818 payload_len, wrb, &cmd);
2819
2820 req->hdr.version = 1;
2821 req->query_options = BE_GET_WOL_CAP;
2822
2823 status = be_mbox_notify_wait(adapter);
2824 if (!status) {
2825 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
2826 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
2827
2828 /* the command could succeed misleadingly on old firmware
2829 * that is not aware of the V1 version; fake an error. */
2830 if (resp->hdr.response_length < payload_len) {
2831 status = -1;
2832 goto err;
2833 }
2834 adapter->wol_cap = resp->wol_settings;
2835 }
2836 err:
2837 mutex_unlock(&adapter->mbox_lock);
2838 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2839 return status;
2840 }
2841
2842 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
2843 struct be_dma_mem *cmd)
2844 {
2845 struct be_mcc_wrb *wrb;
2846 struct be_cmd_req_get_ext_fat_caps *req;
2847 int status;
2848
2849 if (mutex_lock_interruptible(&adapter->mbox_lock))
2850 return -1;
2851
2852 wrb = wrb_from_mbox(adapter);
2853 if (!wrb) {
2854 status = -EBUSY;
2855 goto err;
2856 }
2857
2858 req = cmd->va;
2859 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2860 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
2861 cmd->size, wrb, cmd);
2862 req->parameter_type = cpu_to_le32(1);
2863
2864 status = be_mbox_notify_wait(adapter);
2865 err:
2866 mutex_unlock(&adapter->mbox_lock);
2867 return status;
2868 }
2869
2870 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
2871 struct be_dma_mem *cmd,
2872 struct be_fat_conf_params *configs)
2873 {
2874 struct be_mcc_wrb *wrb;
2875 struct be_cmd_req_set_ext_fat_caps *req;
2876 int status;
2877
2878 spin_lock_bh(&adapter->mcc_lock);
2879
2880 wrb = wrb_from_mccq(adapter);
2881 if (!wrb) {
2882 status = -EBUSY;
2883 goto err;
2884 }
2885
2886 req = cmd->va;
2887 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
2888 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2889 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
2890 cmd->size, wrb, cmd);
2891
2892 status = be_mcc_notify_wait(adapter);
2893 err:
2894 spin_unlock_bh(&adapter->mcc_lock);
2895 return status;
2896 }
2897
2898 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
2899 {
2900 struct be_mcc_wrb *wrb;
2901 struct be_cmd_req_get_port_name *req;
2902 int status;
2903
2904 if (!lancer_chip(adapter)) {
2905 *port_name = adapter->hba_port_num + '0';
2906 return 0;
2907 }
2908
2909 spin_lock_bh(&adapter->mcc_lock);
2910
2911 wrb = wrb_from_mccq(adapter);
2912 if (!wrb) {
2913 status = -EBUSY;
2914 goto err;
2915 }
2916
2917 req = embedded_payload(wrb);
2918
2919 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2920 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
2921 NULL);
2922 req->hdr.version = 1;
2923
2924 status = be_mcc_notify_wait(adapter);
2925 if (!status) {
2926 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
2927 *port_name = resp->port_name[adapter->hba_port_num];
2928 } else {
2929 *port_name = adapter->hba_port_num + '0';
2930 }
2931 err:
2932 spin_unlock_bh(&adapter->mcc_lock);
2933 return status;
2934 }
2935
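/* Walk the resource-descriptor list returned by GET_FUNC_CONFIG /
 * GET_PROFILE_CONFIG and return the first NIC descriptor, or NULL if none
 * is found within the buffer.
 */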
2936 static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
2937 u32 max_buf_size)
2938 {
2939 struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
2940 int i;
2941
2942 for (i = 0; i < desc_count; i++) {
2943 desc->desc_len = RESOURCE_DESC_SIZE;
2944 if (((void *)desc + desc->desc_len) >
2945 (void *)(buf + max_buf_size)) {
2946 desc = NULL;
2947 break;
2948 }
2949
2950 if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
2951 break;
2952
2953 desc = (void *)desc + desc->desc_len;
2954 }
2955
2956 /* desc is NULL if we ran off the end of the buffer; i == desc_count
2957 * means no NIC descriptor was found in the list.
*/
if (!desc || i == desc_count)
return NULL;
2958
2959 return desc;
2960 }
2961
2962 /* Uses Mbox */
2963 int be_cmd_get_func_config(struct be_adapter *adapter)
2964 {
2965 struct be_mcc_wrb *wrb;
2966 struct be_cmd_req_get_func_config *req;
2967 int status;
2968 struct be_dma_mem cmd;
2969
2970 memset(&cmd, 0, sizeof(struct be_dma_mem));
2971 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
2972 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2973 &cmd.dma);
2974 if (!cmd.va) {
2975 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2976 return -ENOMEM;
2977 }
2978 if (mutex_lock_interruptible(&adapter->mbox_lock)) {
2979 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
return -1;
}
2980
2981 wrb = wrb_from_mbox(adapter);
2982 if (!wrb) {
2983 status = -EBUSY;
2984 goto err;
2985 }
2986
2987 req = cmd.va;
2988
2989 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2990 OPCODE_COMMON_GET_FUNC_CONFIG,
2991 cmd.size, wrb, &cmd);
2992
2993 status = be_mbox_notify_wait(adapter);
2994 if (!status) {
2995 struct be_cmd_resp_get_func_config *resp = cmd.va;
2996 u32 desc_count = le32_to_cpu(resp->desc_count);
2997 struct be_nic_resource_desc *desc;
2998
2999 desc = be_get_nic_desc(resp->func_param, desc_count,
3000 sizeof(resp->func_param));
3001 if (!desc) {
3002 status = -EINVAL;
3003 goto err;
3004 }
3005
3006 adapter->pf_number = desc->pf_num;
3007 adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
3008 adapter->max_vlans = le16_to_cpu(desc->vlan_count);
3009 adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3010 adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
3011 adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
3012 adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
3013
3014 adapter->max_event_queues = le16_to_cpu(desc->eq_count);
3015 adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
3016 }
3017 err:
3018 mutex_unlock(&adapter->mbox_lock);
3019 pci_free_consistent(adapter->pdev, cmd.size,
3020 cmd.va, cmd.dma);
3021 return status;
3022 }
3023
3024 /* Uses sync mcc */
3025 int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
3026 u8 domain)
3027 {
3028 struct be_mcc_wrb *wrb;
3029 struct be_cmd_req_get_profile_config *req;
3030 int status;
3031 struct be_dma_mem cmd;
3032
3033 memset(&cmd, 0, sizeof(struct be_dma_mem));
3034 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3035 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3036 &cmd.dma);
3037 if (!cmd.va) {
3038 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3039 return -ENOMEM;
3040 }
3041
3042 spin_lock_bh(&adapter->mcc_lock);
3043
3044 wrb = wrb_from_mccq(adapter);
3045 if (!wrb) {
3046 status = -EBUSY;
3047 goto err;
3048 }
3049
3050 req = cmd.va;
3051
3052 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3053 OPCODE_COMMON_GET_PROFILE_CONFIG,
3054 cmd.size, wrb, &cmd);
3055
3056 req->type = ACTIVE_PROFILE_TYPE;
3057 req->hdr.domain = domain;
3058
3059 status = be_mcc_notify_wait(adapter);
3060 if (!status) {
3061 struct be_cmd_resp_get_profile_config *resp = cmd.va;
3062 u32 desc_count = le32_to_cpu(resp->desc_count);
3063 struct be_nic_resource_desc *desc;
3064
3065 desc = be_get_nic_desc(resp->func_param, desc_count,
3066 sizeof(resp->func_param));
3067
3068 if (!desc) {
3069 status = -EINVAL;
3070 goto err;
3071 }
3072 *cap_flags = le32_to_cpu(desc->cap_flags);
3073 }
3074 err:
3075 spin_unlock_bh(&adapter->mcc_lock);
3076 pci_free_consistent(adapter->pdev, cmd.size,
3077 cmd.va, cmd.dma);
3078 return status;
3079 }
3080
3081 /* Uses sync mcc */
3082 int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3083 u8 domain)
3084 {
3085 struct be_mcc_wrb *wrb;
3086 struct be_cmd_req_set_profile_config *req;
3087 int status;
3088
3089 spin_lock_bh(&adapter->mcc_lock);
3090
3091 wrb = wrb_from_mccq(adapter);
3092 if (!wrb) {
3093 status = -EBUSY;
3094 goto err;
3095 }
3096
3097 req = embedded_payload(wrb);
3098
3099 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3100 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3101 wrb, NULL);
3102
3103 req->hdr.domain = domain;
3104 req->desc_count = cpu_to_le32(1);
3105
3106 req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
3107 req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
3108 req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3109 req->nic_desc.pf_num = adapter->pf_number;
3110 req->nic_desc.vf_num = domain;
3111
3112 /* Mark fields invalid */
3113 req->nic_desc.unicast_mac_count = 0xFFFF;
3114 req->nic_desc.mcc_count = 0xFFFF;
3115 req->nic_desc.vlan_count = 0xFFFF;
3116 req->nic_desc.mcast_mac_count = 0xFFFF;
3117 req->nic_desc.txq_count = 0xFFFF;
3118 req->nic_desc.rq_count = 0xFFFF;
3119 req->nic_desc.rssq_count = 0xFFFF;
3120 req->nic_desc.lro_count = 0xFFFF;
3121 req->nic_desc.cq_count = 0xFFFF;
3122 req->nic_desc.toe_conn_count = 0xFFFF;
3123 req->nic_desc.eq_count = 0xFFFF;
3124 req->nic_desc.link_param = 0xFF;
3126 req->nic_desc.acpi_params = 0xFF;
3127 req->nic_desc.wol_param = 0x0F;
3128
3129 /* Change BW */
3130 req->nic_desc.bw_min = cpu_to_le32(bps);
3131 req->nic_desc.bw_max = cpu_to_le32(bps);
3132 status = be_mcc_notify_wait(adapter);
3133 err:
3134 spin_unlock_bh(&adapter->mcc_lock);
3135 return status;
3136 }
3137
3138 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3139 int vf_num)
3140 {
3141 struct be_mcc_wrb *wrb;
3142 struct be_cmd_req_get_iface_list *req;
3143 struct be_cmd_resp_get_iface_list *resp;
3144 int status;
3145
3146 spin_lock_bh(&adapter->mcc_lock);
3147
3148 wrb = wrb_from_mccq(adapter);
3149 if (!wrb) {
3150 status = -EBUSY;
3151 goto err;
3152 }
3153 req = embedded_payload(wrb);
3154
3155 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3156 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3157 wrb, NULL);
3158 req->hdr.domain = vf_num + 1;
3159
3160 status = be_mcc_notify_wait(adapter);
3161 if (!status) {
3162 resp = (struct be_cmd_resp_get_iface_list *)req;
3163 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3164 }
3165
3166 err:
3167 spin_unlock_bh(&adapter->mcc_lock);
3168 return status;
3169 }
3170
3171 /* Uses sync mcc */
3172 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3173 {
3174 struct be_mcc_wrb *wrb;
3175 struct be_cmd_enable_disable_vf *req;
3176 int status;
3177
3178 if (!lancer_chip(adapter))
3179 return 0;
3180
3181 spin_lock_bh(&adapter->mcc_lock);
3182
3183 wrb = wrb_from_mccq(adapter);
3184 if (!wrb) {
3185 status = -EBUSY;
3186 goto err;
3187 }
3188
3189 req = embedded_payload(wrb);
3190
3191 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3192 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3193 wrb, NULL);
3194
3195 req->hdr.domain = domain;
3196 req->enable = 1;
3197 status = be_mcc_notify_wait(adapter);
3198 err:
3199 spin_unlock_bh(&adapter->mcc_lock);
3200 return status;
3201 }
3202
3203 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
3204 {
3205 struct be_mcc_wrb *wrb;
3206 struct be_cmd_req_intr_set *req;
3207 int status;
3208
3209 if (mutex_lock_interruptible(&adapter->mbox_lock))
3210 return -1;
3211
3212 wrb = wrb_from_mbox(adapter);
3213
3214 req = embedded_payload(wrb);
3215
3216 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3217 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
3218 wrb, NULL);
3219
3220 req->intr_enabled = intr_enable;
3221
3222 status = be_mbox_notify_wait(adapter);
3223
3224 mutex_unlock(&adapter->mbox_lock);
3225 return status;
3226 }
3227
3228 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
3229 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
3230 {
3231 struct be_adapter *adapter = netdev_priv(netdev_handle);
3232 struct be_mcc_wrb *wrb;
3233 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
3234 struct be_cmd_req_hdr *req;
3235 struct be_cmd_resp_hdr *resp;
3236 int status;
3237
3238 spin_lock_bh(&adapter->mcc_lock);
3239
3240 wrb = wrb_from_mccq(adapter);
3241 if (!wrb) {
3242 status = -EBUSY;
3243 goto err;
3244 }
3245 req = embedded_payload(wrb);
3246 resp = embedded_payload(wrb);
3247
3248 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
3249 hdr->opcode, wrb_payload_size, wrb, NULL);
3250 memcpy(req, wrb_payload, wrb_payload_size);
3251 be_dws_cpu_to_le(req, wrb_payload_size);
3252
3253 status = be_mcc_notify_wait(adapter);
3254 if (cmd_status)
3255 *cmd_status = (status & 0xffff);
3256 if (ext_status)
3257 *ext_status = 0;
3258 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
3259 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
3260 err:
3261 spin_unlock_bh(&adapter->mcc_lock);
3262 return status;
3263 }
3264 EXPORT_SYMBOL(be_roce_mcc_cmd);
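
/* be_roce_mcc_cmd() above is the pass-through used by the companion RoCE
 * driver: the caller hands in a fully formed command payload (header
 * included), it is replayed on this adapter's MCC queue, and the raw
 * response, header included, is copied back. Only the low 16 bits of the
 * completion status are reported through cmd_status, and ext_status is
 * currently always cleared.
 */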