]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/scsi/be2iscsi/be_cmds.c
Merge branches 'for-4.11/upstream-fixes', 'for-4.12/accutouch', 'for-4.12/cp2112...
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / be2iscsi / be_cmds.c
CommitLineData
6733b39a 1/**
60f36e04 2 * Copyright (C) 2005 - 2016 Broadcom
6733b39a
JK
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
60f36e04 11 * linux-drivers@broadcom.com
6733b39a 12 *
c4f39bda 13 * Emulex
255fa9a3
JK
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6733b39a
JK
16 */
17
2177199d
JSJ
18#include <scsi/iscsi_proto.h>
19
4eea99d5 20#include "be_main.h"
6733b39a
JK
21#include "be.h"
22#include "be_mgmt.h"
6733b39a 23
d1d5ca88
JB
/*
 * UE (Unrecoverable Error) Status Low CSR.
 * 32 entries, one name per bit of the low UE status word (index ==
 * presumed bit position — TODO confirm against the decode path, which
 * is not visible in this chunk).
 */
static const char * const desc_ue_status_low[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
59
/*
 * UE (Unrecoverable Error) Status High CSR.
 * 32 entries, one name per bit of the high UE status word; trailing
 * "Unknown" entries cover reserved bits.
 */
static const char * const desc_ue_status_hi[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
95
/**
 * alloc_mcc_wrb - allocate an MCC WRB and a command tag
 * @phba: driver private structure
 * @ref_tag: out-parameter; receives the tag allocated for this command
 *
 * Under mcc_lock: pulls the next free tag from the circular mcc_tag[]
 * pool, resets the per-tag state, and carves the next WRB off the MCC
 * queue head.  The WRB index is encoded into tag0 next to the tag so
 * the completion handler can locate this WRB again.
 *
 * Returns the zeroed WRB on success, or NULL when the queue is full or
 * no tag is available (*ref_tag is untouched in that case).
 */
struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
				 unsigned int *ref_tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb = NULL;
	unsigned int tag;

	spin_lock(&phba->ctrl.mcc_lock);
	/* every outstanding WRB occupies a queue slot until completion */
	if (mccq->used == mccq->len) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC queue full: WRB used %u tag avail %u\n",
			    mccq->used, phba->ctrl.mcc_tag_available);
		goto alloc_failed;
	}

	if (!phba->ctrl.mcc_tag_available)
		goto alloc_failed;

	/* tag 0 is never a valid entry in the pool; treat it as corruption */
	tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
	if (!tag) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n",
			    phba->ctrl.mcc_tag_available,
			    phba->ctrl.mcc_alloc_index);
		goto alloc_failed;
	}

	/* return this tag for further reference */
	*ref_tag = tag;
	phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
	/* fresh tag: clear stale status, state, and async callback */
	phba->ctrl.mcc_tag_status[tag] = 0;
	phba->ctrl.ptag_state[tag].tag_state = 0;
	phba->ctrl.ptag_state[tag].cbfn = NULL;
	phba->ctrl.mcc_tag_available--;
	/* advance the circular allocation index with wrap-around */
	if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
		phba->ctrl.mcc_alloc_index = 0;
	else
		phba->ctrl.mcc_alloc_index++;

	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	wrb->tag0 = tag;
	/* stash the WRB slot index in the upper tag0 bits for completion */
	wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK;
	queue_head_inc(mccq);
	mccq->used++;

alloc_failed:
	spin_unlock(&phba->ctrl.mcc_lock);
	return wrb;
}
148
149void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
150{
151 struct be_queue_info *mccq = &ctrl->mcc_obj.q;
152
96eb8d4d 153 spin_lock(&ctrl->mcc_lock);
69fd6d7b
JB
154 tag = tag & MCC_Q_CMD_TAG_MASK;
155 ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
156 if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
157 ctrl->mcc_free_index = 0;
158 else
159 ctrl->mcc_free_index++;
160 ctrl->mcc_tag_available++;
090e2184 161 mccq->used--;
96eb8d4d 162 spin_unlock(&ctrl->mcc_lock);
69fd6d7b
JB
163}
164
/*
 * __beiscsi_mcc_compl_status - return the status of MCC completion
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * Decodes the status/extd-status/WRB-index packed into
 * ctrl.mcc_tag_status[tag] by the completion handler.  For embedded
 * commands (mbx_cmd_mem == NULL) the request header is recovered from
 * the WRB recorded in the completion; for non-embedded commands it is
 * read from the caller-supplied DMA buffer.
 *
 * return
 *	Success: 0
 *	Failure: -EIO on error status, -EAGAIN when the response buffer
 *	was too small (caller may retry with a bigger buffer)
 */
int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
			       unsigned int tag,
			       struct be_mcc_wrb **wrb,
			       struct be_dma_mem *mbx_cmd_mem)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	uint16_t status = 0, addl_status = 0, wrb_num = 0;
	struct be_cmd_resp_hdr *mbx_resp_hdr;
	struct be_cmd_req_hdr *mbx_hdr;
	struct be_mcc_wrb *temp_wrb;
	uint32_t mcc_tag_status;
	int rc = 0;

	mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
	status = (mcc_tag_status & CQE_STATUS_MASK);
	addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
			CQE_STATUS_ADDL_SHIFT);

	if (mbx_cmd_mem) {
		/* non-embedded: header lives in the caller's DMA buffer */
		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
	} else {
		/* embedded: look the WRB back up by its recorded index */
		wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
			   CQE_STATUS_WRB_SHIFT;
		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
		mbx_hdr = embedded_payload(temp_wrb);

		if (wrb)
			*wrb = temp_wrb;
	}

	if (status || addl_status) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n",
			    mbx_hdr->subsystem, mbx_hdr->opcode,
			    status, addl_status);
		rc = -EIO;
		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
			mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
				    BEISCSI_LOG_CONFIG,
				    "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n",
				    mbx_resp_hdr->response_length,
				    mbx_resp_hdr->actual_resp_len);
			rc = -EAGAIN;
		}
	}

	return rc;
}
228
/*
 * beiscsi_mccq_compl_wait()- Wait for and process completion in MCC CQ
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * Sleeps (interruptible, up to BEISCSI_HOST_MBX_TIMEOUT ms) until the
 * completion handler fills mcc_tag_status[tag].  On timeout the tag and
 * any non-embedded DMA buffer are handed over to the completion path,
 * which frees them when FW eventually completes; the caller must not
 * reuse them.
 *
 * return
 *	Success: 0
 *	Failure: -EIO (adapter in error / port offline), -EBUSY (timeout),
 *	or the decoded command status from __beiscsi_mcc_compl_status()
 **/
int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
			    unsigned int tag,
			    struct be_mcc_wrb **wrb,
			    struct be_dma_mem *mbx_cmd_mem)
{
	int rc = 0;

	/* adapter dead: no completion will ever arrive for this tag */
	if (beiscsi_hba_in_error(phba)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/* wait for the mccq completion */
	rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag],
					      phba->ctrl.mcc_tag_status[tag],
					      msecs_to_jiffies(
						BEISCSI_HOST_MBX_TIMEOUT));
	/**
	 * Return EIO if port is being disabled. Associated DMA memory, if any,
	 * is freed by the caller. When port goes offline, MCCQ is cleaned up
	 * so does WRB.
	 */
	if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/**
	 * If MBOX cmd timeout expired, tag and resource allocated
	 * for cmd is not freed until FW returns completion.
	 * (rc <= 0 covers both timeout and -ERESTARTSYS from a signal.)
	 */
	if (rc <= 0) {
		struct be_dma_mem *tag_mem;

		/**
		 * PCI/DMA memory allocated and posted in non-embedded mode
		 * will have mbx_cmd_mem != NULL.
		 * Save virtual and bus addresses for the command so that it
		 * can be freed later.
		 **/
		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
		if (mbx_cmd_mem) {
			tag_mem->size = mbx_cmd_mem->size;
			tag_mem->va = mbx_cmd_mem->va;
			tag_mem->dma = mbx_cmd_mem->dma;
		} else
			tag_mem->size = 0;

		/* first make tag_mem_state visible to all */
		wmb();
		set_bit(MCC_TAG_STATE_TIMEOUT,
			&phba->ctrl.ptag_state[tag].tag_state);

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Completion timed out\n");
		return -EBUSY;
	}

	rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem);

	free_mcc_wrb(&phba->ctrl, tag);
	return rc;
}
309
/*
 * beiscsi_process_mbox_compl()- Check the MBX completion status
 * @ctrl: Function specific MBX data structure
 * @compl: Completion status of MBX Command
 *
 * Check for the MBX completion status when BMBX method used.
 * Swaps the completion to host endianness in place and clears the
 * valid-flag word so the entry is not re-processed.
 *
 * return
 *	Success: Zero
 *	Failure: -EBUSY if no completion posted, else the FW status code
 **/
static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
				      struct be_mcc_compl *compl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
	u16 compl_status, extd_status;

	/**
	 * To check if valid bit is set, check the entire word as we don't know
	 * the endianness of the data (old entry is host endian while a new
	 * entry is little endian)
	 */
	if (!compl->flags) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : BMBX busy, no completion\n");
		return -EBUSY;
	}
	compl->flags = le32_to_cpu(compl->flags);
	WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);

	/**
	 * Just swap the status to host endian;
	 * mcc tag is opaquely copied from mcc_wrb.
	 */
	be_dws_le_to_cpu(compl, 4);
	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		CQE_STATUS_EXTD_MASK;
	/* Need to reset the entire word that houses the valid bit */
	compl->flags = 0;

	if (compl_status == MCC_STATUS_SUCCESS)
		return 0;

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
		    hdr->subsystem, hdr->opcode, compl_status, extd_status);
	return compl_status;
}
363
/*
 * beiscsi_process_async_link - handle an async link-state event
 * @phba: driver private structure
 * @compl: the async completion, reinterpreted as a link-state event
 *
 * Records port speed, updates the LINK_UP bit in phba->state, kicks
 * off boot work on link-up when a boot target was found, and fails all
 * iSCSI sessions on link-down.
 */
static void beiscsi_process_async_link(struct beiscsi_hba *phba,
				       struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt;

	evt = (struct be_async_event_link_state *)compl;

	phba->port_speed = evt->port_speed;
	/**
	 * Check logical link status in ASYNC event.
	 * This has been newly introduced in SKH-R Firmware 10.0.338.45.
	 **/
	if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
		set_bit(BEISCSI_HBA_LINK_UP, &phba->state);
		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Up on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
	} else {
		clear_bit(BEISCSI_HBA_LINK_UP, &phba->state);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Down on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
		/* take down every session; they recover when link returns */
		iscsi_host_for_each_session(phba->shost,
					    beiscsi_session_fail);
	}
}
392
/*
 * Text for each optic state reported in the SLI MISCONFIGURED port
 * event, indexed by the per-port state byte decoded in
 * beiscsi_process_async_sli().
 *
 * Declared const char * const to match the file's other string tables
 * (desc_ue_status_low/hi); the previous "static char *[]" allowed
 * writable pointers to string literals (writing through them is UB).
 */
static const char * const beiscsi_port_misconf_event_msg[] = {
	"Physical Link is functional.",
	"Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
	"Optics of two types installed - Remove one optic or install matching pair of optics.",
	"Incompatible optics - Replace with compatible optics for card to function.",
	"Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics - Replace with Avago Certified optics to enable link operation."
};
401
/*
 * beiscsi_process_async_sli - handle an async SLI event
 * @phba: driver private structure
 * @compl: the async completion, reinterpreted as an SLI event
 *
 * Only the MISCONFIGURED physical-port event is handled.  The per-port
 * optic state and link-effect bytes are extracted from event_data1/2
 * (one byte per physical port), cached in phba->optic_state, and a
 * human-readable message is logged on state change.
 */
static void beiscsi_process_async_sli(struct beiscsi_hba *phba,
				      struct be_mcc_compl *compl)
{
	struct be_async_event_sli *async_sli;
	u8 evt_type, state, old_state, le;
	char *sev = KERN_WARNING;
	char *msg = NULL;

	evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT;
	evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK;

	/* processing only MISCONFIGURED physical port event */
	if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
		return;

	async_sli = (struct be_async_event_sli *)compl;
	/* each physical port owns one byte of event_data1/event_data2 */
	state = async_sli->event_data1 >>
		 (phba->fw_config.phys_port * 8) & 0xff;
	le = async_sli->event_data2 >>
	      (phba->fw_config.phys_port * 8) & 0xff;

	old_state = phba->optic_state;
	phba->optic_state = state;

	if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) {
		/* fw is reporting a state we don't know, log and return */
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
			      phba->port_name, async_sli->event_data1);
		return;
	}

	if (ASYNC_SLI_LINK_EFFECT_VALID(le)) {
		/* log link effect for unqualified-4, uncertified-5 optics */
		if (state > 3)
			msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ?
				" Link is non-operational." :
				" Link is operational.";
		/* 1 - info */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1)
			sev = KERN_INFO;
		/* 2 - error */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2)
			sev = KERN_ERR;
	}

	/* only log transitions, not repeats of the same state */
	if (old_state != phba->optic_state)
		__beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n",
			      phba->port_name,
			      beiscsi_port_misconf_event_msg[state],
			      !msg ? "" : msg);
}
454
455void beiscsi_process_async_event(struct beiscsi_hba *phba,
456 struct be_mcc_compl *compl)
457{
458 char *sev = KERN_INFO;
459 u8 evt_code;
460
461 /* interpret flags as an async trailer */
462 evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT;
463 evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK;
464 switch (evt_code) {
465 case ASYNC_EVENT_CODE_LINK_STATE:
9c4f8b01 466 beiscsi_process_async_link(phba, compl);
53aefe25
JB
467 break;
468 case ASYNC_EVENT_CODE_ISCSI:
50a4b824
JB
469 if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
470 beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
53aefe25
JB
471 sev = KERN_ERR;
472 break;
473 case ASYNC_EVENT_CODE_SLI:
474 beiscsi_process_async_sli(phba, compl);
475 break;
476 default:
477 /* event not registered */
478 sev = KERN_ERR;
479 }
480
481 beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
9c4f8b01
JB
482 "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n",
483 evt_code, compl->status, compl->flags);
53aefe25
JB
484}
485
/*
 * beiscsi_process_mcc_compl - process one MCC queue completion
 * @ctrl: function-specific MBX data structure
 * @compl: completion entry (little endian; swapped in place here)
 *
 * Recovers the tag and WRB index posted by alloc_mcc_wrb(), then:
 *  - drops completions for tags no longer RUNNING (stale/unposted);
 *  - for TIMEOUT tags, frees the parked DMA buffer and the WRB;
 *  - otherwise packs status into mcc_tag_status[tag] and either invokes
 *    the async callback, quietly checks status (IGNORE), or wakes the
 *    waiter in beiscsi_mccq_compl_wait().
 *
 * Always returns 0.
 */
int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
			      struct be_mcc_compl *compl)
{
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u16 compl_status, extd_status;
	struct be_dma_mem *tag_mem;
	unsigned int tag, wrb_idx;

	be_dws_le_to_cpu(compl, 4);
	tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK);
	wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT;

	if (!test_bit(MCC_TAG_STATE_RUNNING,
		      &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX |
			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX cmd completed but not posted\n");
		return 0;
	}

	/* end MCC with this tag */
	clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);

	if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Completion for timeout Command from FW\n");
		/**
		 * Check for the size before freeing resource.
		 * Only for non-embedded cmd, PCI resource is allocated.
		 **/
		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
		if (tag_mem->size) {
			pci_free_consistent(ctrl->pdev, tag_mem->size,
					    tag_mem->va, tag_mem->dma);
			tag_mem->size = 0;
		}
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		      CQE_STATUS_EXTD_MASK;
	/* The ctrl.mcc_tag_status[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	ctrl->mcc_tag_status[tag] = CQE_VALID_MASK;
	ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT);
	ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) &
				     CQE_STATUS_ADDL_MASK;
	ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);

	if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) {
		if (ctrl->ptag_state[tag].cbfn)
			ctrl->ptag_state[tag].cbfn(phba, tag);
		else
			__beiscsi_log(phba, KERN_ERR,
				      "BC_%d : MBX ASYNC command with no callback\n");
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) {
		/* just check completion status and free wrb */
		__beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	/* synchronous command: waiter in beiscsi_mccq_compl_wait() finishes */
	wake_up_interruptible(&ctrl->mcc_wait[tag]);
	return 0;
}
562
/*
 * be_mcc_notify - ring the MCC doorbell to post one WRB
 * @phba: driver private structure
 * @tag: tag of the command being posted; marked RUNNING here
 *
 * The wmb() orders the WRB/tag-state writes before the doorbell write
 * so the adapter never DMAs a half-built request.
 */
void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	u32 val = 0;

	set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state);
	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	/* make request available for DMA */
	wmb();
	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}
575
/*
 * be_mbox_db_ready_poll()- Check ready status
 * @ctrl: Function specific MBX data structure
 *
 * Busy-waits for the FW ready bit before a BMBX doorbell write can be
 * issued.  This path is used during init only; sleeps 1 ms between
 * polls.
 *
 * return
 *	Success: 0
 *	Failure: -EIO (adapter in error or PCI read returns all-ones),
 *	-EBUSY on timeout (also sets BEISCSI_HBA_FW_TIMEOUT)
 **/
static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
{
	/* wait 30s for generic non-flash MBOX operation */
#define BEISCSI_MBX_RDY_BIT_TIMEOUT 30000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	unsigned long timeout;
	u32 ready;

	/*
	 * This BMBX busy wait path is used during init only; the full
	 * BEISCSI_MBX_RDY_BIT_TIMEOUT (30s) bounds the wait.
	 */
	timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
	do {
		if (beiscsi_hba_in_error(phba))
			return -EIO;

		ready = ioread32(db);
		/* all-ones read means the PCI device has dropped off */
		if (ready == 0xffffffff)
			return -EIO;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			return 0;

		if (time_after(jiffies, timeout))
			break;
		/* 1ms sleep is enough in most cases */
		schedule_timeout_uninterruptible(msecs_to_jiffies(1));
	} while (!ready);

	beiscsi_log(phba, KERN_ERR,
		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : FW Timed Out\n");
	set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state);
	return -EBUSY;
}
625
/*
 * be_mbox_notify: Notify adapter of new BMBX command
 * @ctrl: Function specific MBX data structure
 *
 * Posts the bootstrap mailbox to the adapter using the two-phase
 * doorbell protocol: first the high 32 bits of the mailbox DMA address
 * (with the HI flag), then the low bits.  Each phase waits for the
 * ready bit before proceeding, and the completion is then decoded.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero (poll error or FW completion status)
 **/
static int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* phase 1: upper dword of the mailbox address, HI flag set */
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* phase 2: lower dword of the mailbox address */
	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* RDY is set; small delay before CQE read. */
	udelay(1);

	status = beiscsi_process_mbox_compl(ctrl, &mbox->compl);
	return status;
}
674
6733b39a
JK
675void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
676 bool embedded, u8 sge_cnt)
677{
678 if (embedded)
fa1261c4 679 wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;
6733b39a 680 else
fa1261c4
JB
681 wrb->emb_sgecnt_special |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
682 MCC_WRB_SGE_CNT_SHIFT;
6733b39a
JK
683 wrb->payload_length = payload_len;
684 be_dws_cpu_to_le(wrb, 8);
685}
686
687void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
688 u8 subsystem, u8 opcode, int cmd_len)
689{
690 req_hdr->opcode = opcode;
691 req_hdr->subsystem = subsystem;
692 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
e175defe 693 req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
6733b39a
JK
694}
695
696static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
697 struct be_dma_mem *mem)
698{
699 int i, buf_pages;
700 u64 dma = (u64) mem->dma;
701
702 buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
703 for (i = 0; i < buf_pages; i++) {
704 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
705 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
706 dma += PAGE_SIZE_4K;
707 }
708}
709
710static u32 eq_delay_to_mult(u32 usec_delay)
711{
712#define MAX_INTR_RATE 651042
713 const u32 round = 10;
714 u32 multiplier;
715
716 if (usec_delay == 0)
717 multiplier = 0;
718 else {
719 u32 interrupt_rate = 1000000 / usec_delay;
720 if (interrupt_rate == 0)
721 multiplier = 1023;
722 else {
723 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
724 multiplier /= interrupt_rate;
725 multiplier = (multiplier + round / 2) / round;
726 multiplier = min(multiplier, (u32) 1023);
727 }
728 }
729 return multiplier;
730}
731
732struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
733{
734 return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
735}
736
/**
 * beiscsi_cmd_eq_create - create an event queue on the adapter
 * @ctrl: function-specific MBX data structure
 * @eq: queue to create; id/created are filled in on success
 * @eq_delay: interrupt delay in usec, encoded via eq_delay_to_mult()
 *
 * Issues OPCODE_COMMON_EQ_CREATE over the bootstrap mailbox (BMBX),
 * serialised by mbox_lock.
 *
 * Returns 0 on success, non-zero mailbox status otherwise.
 */
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
		      eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
776
6733b39a
JK
777int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
778 struct be_queue_info *cq, struct be_queue_info *eq,
779 bool sol_evts, bool no_delay, int coalesce_wm)
780{
781 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
782 struct be_cmd_req_cq_create *req = embedded_payload(wrb);
783 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
99bc5d55 784 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
6733b39a
JK
785 struct be_dma_mem *q_mem = &cq->dma_mem;
786 void *ctxt = &req->context;
787 int status;
788
c03a50f7 789 mutex_lock(&ctrl->mbox_lock);
6733b39a
JK
790 memset(wrb, 0, sizeof(*wrb));
791
792 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
793
794 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
795 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
6733b39a
JK
796
797 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
2c9dfd36 798 if (is_chip_be2_be3r(phba)) {
eaae5267
JSJ
799 AMAP_SET_BITS(struct amap_cq_context, coalescwm,
800 ctxt, coalesce_wm);
801 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
802 AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
803 __ilog2_u32(cq->len / 256));
804 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
805 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
806 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
807 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
808 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
809 AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
810 PCI_FUNC(ctrl->pdev->devfn));
2c9dfd36
JK
811 } else {
812 req->hdr.version = MBX_CMD_VER2;
813 req->page_size = 1;
814 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
815 ctxt, coalesce_wm);
816 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
817 ctxt, no_delay);
818 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
819 __ilog2_u32(cq->len / 256));
820 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
821 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
822 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
823 AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
eaae5267 824 }
6733b39a 825
6733b39a
JK
826 be_dws_cpu_to_le(ctxt, sizeof(req->context));
827
828 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
829
830 status = be_mbox_notify(ctrl);
831 if (!status) {
832 cq->id = le16_to_cpu(resp->cq_id);
833 cq->created = true;
834 } else
99bc5d55
JSJ
835 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
836 "BC_%d : In be_cmd_cq_create, status=ox%08x\n",
837 status);
838
c03a50f7 839 mutex_unlock(&ctrl->mbox_lock);
6733b39a
JK
840
841 return status;
842}
843
844static u32 be_encoded_q_len(int q_len)
845{
846 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
847 if (len_encoded == 16)
848 len_encoded = 0;
849 return len_encoded;
850}
bfead3b2 851
/**
 * beiscsi_cmd_mccq_create - create the MCC queue on the adapter
 * @phba: driver private structure
 * @mccq: MCC queue to create; id/created are filled in on success
 * @cq: completion queue the MCCQ posts its completions to
 *
 * Issues OPCODE_COMMON_MCC_CREATE_EXT over the bootstrap mailbox and
 * subscribes to the LINK_STATE, ISCSI, and SLI async event codes.
 *
 * Returns 0 on success, non-zero mailbox status otherwise.
 */
int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create_ext *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	struct be_ctrl_info *ctrl;
	void *ctxt;
	int status;

	mutex_lock(&phba->ctrl.mbox_lock);
	ctrl = &phba->ctrl;
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	/* async events this function wants delivered on the MCCQ */
	req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
		      PCI_FUNC(phba->pcidev->devfn));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&phba->ctrl.mbox_lock);

	return status;
}
901
/**
 * beiscsi_cmd_q_destroy - destroy a queue on the adapter
 * @ctrl: function-specific MBX data structure
 * @q: queue to destroy (its id is sent, except for QTYPE_SGL)
 * @queue_type: one of QTYPE_EQ/CQ/MCCQ/WRBQ/DPDUQ/SGL
 *
 * Maps the queue type to the matching DESTROY opcode and issues it
 * over the bootstrap mailbox.  An unknown queue type is a driver bug
 * and triggers BUG().
 *
 * Returns 0 on success, non-zero mailbox status otherwise.
 */
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u8 subsys = 0, opcode = 0;
	int status;

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BC_%d : In beiscsi_cmd_q_destroy "
		    "queue_type : %d\n", queue_type);

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		mutex_unlock(&ctrl->mbox_lock);
		BUG();
		return -ENXIO;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	/* SGL removal addresses pages, not a queue id */
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
958
/**
 * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
 * @ctrl: ptr to ctrl_info
 * @cq: Completion Queue
 * @dq: Default Queue
 * @length: ring size
 * @entry_size: size of each entry in DEFQ
 * @is_header: Header or Data DEFQ
 * @ulp_num: Bind to which ULP
 *
 * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted
 * on this queue by the FW
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 *
 **/
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size, uint8_t is_header,
				    uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	void *ctxt = &req->context;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	/* dual-ULP aware FW takes the target ULP in the request itself */
	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	/* BE2/BE3-R and later chips use different context layouts */
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
					       sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      cq_id_recv, ctxt, cq->id);
	} else {
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
					       sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      cq_id_recv, ctxt, cq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_ring *defq_ring;
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
		if (is_header)
			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
		else
			defq_ring = &phba->phwi_ctrlr->
				    default_pdu_data[ulp_num];

		defq_ring->id = dq->id;

		/* non-DUA FW binds everything to ULP0 with a fixed doorbell */
		if (!phba->fw_config.dual_ulp_aware) {
			defq_ring->ulp_num = BEISCSI_ULP0;
			defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
		} else {
			defq_ring->ulp_num = resp->ulp_num;
			defq_ring->doorbell_offset = resp->doorbell_offset;
		}
	}
	mutex_unlock(&ctrl->mbox_lock);

	return status;
}
1066
/**
 * be_cmd_wrbq_create()- Create WRBQ
 * @ctrl: ptr to ctrl_info
 * @q_mem: memory details for the queue
 * @wrbq: queue info
 * @pwrb_context: ptr to wrb_context
 * @ulp_num: ULP on which the WRBQ is to be created
 *
 * Create WRBQ on the passed ULP_NUM.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
			struct be_dma_mem *q_mem,
			struct be_queue_info *wrbq,
			struct hwi_wrb_context *pwrb_context,
			uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	/* req and resp alias the same embedded payload; the response is
	 * read back from this buffer after the mailbox completes.
	 */
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
		OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	/* dual-ULP aware FW takes the target ULP in the request itself */
	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;

		pwrb_context->cid = wrbq->id;
		/* non-DUA FW binds the queue to ULP0 with a fixed doorbell */
		if (!phba->fw_config.dual_ulp_aware) {
			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
			pwrb_context->ulp_num = BEISCSI_ULP0;
		} else {
			pwrb_context->ulp_num = resp->ulp_num;
			pwrb_context->doorbell_offset = resp->doorbell_offset;
		}
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
1124
/**
 * be_cmd_iscsi_post_template_hdr()- Post template header buffers to FW
 * @ctrl: ptr to ctrl_info
 * @q_mem: DMA memory holding the template header pages
 *
 * Registers the 4K pages spanned by @q_mem with the adapter via the
 * ADD_TEMPLATE_HEADER_BUFFERS mailbox command (iSCSI template type).
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
				   struct be_dma_mem *q_mem)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_template_pages_req *req = embedded_payload(wrb);
	int status;

	mutex_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
1148
/**
 * be_cmd_iscsi_remove_template_hdr()- Remove template header buffers
 * @ctrl: ptr to ctrl_info
 *
 * Counterpart of be_cmd_iscsi_post_template_hdr(): issues the
 * REMOVE_TEMPLATE_HEADER_BUFFERS mailbox command for the iSCSI type.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_remove_template_pages_req *req = embedded_payload(wrb);
	int status;

	mutex_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));

	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;

	status = be_mbox_notify(ctrl);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
1169
/**
 * be_cmd_iscsi_post_sgl_pages()- Post SGL pages to FW
 * @ctrl: ptr to ctrl_info
 * @q_mem: DMA memory for the pages; note q_mem->dma is advanced past
 *	   every chunk posted, so the caller's copy is modified
 * @page_offset: page index at which posting starts
 * @num_pages: number of pages to post
 *
 * Pages are posted in chunks capped by how many page entries one
 * request can carry. On any chunk failure, the partially-configured
 * SGL is torn down via beiscsi_cmd_q_destroy(QTYPE_SGL).
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	/* 0xff runs a single iteration but is still forwarded to the FW
	 * as req->num_pages below - presumably a FW sentinel value;
	 * NOTE(review): confirm against the SLI command spec.
	 */
	if (num_pages == 0xff)
		num_pages = 1;

	mutex_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		/* cap this chunk at the page entries the request can hold */
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BC_%d : FW CMD to map iscsi frags failed.\n");

			goto error;
		}
	} while (num_pages > 0);
error:
	mutex_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}
e5285860 1219
/**
 * be_cmd_set_vlan()- Configure VLAN parameters on the adapter
 * @phba: device priv structure instance
 * @vlan_tag: TAG to be set
 *
 * Set the VLAN_TAG for the adapter or Disable VLAN on adapter
 *
 * returns
 *	TAG for the MBX Cmd on success
 *	0 when the lock wait was interrupted or no MCC WRB was free
 * **/
int be_cmd_set_vlan(struct beiscsi_hba *phba,
		     uint16_t vlan_tag)
{
	unsigned int tag;
	struct be_mcc_wrb *wrb;
	struct be_cmd_set_vlan_req *req;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	if (mutex_lock_interruptible(&ctrl->mbox_lock))
		return 0;
	wrb = alloc_mcc_wrb(phba, &tag);
	if (!wrb) {
		mutex_unlock(&ctrl->mbox_lock);
		return 0;
	}

	req = embedded_payload(wrb);
	be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
			   sizeof(*req));

	req->interface_hndl = phba->interface_handle;
	/* NOTE(review): the tag is written into the vlan_priority field -
	 * looks intentional given the request layout, but worth confirming.
	 */
	req->vlan_priority = vlan_tag;

	be_mcc_notify(phba, tag);
	mutex_unlock(&ctrl->mbox_lock);

	return tag;
}
6694095b 1260
480195c2
JB
1261int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
1262 struct beiscsi_hba *phba)
1263{
1264 struct be_dma_mem nonemb_cmd;
1265 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1266 struct be_mgmt_controller_attributes *req;
1267 struct be_sge *sge = nonembedded_sgl(wrb);
1268 int status = 0;
1269
1270 nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
1271 sizeof(struct be_mgmt_controller_attributes),
1272 &nonemb_cmd.dma);
1273 if (nonemb_cmd.va == NULL) {
1274 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1275 "BG_%d : pci_alloc_consistent failed in %s\n",
1276 __func__);
1277 return -ENOMEM;
1278 }
1279 nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
1280 req = nonemb_cmd.va;
1281 memset(req, 0, sizeof(*req));
1282 mutex_lock(&ctrl->mbox_lock);
1283 memset(wrb, 0, sizeof(*wrb));
1284 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
1285 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1286 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
1287 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
1288 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
1289 sge->len = cpu_to_le32(nonemb_cmd.size);
1290 status = be_mbox_notify(ctrl);
1291 if (!status) {
1292 struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
1293
1294 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1295 "BG_%d : Firmware Version of CMD : %s\n"
1296 "Firmware Version is : %s\n"
1297 "Developer Build, not performing version check...\n",
1298 resp->params.hba_attribs
1299 .flashrom_version_string,
1300 resp->params.hba_attribs.
1301 firmware_version_string);
1302
1303 phba->fw_config.iscsi_features =
1304 resp->params.hba_attribs.iscsi_features;
1305 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1306 "BM_%d : phba->fw_config.iscsi_features = %d\n",
1307 phba->fw_config.iscsi_features);
1308 memcpy(phba->fw_ver_str, resp->params.hba_attribs.
1309 firmware_version_string, BEISCSI_VER_STRLEN);
1310 } else
1311 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1312 "BG_%d : Failed in beiscsi_check_supported_fw\n");
1313 mutex_unlock(&ctrl->mbox_lock);
1314 if (nonemb_cmd.va)
1315 pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
1316 nonemb_cmd.va, nonemb_cmd.dma);
1317
1318 return status;
1319}
1320
/**
 * beiscsi_get_fw_config()- Get the FW config for the function
 * @ctrl: ptr to Ctrl Info
 * @phba: ptr to the dev priv structure
 *
 * Get the FW config and resources available for the function.
 * The resources are created based on the count received here.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int beiscsi_get_fw_config(struct be_ctrl_info *ctrl,
			  struct beiscsi_hba *phba)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
	uint32_t cid_count, icd_count;
	/* stays -EINVAL unless every validation below passes */
	int status = -EINVAL;
	uint8_t ulp_num = 0;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);

	be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);

	if (be_mbox_notify(ctrl)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : Failed in beiscsi_get_fw_config\n");
		goto fail_init;
	}

	/* FW response formats depend on port id */
	phba->fw_config.phys_port = pfw_cfg->phys_port;
	if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : invalid physical port id %d\n",
			    phba->fw_config.phys_port);
		goto fail_init;
	}

	/* populate and check FW config against min and max values */
	if (!is_chip_be2_be3r(phba)) {
		phba->fw_config.eqid_count = pfw_cfg->eqid_count;
		phba->fw_config.cqid_count = pfw_cfg->cqid_count;
		if (phba->fw_config.eqid_count == 0 ||
		    phba->fw_config.eqid_count > 2048) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BG_%d : invalid EQ count %d\n",
				    phba->fw_config.eqid_count);
			goto fail_init;
		}
		if (phba->fw_config.cqid_count == 0 ||
		    phba->fw_config.cqid_count > 4096) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BG_%d : invalid CQ count %d\n",
				    phba->fw_config.cqid_count);
			goto fail_init;
		}
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : EQ_Count : %d CQ_Count : %d\n",
			    phba->fw_config.eqid_count,
			    phba->fw_config.cqid_count);
	}

	/**
	 * Check on which all ULP iSCSI Protocol is loaded.
	 * Set the Bit for those ULP. This set flag is used
	 * at all places in the code to check on which ULP
	 * iSCSi Protocol is loaded
	 **/
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (pfw_cfg->ulp[ulp_num].ulp_mode &
		    BEISCSI_ULP_ISCSI_INI_MODE) {
			set_bit(ulp_num, &phba->fw_config.ulp_supported);

			/* Get the CID, ICD and Chain count for each ULP */
			phba->fw_config.iscsi_cid_start[ulp_num] =
				pfw_cfg->ulp[ulp_num].sq_base;
			phba->fw_config.iscsi_cid_count[ulp_num] =
				pfw_cfg->ulp[ulp_num].sq_count;

			phba->fw_config.iscsi_icd_start[ulp_num] =
				pfw_cfg->ulp[ulp_num].icd_base;
			phba->fw_config.iscsi_icd_count[ulp_num] =
				pfw_cfg->ulp[ulp_num].icd_count;

			phba->fw_config.iscsi_chain_start[ulp_num] =
				pfw_cfg->chain_icd[ulp_num].chain_base;
			phba->fw_config.iscsi_chain_count[ulp_num] =
				pfw_cfg->chain_icd[ulp_num].chain_count;

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BG_%d : Function loaded on ULP : %d\n"
				    "\tiscsi_cid_count : %d\n"
				    "\tiscsi_cid_start : %d\n"
				    "\t iscsi_icd_count : %d\n"
				    "\t iscsi_icd_start : %d\n",
				    ulp_num,
				    phba->fw_config.
				    iscsi_cid_count[ulp_num],
				    phba->fw_config.
				    iscsi_cid_start[ulp_num],
				    phba->fw_config.
				    iscsi_icd_count[ulp_num],
				    phba->fw_config.
				    iscsi_icd_start[ulp_num]);
		}
	}

	if (phba->fw_config.ulp_supported == 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
			    pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
			    pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
		goto fail_init;
	}

	/**
	 * ICD is shared among ULPs. Use icd_count of any one loaded ULP
	 **/
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			break;
	icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	if (icd_count == 0 || icd_count > 65536) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d: invalid ICD count %d\n", icd_count);
		goto fail_init;
	}

	cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
		    BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
	if (cid_count == 0 || cid_count > 4096) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d: invalid CID count %d\n", cid_count);
		goto fail_init;
	}

	/**
	 * Check FW is dual ULP aware i.e. can handle either
	 * of the protocols.
	 */
	phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
					  BEISCSI_FUNC_DUA_MODE);

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BG_%d : DUA Mode : 0x%x\n",
		    phba->fw_config.dual_ulp_aware);

	/* all set, continue using this FW config */
	status = 0;
fail_init:
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
1480
/**
 * beiscsi_get_port_name()- Get port name for the function
 * @ctrl: ptr to Ctrl Info
 * @phba: ptr to the dev priv structure
 *
 * Get the alphanumeric character for port
 *
 * return: mailbox status; phba->port_name is set to the FW-reported
 *	   name, or '?' when none was obtained.
 **/
int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
{
	int ret = 0;
	struct be_mcc_wrb *wrb;
	struct be_cmd_get_port_name *ioctl;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	ioctl = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_GET_PORT_NAME,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);
	ret = be_mbox_notify(ctrl);
	phba->port_name = 0;
	if (!ret) {
		/* response packs one name byte per physical port */
		phba->port_name = ioctl->p.resp.port_names >>
				  (phba->fw_config.phys_port * 8) & 0xff;
	} else {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
			    ret, ioctl->h.resp_hdr.status);
	}

	/* fall back to '?' when FW supplied no name */
	if (phba->port_name == 0)
		phba->port_name = '?';

	mutex_unlock(&ctrl->mbox_lock);
	return ret;
}
1521
/**
 * beiscsi_set_uer_feature()- Enable UE recovery feature in FW
 * @phba: device priv structure instance
 *
 * Issues SET_FEATURES(UER) over the BMBX. On success caches the
 * ue2rp value and marks BEISCSI_HBA_UER_SUPP in phba->state.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value (mailbox/MCC status)
 **/
int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_cmd_set_features *ioctl;
	struct be_mcc_wrb *wrb;
	int ret = 0;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	ioctl = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_SET_FEATURES,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);
	ioctl->feature = BE_CMD_SET_FEATURE_UER;
	ioctl->param_len = sizeof(ioctl->param.req);
	ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT;
	ret = be_mbox_notify(ctrl);
	if (!ret) {
		phba->ue2rp = ioctl->param.resp.ue2rp;
		set_bit(BEISCSI_HBA_UER_SUPP, &phba->state);
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : HBA error recovery supported\n");
	} else {
		/**
		 * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
		 * Older FW versions return this error.
		 */
		if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
		    ret == MCC_STATUS_INVALID_LENGTH)
			__beiscsi_log(phba, KERN_INFO,
				      "BG_%d : HBA error recovery not supported\n");
	}

	mutex_unlock(&ctrl->mbox_lock);
	return ret;
}
4d2ee1e6
JB
1561
1562static u32 beiscsi_get_post_stage(struct beiscsi_hba *phba)
1563{
1564 u32 sem;
1565
1566 if (is_chip_be2_be3r(phba))
1567 sem = ioread32(phba->csr_va + SLIPORT_SEMAPHORE_OFFSET_BEx);
1568 else
1569 pci_read_config_dword(phba->pcidev,
1570 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
1571 return sem;
1572}
1573
1574int beiscsi_check_fw_rdy(struct beiscsi_hba *phba)
1575{
1576 u32 loop, post, rdy = 0;
1577
1578 loop = 1000;
1579 while (loop--) {
1580 post = beiscsi_get_post_stage(phba);
1581 if (post & POST_ERROR_BIT)
1582 break;
1583 if ((post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY) {
1584 rdy = 1;
1585 break;
1586 }
1587 msleep(60);
1588 }
1589
1590 if (!rdy) {
1591 __beiscsi_log(phba, KERN_ERR,
1592 "BC_%d : FW not ready 0x%x\n", post);
1593 }
1594
1595 return rdy;
1596}
1597
4ee1ec42 1598int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
4d2ee1e6
JB
1599{
1600 struct be_ctrl_info *ctrl = &phba->ctrl;
1601 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
fa1261c4 1602 struct be_post_sgl_pages_req *req;
4d2ee1e6
JB
1603 int status;
1604
1605 mutex_lock(&ctrl->mbox_lock);
1606
1607 req = embedded_payload(wrb);
1608 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1609 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1610 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1611 status = be_mbox_notify(ctrl);
1612
1613 mutex_unlock(&ctrl->mbox_lock);
1614 return status;
1615}
1616
1617int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load)
1618{
1619 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1620 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
1621 u8 *endian_check;
1622 int status;
1623
1624 mutex_lock(&ctrl->mbox_lock);
1625 memset(wrb, 0, sizeof(*wrb));
1626
1627 endian_check = (u8 *) wrb;
1628 if (load) {
1629 /* to start communicating */
1630 *endian_check++ = 0xFF;
1631 *endian_check++ = 0x12;
1632 *endian_check++ = 0x34;
1633 *endian_check++ = 0xFF;
1634 *endian_check++ = 0xFF;
1635 *endian_check++ = 0x56;
1636 *endian_check++ = 0x78;
1637 *endian_check++ = 0xFF;
1638 } else {
1639 /* to stop communicating */
1640 *endian_check++ = 0xFF;
1641 *endian_check++ = 0xAA;
1642 *endian_check++ = 0xBB;
1643 *endian_check++ = 0xFF;
1644 *endian_check++ = 0xFF;
1645 *endian_check++ = 0xCC;
1646 *endian_check++ = 0xDD;
1647 *endian_check = 0xFF;
1648 }
1649 be_dws_cpu_to_le(wrb, sizeof(*wrb));
1650
1651 status = be_mbox_notify(ctrl);
1652 if (status)
1653 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1654 "BC_%d : special WRB message failed\n");
1655 mutex_unlock(&ctrl->mbox_lock);
1656 return status;
1657}
1658
/**
 * beiscsi_init_sliport()- Bring the SLI port to a known-good state
 * @phba: device priv structure instance
 *
 * Waits for POST to report FW ready, clears stale error bits, issues
 * COMMON_FUNCTION_RESET, then sends the "driver loading" special WRB.
 *
 * return
 *	Success: 0
 *	Failure: -EIO when FW never becomes ready, or mailbox status
 **/
int beiscsi_init_sliport(struct beiscsi_hba *phba)
{
	int status;

	/* check POST stage before talking to FW */
	status = beiscsi_check_fw_rdy(phba);
	if (!status)
		return -EIO;

	/* clear all error states after checking FW rdy */
	phba->state &= ~BEISCSI_HBA_IN_ERR;

	/* check again UER support */
	phba->state &= ~BEISCSI_HBA_UER_SUPP;

	/*
	 * SLI COMMON_FUNCTION_RESET completion is indicated by BMBX RDY bit.
	 * It should clean up any stale info in FW for this fn.
	 */
	status = beiscsi_cmd_function_reset(phba);
	if (status) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : SLI Function Reset failed\n");
		return status;
	}

	/* indicate driver is loading */
	return beiscsi_cmd_special_wrb(&phba->ctrl, 1);
}
/**
 * beiscsi_cmd_iscsi_cleanup()- Inform FW to cleanup EP data structures.
 * @phba: pointer to dev priv structure
 * @ulp: ULP number.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct iscsi_cleanup_req_v1 *req_v1;
	struct iscsi_cleanup_req *req;
	u16 hdr_ring_id, data_ring_id;
	struct be_mcc_wrb *wrb;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);

	hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
	data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
	if (is_chip_be2_be3r(phba)) {
		req = embedded_payload(wrb);
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
		req->chute = (1 << ulp);
		/* BE2/BE3 FW creates 8-bit ring id */
		req->hdr_ring_id = hdr_ring_id;
		req->data_ring_id = data_ring_id;
	} else {
		/* later chips take a v1 request with 16-bit LE ring ids */
		req_v1 = embedded_payload(wrb);
		be_wrb_hdr_prepare(wrb, sizeof(*req_v1), true, 0);
		be_cmd_hdr_prepare(&req_v1->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CLEANUP,
				   sizeof(*req_v1));
		req_v1->hdr.version = 1;
		req_v1->chute = (1 << ulp);
		req_v1->hdr_ring_id = cpu_to_le16(hdr_ring_id);
		req_v1->data_ring_id = cpu_to_le16(data_ring_id);
	}

	status = be_mbox_notify(ctrl);
	/* NOTE(review): the trailing %d below prints the ULP number, not
	 * the failing status - looks like the status may have been
	 * intended; confirm before changing the log.
	 */
	if (status)
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BG_%d : %s failed %d\n", __func__, ulp);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
d1d5ca88
JB
1740
1741/*
1742 * beiscsi_detect_ue()- Detect Unrecoverable Error on adapter
1743 * @phba: Driver priv structure
1744 *
1745 * Read registers linked to UE and check for the UE status
1746 **/
1747int beiscsi_detect_ue(struct beiscsi_hba *phba)
1748{
1749 uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
1750 uint32_t ue_hi = 0, ue_lo = 0;
1751 uint8_t i = 0;
1752 int ret = 0;
1753
1754 pci_read_config_dword(phba->pcidev,
1755 PCICFG_UE_STATUS_LOW, &ue_lo);
1756 pci_read_config_dword(phba->pcidev,
1757 PCICFG_UE_STATUS_MASK_LOW,
1758 &ue_mask_lo);
1759 pci_read_config_dword(phba->pcidev,
1760 PCICFG_UE_STATUS_HIGH,
1761 &ue_hi);
1762 pci_read_config_dword(phba->pcidev,
1763 PCICFG_UE_STATUS_MASK_HI,
1764 &ue_mask_hi);
1765
1766 ue_lo = (ue_lo & ~ue_mask_lo);
1767 ue_hi = (ue_hi & ~ue_mask_hi);
1768
1769
1770 if (ue_lo || ue_hi) {
1771 set_bit(BEISCSI_HBA_IN_UE, &phba->state);
1772 __beiscsi_log(phba, KERN_ERR,
1773 "BC_%d : HBA error detected\n");
1774 ret = 1;
1775 }
1776
1777 if (ue_lo) {
1778 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
1779 if (ue_lo & 1)
1780 __beiscsi_log(phba, KERN_ERR,
1781 "BC_%d : UE_LOW %s bit set\n",
1782 desc_ue_status_low[i]);
1783 }
1784 }
1785
1786 if (ue_hi) {
1787 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
1788 if (ue_hi & 1)
1789 __beiscsi_log(phba, KERN_ERR,
1790 "BC_%d : UE_HIGH %s bit set\n",
1791 desc_ue_status_hi[i]);
1792 }
1793 }
1794 return ret;
1795}
1796
1797/*
1798 * beiscsi_detect_tpe()- Detect Transient Parity Error on adapter
1799 * @phba: Driver priv structure
1800 *
1801 * Read SLIPORT SEMAPHORE register to check for UER
1802 *
1803 **/
1804int beiscsi_detect_tpe(struct beiscsi_hba *phba)
1805{
1806 u32 post, status;
1807 int ret = 0;
1808
1809 post = beiscsi_get_post_stage(phba);
1810 status = post & POST_STAGE_MASK;
1811 if ((status & POST_ERR_RECOVERY_CODE_MASK) ==
1812 POST_STAGE_RECOVERABLE_ERR) {
1813 set_bit(BEISCSI_HBA_IN_TPE, &phba->state);
1814 __beiscsi_log(phba, KERN_INFO,
1815 "BC_%d : HBA error recoverable: 0x%x\n", post);
1816 ret = 1;
1817 } else {
1818 __beiscsi_log(phba, KERN_INFO,
1819 "BC_%d : HBA in UE: 0x%x\n", post);
1820 }
1821
1822 return ret;
1823}