]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/scsi/be2iscsi/be_cmds.c
UBUNTU: Ubuntu-4.13.0-45.50
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / be2iscsi / be_cmds.c
CommitLineData
942b7654
JB
1/*
2 * Copyright 2017 Broadcom. All Rights Reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
6733b39a
JK
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
942b7654 7 * as published by the Free Software Foundation. The full GNU General
6733b39a
JK
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
60f36e04 11 * linux-drivers@broadcom.com
6733b39a 12 *
6733b39a
JK
13 */
14
2177199d
JSJ
15#include <scsi/iscsi_proto.h>
16
4eea99d5 17#include "be_main.h"
6733b39a
JK
18#include "be.h"
19#include "be_mgmt.h"
6733b39a 20
d1d5ca88
JB
/*
 * UE Status Low CSR: names for the low 32 bits of the Unrecoverable
 * Error status register, presumably indexed by bit position — used when
 * logging UE causes. NOTE(review): trailing spaces inside some strings
 * are part of the original table; do not strip them.
 */
static const char * const desc_ue_status_low[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
56
/*
 * UE Status High CSR: names for the high 32 bits of the Unrecoverable
 * Error status register, presumably indexed by bit position. The tail
 * entries are "Unknown" placeholders so the table covers all 32 bits.
 */
static const char * const desc_ue_status_hi[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
92
090e2184
JB
/*
 * alloc_mcc_wrb - allocate an MCC WRB and a command tag under mcc_lock
 * @phba: driver private structure
 * @ref_tag: out-parameter; receives the tag allocated for this command
 *
 * Takes the next free tag from the mcc_tag ring and the next WRB slot
 * from the MCC queue. The tag is embedded in wrb->tag0 together with
 * the WRB index so the completion path can recover both.
 *
 * Returns the zeroed WRB on success, NULL when the queue is full or no
 * tag is available.
 */
struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
				 unsigned int *ref_tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb = NULL;
	unsigned int tag;

	spin_lock(&phba->ctrl.mcc_lock);
	if (mccq->used == mccq->len) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC queue full: WRB used %u tag avail %u\n",
			    mccq->used, phba->ctrl.mcc_tag_available);
		goto alloc_failed;
	}

	if (!phba->ctrl.mcc_tag_available)
		goto alloc_failed;

	tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
	if (!tag) {
		/* tag 0 is not a valid command tag; ring state is corrupt */
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n",
			    phba->ctrl.mcc_tag_available,
			    phba->ctrl.mcc_alloc_index);
		goto alloc_failed;
	}

	/* return this tag for further reference */
	*ref_tag = tag;
	/* slot consumed: clear it and reset all per-tag state */
	phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
	phba->ctrl.mcc_tag_status[tag] = 0;
	phba->ctrl.ptag_state[tag].tag_state = 0;
	phba->ctrl.ptag_state[tag].cbfn = NULL;
	phba->ctrl.mcc_tag_available--;
	/* advance alloc index with wrap-around at MAX_MCC_CMD */
	if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
		phba->ctrl.mcc_alloc_index = 0;
	else
		phba->ctrl.mcc_alloc_index++;

	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	/* tag0 carries both the command tag and the WRB index */
	wrb->tag0 = tag;
	wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK;
	queue_head_inc(mccq);
	mccq->used++;

alloc_failed:
	spin_unlock(&phba->ctrl.mcc_lock);
	return wrb;
}
145
146void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
147{
148 struct be_queue_info *mccq = &ctrl->mcc_obj.q;
149
96eb8d4d 150 spin_lock(&ctrl->mcc_lock);
69fd6d7b
JB
151 tag = tag & MCC_Q_CMD_TAG_MASK;
152 ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
153 if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
154 ctrl->mcc_free_index = 0;
155 else
156 ctrl->mcc_free_index++;
157 ctrl->mcc_tag_available++;
090e2184 158 mccq->used--;
96eb8d4d 159 spin_unlock(&ctrl->mcc_lock);
69fd6d7b
JB
160}
161
50a4b824
JB
/*
 * __beiscsi_mcc_compl_status - Return the status of MCC completion
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * Decodes the packed status word stored in ctrl.mcc_tag_status[tag]
 * ([7:0] = compl status, [15:8] = additional status, [23:16] = WRB
 * index) and logs failures using the command header, taken either from
 * the non-embedded DMA buffer (@mbx_cmd_mem) or from the embedded WRB.
 *
 * return
 * Success: 0
 * Failure: Non-Zero (-EIO on error, -EAGAIN when the response buffer
 * was too small)
 */
int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
			       unsigned int tag,
			       struct be_mcc_wrb **wrb,
			       struct be_dma_mem *mbx_cmd_mem)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	uint16_t status = 0, addl_status = 0, wrb_num = 0;
	struct be_cmd_resp_hdr *mbx_resp_hdr;
	struct be_cmd_req_hdr *mbx_hdr;
	struct be_mcc_wrb *temp_wrb;
	uint32_t mcc_tag_status;
	int rc = 0;

	mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
	status = (mcc_tag_status & CQE_STATUS_MASK);
	addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
			CQE_STATUS_ADDL_SHIFT);

	if (mbx_cmd_mem) {
		/* non-embedded command: header lives in the DMA buffer */
		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
	} else {
		/* embedded command: locate the WRB via the saved index */
		wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
			  CQE_STATUS_WRB_SHIFT;
		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
		mbx_hdr = embedded_payload(temp_wrb);

		if (wrb)
			*wrb = temp_wrb;
	}

	if (status || addl_status) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n",
			    mbx_hdr->subsystem, mbx_hdr->opcode,
			    status, addl_status);
		rc = -EIO;
		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
			/* FW reports how big the response really is */
			mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
				    BEISCSI_LOG_CONFIG,
				    "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n",
				    mbx_resp_hdr->response_length,
				    mbx_resp_hdr->actual_resp_len);
			rc = -EAGAIN;
		}
	}

	return rc;
}
225
/*
 * beiscsi_mccq_compl_wait()- Process completion in MCC CQ
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * Waits for MBX completion with the passed TAG. On timeout, ownership
 * of any non-embedded DMA buffer is transferred to the per-tag state so
 * the late completion handler can free it; the tag itself is not freed
 * until FW eventually completes.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
			    unsigned int tag,
			    struct be_mcc_wrb **wrb,
			    struct be_dma_mem *mbx_cmd_mem)
{
	int rc = 0;

	if (!tag || tag > MAX_MCC_CMD) {
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : invalid tag %u\n", tag);
		return -EINVAL;
	}

	/* adapter dead: drop the RUNNING claim on this tag and bail out */
	if (beiscsi_hba_in_error(phba)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/* wait for the mccq completion */
	rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag],
				phba->ctrl.mcc_tag_status[tag],
				msecs_to_jiffies(
				BEISCSI_HOST_MBX_TIMEOUT));
	/**
	 * Return EIO if port is being disabled. Associated DMA memory, if any,
	 * is freed by the caller. When port goes offline, MCCQ is cleaned up
	 * so does WRB.
	 */
	if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/**
	 * If MBOX cmd timeout expired, tag and resource allocated
	 * for cmd is not freed until FW returns completion.
	 */
	if (rc <= 0) {
		struct be_dma_mem *tag_mem;

		/**
		 * PCI/DMA memory allocated and posted in non-embedded mode
		 * will have mbx_cmd_mem != NULL.
		 * Save virtual and bus addresses for the command so that it
		 * can be freed later.
		 **/
		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
		if (mbx_cmd_mem) {
			tag_mem->size = mbx_cmd_mem->size;
			tag_mem->va = mbx_cmd_mem->va;
			tag_mem->dma = mbx_cmd_mem->dma;
		} else
			tag_mem->size = 0;

		/* first make tag_mem_state visible to all */
		wmb();
		set_bit(MCC_TAG_STATE_TIMEOUT,
				&phba->ctrl.ptag_state[tag].tag_state);

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Completion timed out\n");
		return -EBUSY;
	}

	rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem);

	free_mcc_wrb(&phba->ctrl, tag);
	return rc;
}
312
/*
 * beiscsi_process_mbox_compl()- Check the MBX completion status
 * @ctrl: Function specific MBX data structure
 * @compl: Completion status of MBX Command
 *
 * Check for the MBX completion status when BMBX method used
 *
 * return
 * Success: Zero
 * Failure: Non-Zero
 **/
static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
				      struct be_mcc_compl *compl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
	u16 compl_status, extd_status;

	/**
	 * To check if valid bit is set, check the entire word as we don't know
	 * the endianness of the data (old entry is host endian while a new
	 * entry is little endian)
	 */
	if (!compl->flags) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : BMBX busy, no completion\n");
		return -EBUSY;
	}
	compl->flags = le32_to_cpu(compl->flags);
	WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);

	/**
	 * Just swap the status to host endian;
	 * mcc tag is opaquely copied from mcc_wrb.
	 */
	be_dws_le_to_cpu(compl, 4);
	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		      CQE_STATUS_EXTD_MASK;
	/* Need to reset the entire word that houses the valid bit */
	compl->flags = 0;

	if (compl_status == MCC_STATUS_SUCCESS)
		return 0;

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
		    hdr->subsystem, hdr->opcode, compl_status, extd_status);
	/* non-zero FW status is returned to the caller as-is */
	return compl_status;
}
366
9c4f8b01
JB
367static void beiscsi_process_async_link(struct beiscsi_hba *phba,
368 struct be_mcc_compl *compl)
bfead3b2 369{
9c4f8b01 370 struct be_async_event_link_state *evt;
6ea9b3b0 371
9c4f8b01 372 evt = (struct be_async_event_link_state *)compl;
99bc5d55 373
9c4f8b01
JB
374 phba->port_speed = evt->port_speed;
375 /**
376 * Check logical link status in ASYNC event.
377 * This has been newly introduced in SKH-R Firmware 10.0.338.45.
378 **/
379 if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
50a4b824
JB
380 set_bit(BEISCSI_HBA_LINK_UP, &phba->state);
381 if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
382 beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
9c4f8b01
JB
383 __beiscsi_log(phba, KERN_ERR,
384 "BC_%d : Link Up on Port %d tag 0x%x\n",
385 evt->physical_port, evt->event_tag);
386 } else {
9122e991 387 clear_bit(BEISCSI_HBA_LINK_UP, &phba->state);
9c4f8b01
JB
388 __beiscsi_log(phba, KERN_ERR,
389 "BC_%d : Link Down on Port %d tag 0x%x\n",
390 evt->physical_port, evt->event_tag);
391 iscsi_host_for_each_session(phba->shost,
480195c2 392 beiscsi_session_fail);
bfead3b2 393 }
6733b39a
JK
394}
395
53aefe25
JB
/*
 * Messages for the MISCONFIGURED-port SLI async event, indexed by the
 * optic state value reported by FW (0 = functional, 1-5 = fault cases).
 */
static char *beiscsi_port_misconf_event_msg[] = {
	"Physical Link is functional.",
	"Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
	"Optics of two types installed - Remove one optic or install matching pair of optics.",
	"Incompatible optics - Replace with compatible optics for card to function.",
	"Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics - Replace with Avago Certified optics to enable link operation."
};
404
/*
 * beiscsi_process_async_sli - handle an SLI ASYNC event
 * @phba: driver private structure
 * @compl: the MCC completion carrying the async event payload
 *
 * Only MISCONFIGURED physical-port events are handled. The per-port
 * optic state and link-effect bytes are extracted from event_data1/2
 * (one byte per physical port), the cached optic_state is updated, and
 * a message is logged when the state changed.
 */
static void beiscsi_process_async_sli(struct beiscsi_hba *phba,
				      struct be_mcc_compl *compl)
{
	struct be_async_event_sli *async_sli;
	u8 evt_type, state, old_state, le;
	char *sev = KERN_WARNING;
	char *msg = NULL;

	evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT;
	evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK;

	/* processing only MISCONFIGURED physical port event */
	if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
		return;

	async_sli = (struct be_async_event_sli *)compl;
	/* each physical port owns one byte in event_data1/event_data2 */
	state = async_sli->event_data1 >>
		 (phba->fw_config.phys_port * 8) & 0xff;
	le = async_sli->event_data2 >>
	      (phba->fw_config.phys_port * 8) & 0xff;

	old_state = phba->optic_state;
	phba->optic_state = state;

	if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) {
		/* fw is reporting a state we don't know, log and return */
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
			      phba->port_name, async_sli->event_data1);
		return;
	}

	if (ASYNC_SLI_LINK_EFFECT_VALID(le)) {
		/* log link effect for unqualified-4, uncertified-5 optics */
		if (state > 3)
			msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ?
				" Link is non-operational." :
				" Link is operational.";
		/* 1 - info */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1)
			sev = KERN_INFO;
		/* 2 - error */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2)
			sev = KERN_ERR;
	}

	/* log only on a state transition to avoid repeated messages */
	if (old_state != phba->optic_state)
		__beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n",
			      phba->port_name,
			      beiscsi_port_misconf_event_msg[state],
			      !msg ? "" : msg);
}
457
458void beiscsi_process_async_event(struct beiscsi_hba *phba,
459 struct be_mcc_compl *compl)
460{
461 char *sev = KERN_INFO;
462 u8 evt_code;
463
464 /* interpret flags as an async trailer */
465 evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT;
466 evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK;
467 switch (evt_code) {
468 case ASYNC_EVENT_CODE_LINK_STATE:
9c4f8b01 469 beiscsi_process_async_link(phba, compl);
53aefe25
JB
470 break;
471 case ASYNC_EVENT_CODE_ISCSI:
50a4b824
JB
472 if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
473 beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
53aefe25
JB
474 sev = KERN_ERR;
475 break;
476 case ASYNC_EVENT_CODE_SLI:
477 beiscsi_process_async_sli(phba, compl);
478 break;
479 default:
480 /* event not registered */
481 sev = KERN_ERR;
482 }
483
484 beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
9c4f8b01
JB
485 "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n",
486 evt_code, compl->status, compl->flags);
53aefe25
JB
487}
488
2e4e8f65
JB
/*
 * beiscsi_process_mcc_compl - process one MCC queue completion
 * @ctrl: function-specific MBX data structure
 * @compl: the completion entry (little endian, swapped in place here)
 *
 * Recovers the command tag and WRB index from tag0, then resolves the
 * completion according to the per-tag state:
 *  - not RUNNING: stray completion, ignored;
 *  - TIMEOUT: the waiter already gave up; free the saved DMA buffer
 *    (non-embedded commands only) and the tag;
 *  - ASYNC: pack the status into mcc_tag_status and invoke the saved
 *    callback, then free the tag;
 *  - IGNORE: log status only and free the tag;
 *  - otherwise: pack the status and wake the waiter in
 *    beiscsi_mccq_compl_wait(), which frees the tag.
 *
 * Always returns 0.
 */
int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
			      struct be_mcc_compl *compl)
{
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u16 compl_status, extd_status;
	struct be_dma_mem *tag_mem;
	unsigned int tag, wrb_idx;

	be_dws_le_to_cpu(compl, 4);
	tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK);
	wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT;

	if (!test_bit(MCC_TAG_STATE_RUNNING,
		      &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX |
			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX cmd completed but not posted\n");
		return 0;
	}

	/* end MCC with this tag */
	clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);

	if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Completion for timeout Command from FW\n");
		/**
		 * Check for the size before freeing resource.
		 * Only for non-embedded cmd, PCI resource is allocated.
		 **/
		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
		if (tag_mem->size) {
			pci_free_consistent(ctrl->pdev, tag_mem->size,
					tag_mem->va, tag_mem->dma);
			tag_mem->size = 0;
		}
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		      CQE_STATUS_EXTD_MASK;
	/* The ctrl.mcc_tag_status[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	ctrl->mcc_tag_status[tag] = CQE_VALID_MASK;
	ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT);
	ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) &
				     CQE_STATUS_ADDL_MASK;
	ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);

	if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) {
		if (ctrl->ptag_state[tag].cbfn)
			ctrl->ptag_state[tag].cbfn(phba, tag);
		else
			__beiscsi_log(phba, KERN_ERR,
				      "BC_%d : MBX ASYNC command with no callback\n");
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) {
		/* just check completion status and free wrb */
		__beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	wake_up_interruptible(&ctrl->mcc_wait[tag]);
	return 0;
}
565
69fd6d7b
JB
566void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
567{
568 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
569 u32 val = 0;
570
571 set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state);
572 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
573 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
574 /* make request available for DMA */
575 wmb();
576 iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
bfead3b2
JK
577}
578
/*
 * be_mbox_db_ready_poll()- Check ready status
 * @ctrl: Function specific MBX data structure
 *
 * Check for the ready status of FW to send BMBX
 * commands to adapter.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
{
	/* wait 30s for generic non-flash MBOX operation */
#define BEISCSI_MBX_RDY_BIT_TIMEOUT 30000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	unsigned long timeout;
	u32 ready;

	/*
	 * This BMBX busy-wait path is used during init only; poll the
	 * ready bit for up to BEISCSI_MBX_RDY_BIT_TIMEOUT ms.
	 */
	timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
	do {
		if (beiscsi_hba_in_error(phba))
			return -EIO;

		ready = ioread32(db);
		/* all-ones read indicates the device is gone from the bus */
		if (ready == 0xffffffff)
			return -EIO;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			return 0;

		if (time_after(jiffies, timeout))
			break;
		/* 1ms sleep is enough in most cases */
		schedule_timeout_uninterruptible(msecs_to_jiffies(1));
	} while (!ready);

	beiscsi_log(phba, KERN_ERR,
		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : FW Timed Out\n");
	set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state);
	return -EBUSY;
}
628
e175defe
JSJ
/*
 * be_mbox_notify: Notify adapter of new BMBX command
 * @ctrl: Function specific MBX data structure
 *
 * Ring doorbell to inform adapter of a BMBX command
 * to process. The mailbox DMA address is written in two doorbell
 * writes (high bits first, then low bits), each preceded by a poll
 * for the ready bit.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
static int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* phase 1: post the upper part of the mailbox DMA address */
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* phase 2: post the lower part (HI bit clear) */
	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* RDY is set; small delay before CQE read. */
	udelay(1);

	status = beiscsi_process_mbox_compl(ctrl, &mbox->compl);
	return status;
}
677
6733b39a
JK
678void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
679 bool embedded, u8 sge_cnt)
680{
681 if (embedded)
fa1261c4 682 wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;
6733b39a 683 else
fa1261c4
JB
684 wrb->emb_sgecnt_special |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
685 MCC_WRB_SGE_CNT_SHIFT;
6733b39a
JK
686 wrb->payload_length = payload_len;
687 be_dws_cpu_to_le(wrb, 8);
688}
689
690void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
691 u8 subsystem, u8 opcode, int cmd_len)
692{
693 req_hdr->opcode = opcode;
694 req_hdr->subsystem = subsystem;
695 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
e175defe 696 req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
6733b39a
JK
697}
698
699static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
700 struct be_dma_mem *mem)
701{
702 int i, buf_pages;
703 u64 dma = (u64) mem->dma;
704
705 buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
706 for (i = 0; i < buf_pages; i++) {
707 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
708 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
709 dma += PAGE_SIZE_4K;
710 }
711}
712
713static u32 eq_delay_to_mult(u32 usec_delay)
714{
715#define MAX_INTR_RATE 651042
716 const u32 round = 10;
717 u32 multiplier;
718
719 if (usec_delay == 0)
720 multiplier = 0;
721 else {
722 u32 interrupt_rate = 1000000 / usec_delay;
723 if (interrupt_rate == 0)
724 multiplier = 1023;
725 else {
726 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
727 multiplier /= interrupt_rate;
728 multiplier = (multiplier + round / 2) / round;
729 multiplier = min(multiplier, (u32) 1023);
730 }
731 }
732 return multiplier;
733}
734
735struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
736{
737 return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
738}
739
/*
 * beiscsi_cmd_eq_create - create an event queue via the bootstrap mailbox
 * @ctrl: function-specific MBX data structure
 * @eq: event queue to create; id/created are filled on success
 * @eq_delay: interrupt delay in usec, converted to the HW multiplier
 *
 * Issues OPCODE_COMMON_EQ_CREATE as an embedded BMBX command under
 * mbox_lock. Returns 0 on success, non-zero FW/driver status otherwise.
 */
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
						PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	/* count field encodes ring size as log2(len/256) */
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
					__ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
					eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		/* request and response share the embedded payload buffer */
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
779
6733b39a
JK
780int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
781 struct be_queue_info *cq, struct be_queue_info *eq,
782 bool sol_evts, bool no_delay, int coalesce_wm)
783{
784 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
785 struct be_cmd_req_cq_create *req = embedded_payload(wrb);
786 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
99bc5d55 787 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
6733b39a
JK
788 struct be_dma_mem *q_mem = &cq->dma_mem;
789 void *ctxt = &req->context;
790 int status;
791
c03a50f7 792 mutex_lock(&ctrl->mbox_lock);
6733b39a
JK
793 memset(wrb, 0, sizeof(*wrb));
794
795 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
796
797 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
798 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
6733b39a
JK
799
800 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
2c9dfd36 801 if (is_chip_be2_be3r(phba)) {
eaae5267
JSJ
802 AMAP_SET_BITS(struct amap_cq_context, coalescwm,
803 ctxt, coalesce_wm);
804 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
805 AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
806 __ilog2_u32(cq->len / 256));
807 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
808 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
809 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
810 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
811 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
812 AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
813 PCI_FUNC(ctrl->pdev->devfn));
2c9dfd36
JK
814 } else {
815 req->hdr.version = MBX_CMD_VER2;
816 req->page_size = 1;
817 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
818 ctxt, coalesce_wm);
819 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
820 ctxt, no_delay);
821 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
822 __ilog2_u32(cq->len / 256));
823 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
824 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
825 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
826 AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
eaae5267 827 }
6733b39a 828
6733b39a
JK
829 be_dws_cpu_to_le(ctxt, sizeof(req->context));
830
831 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
832
833 status = be_mbox_notify(ctrl);
834 if (!status) {
835 cq->id = le16_to_cpu(resp->cq_id);
836 cq->created = true;
837 } else
99bc5d55
JSJ
838 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
839 "BC_%d : In be_cmd_cq_create, status=ox%08x\n",
840 status);
841
c03a50f7 842 mutex_unlock(&ctrl->mbox_lock);
6733b39a
JK
843
844 return status;
845}
846
847static u32 be_encoded_q_len(int q_len)
848{
849 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
850 if (len_encoded == 16)
851 len_encoded = 0;
852 return len_encoded;
853}
bfead3b2 854
35e66019 855int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
bfead3b2
JK
856 struct be_queue_info *mccq,
857 struct be_queue_info *cq)
858{
859 struct be_mcc_wrb *wrb;
53aefe25 860 struct be_cmd_req_mcc_create_ext *req;
bfead3b2
JK
861 struct be_dma_mem *q_mem = &mccq->dma_mem;
862 struct be_ctrl_info *ctrl;
863 void *ctxt;
864 int status;
865
c03a50f7 866 mutex_lock(&phba->ctrl.mbox_lock);
bfead3b2
JK
867 ctrl = &phba->ctrl;
868 wrb = wrb_from_mbox(&ctrl->mbox_mem);
37609766 869 memset(wrb, 0, sizeof(*wrb));
bfead3b2
JK
870 req = embedded_payload(wrb);
871 ctxt = &req->context;
872
873 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
874
875 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
53aefe25 876 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
bfead3b2
JK
877
878 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
53aefe25
JB
879 req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
880 req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
881 req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;
bfead3b2
JK
882
883 AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
884 PCI_FUNC(phba->pcidev->devfn));
885 AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
886 AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
887 be_encoded_q_len(mccq->len));
888 AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
889
890 be_dws_cpu_to_le(ctxt, sizeof(req->context));
891
892 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
893
a264f5e8 894 status = be_mbox_notify(ctrl);
bfead3b2
JK
895 if (!status) {
896 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
897 mccq->id = le16_to_cpu(resp->id);
898 mccq->created = true;
899 }
c03a50f7 900 mutex_unlock(&phba->ctrl.mbox_lock);
bfead3b2
JK
901
902 return status;
903}
904
6733b39a
JK
/*
 * beiscsi_cmd_q_destroy - destroy a queue via the bootstrap mailbox
 * @ctrl: function-specific MBX data structure
 * @q: queue to destroy; its id is placed in the request (except SGL)
 * @queue_type: one of QTYPE_EQ/CQ/MCCQ/WRBQ/DPDUQ/SGL
 *
 * Maps the queue type to the FW subsystem/opcode pair and issues the
 * destroy command under mbox_lock. An unknown queue type is a driver
 * bug and triggers BUG(). Returns 0 on success.
 */
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u8 subsys = 0, opcode = 0;
	int status;

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BC_%d : In beiscsi_cmd_q_destroy "
		    "queue_type : %d\n", queue_type);

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		/* unlock before BUG so a survivable BUG doesn't deadlock */
		mutex_unlock(&ctrl->mbox_lock);
		BUG();
		return -ENXIO;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	/* SGL removal is not addressed by queue id */
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
961
8a86e833
JK
962/**
963 * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
964 * @ctrl: ptr to ctrl_info
965 * @cq: Completion Queue
966 * @dq: Default Queue
967 * @lenght: ring size
968 * @entry_size: size of each entry in DEFQ
969 * @is_header: Header or Data DEFQ
970 * @ulp_num: Bind to which ULP
971 *
972 * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted
973 * on this queue by the FW
974 *
975 * return
976 * Success: 0
977 * Failure: Non-Zero Value
978 *
979 **/
6733b39a
JK
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size, uint8_t is_header,
				    uint8_t ulp_num)
{
	/* embedded command: request lives directly in the BMBX WRB */
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	void *ctxt = &req->context;
	int status;

	/* BMBX is single-threaded; mbox_lock serializes all users */
	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
		      OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	/* DUA-aware FW can bind the DEFQ to a specific ULP */
	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	/* context layout differs between BE2/BE3R and later chips */
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
				      sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      cq_id_recv, ctxt, cq->id);
	} else {
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
				      sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      cq_id_recv, ctxt, cq->id);
	}

	/* context must be little-endian before handing to FW */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_ring *defq_ring;
		/* response overlays the same embedded payload */
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
		/* record ring info on the matching HDR/Data DEFQ descriptor */
		if (is_header)
			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
		else
			defq_ring = &phba->phwi_ctrlr->
				    default_pdu_data[ulp_num];

		defq_ring->id = dq->id;

		if (!phba->fw_config.dual_ulp_aware) {
			/* legacy FW: everything lives on ULP0 */
			defq_ring->ulp_num = BEISCSI_ULP0;
			defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
		} else {
			/* DUA FW reports the ULP and doorbell it chose */
			defq_ring->ulp_num = resp->ulp_num;
			defq_ring->doorbell_offset = resp->doorbell_offset;
		}
	}
	mutex_unlock(&ctrl->mbox_lock);

	return status;
}
1069
4eea99d5
JK
1070/**
1071 * be_cmd_wrbq_create()- Create WRBQ
1072 * @ctrl: ptr to ctrl_info
1073 * @q_mem: memory details for the queue
1074 * @wrbq: queue info
1075 * @pwrb_context: ptr to wrb_context
1076 * @ulp_num: ULP on which the WRBQ is to be created
1077 *
1078 * Create WRBQ on the passed ULP_NUM.
1079 *
1080 **/
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
			struct be_dma_mem *q_mem,
			struct be_queue_info *wrbq,
			struct hwi_wrb_context *pwrb_context,
			uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	/* req and resp intentionally alias the same embedded payload:
	 * FW overwrites the request area with the response on completion.
	 */
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
		OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	/* DUA-aware FW: ask for the WRBQ on the requested ULP */
	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		/* FW assigns the connection id (cid) for this WRBQ */
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;

		pwrb_context->cid = wrbq->id;
		if (!phba->fw_config.dual_ulp_aware) {
			/* legacy FW: fixed ULP0 TX doorbell */
			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
			pwrb_context->ulp_num = BEISCSI_ULP0;
		} else {
			/* DUA FW reports ULP and doorbell in the response */
			pwrb_context->ulp_num = resp->ulp_num;
			pwrb_context->doorbell_offset = resp->doorbell_offset;
		}
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
1127
15a90fe0
JK
1128int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
1129 struct be_dma_mem *q_mem)
1130{
1131 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1132 struct be_post_template_pages_req *req = embedded_payload(wrb);
1133 int status;
1134
c03a50f7 1135 mutex_lock(&ctrl->mbox_lock);
15a90fe0
JK
1136
1137 memset(wrb, 0, sizeof(*wrb));
1138 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1139 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1140 OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
1141 sizeof(*req));
1142
1143 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1144 req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
1145 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1146
1147 status = be_mbox_notify(ctrl);
c03a50f7 1148 mutex_unlock(&ctrl->mbox_lock);
15a90fe0
JK
1149 return status;
1150}
1151
1152int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
1153{
1154 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1155 struct be_remove_template_pages_req *req = embedded_payload(wrb);
1156 int status;
1157
c03a50f7 1158 mutex_lock(&ctrl->mbox_lock);
15a90fe0
JK
1159
1160 memset(wrb, 0, sizeof(*wrb));
1161 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1162 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1163 OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
1164 sizeof(*req));
1165
1166 req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
1167
1168 status = be_mbox_notify(ctrl);
c03a50f7 1169 mutex_unlock(&ctrl->mbox_lock);
15a90fe0
JK
1170 return status;
1171}
1172
6733b39a
JK
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	/* 0xff appears to be a FW sentinel value rather than a real page
	 * count: loop once but still report 0xff in req->num_pages below.
	 * NOTE(review): semantics inferred from the code; confirm with FW spec.
	 */
	if (num_pages == 0xff)
		num_pages = 1;

	mutex_lock(&ctrl->mbox_lock);
	/* post in batches limited by the number of SGEs one request holds */
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		/* capacity of the pages[] array in one request */
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		/* advance the DMA cursor past the pages just described */
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		/* preserve the 0xff sentinel in the request sent to FW */
		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BC_%d : FW CMD to map iscsi frags failed.\n");

			goto error;
		}
	} while (num_pages > 0);
error:
	mutex_unlock(&ctrl->mbox_lock);
	/* on failure, undo any pages already posted */
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}
e5285860 1222
6f72238e
JSJ
1223/**
1224 * be_cmd_set_vlan()- Configure VLAN paramters on the adapter
1225 * @phba: device priv structure instance
1226 * @vlan_tag: TAG to be set
1227 *
1228 * Set the VLAN_TAG for the adapter or Disable VLAN on adapter
1229 *
1230 * returns
1231 * TAG for the MBX Cmd
1232 * **/
int be_cmd_set_vlan(struct beiscsi_hba *phba,
		     uint16_t vlan_tag)
{
	unsigned int tag;
	struct be_mcc_wrb *wrb;
	struct be_cmd_set_vlan_req *req;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	/* 0 (no tag) is the failure indication to the caller */
	if (mutex_lock_interruptible(&ctrl->mbox_lock))
		return 0;
	/* async MCC path: tag identifies the command on completion */
	wrb = alloc_mcc_wrb(phba, &tag);
	if (!wrb) {
		mutex_unlock(&ctrl->mbox_lock);
		return 0;
	}

	req = embedded_payload(wrb);
	/* NOTE(review): payload length passed is sizeof(*wrb), not
	 * sizeof(*req) as sibling commands do — confirm intentional.
	 */
	be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
			   sizeof(*req));

	req->interface_hndl = phba->interface_handle;
	req->vlan_priority = vlan_tag;

	/* fire and return the tag; caller waits for MCC completion */
	be_mcc_notify(phba, tag);
	mutex_unlock(&ctrl->mbox_lock);

	return tag;
}
6694095b 1263
480195c2
JB
1264int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
1265 struct beiscsi_hba *phba)
1266{
1267 struct be_dma_mem nonemb_cmd;
1268 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1269 struct be_mgmt_controller_attributes *req;
1270 struct be_sge *sge = nonembedded_sgl(wrb);
1271 int status = 0;
1272
1273 nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
1274 sizeof(struct be_mgmt_controller_attributes),
1275 &nonemb_cmd.dma);
1276 if (nonemb_cmd.va == NULL) {
1277 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1278 "BG_%d : pci_alloc_consistent failed in %s\n",
1279 __func__);
1280 return -ENOMEM;
1281 }
1282 nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
1283 req = nonemb_cmd.va;
1284 memset(req, 0, sizeof(*req));
1285 mutex_lock(&ctrl->mbox_lock);
1286 memset(wrb, 0, sizeof(*wrb));
1287 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
1288 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1289 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
1290 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
1291 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
1292 sge->len = cpu_to_le32(nonemb_cmd.size);
1293 status = be_mbox_notify(ctrl);
1294 if (!status) {
1295 struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
1296
1297 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1298 "BG_%d : Firmware Version of CMD : %s\n"
1299 "Firmware Version is : %s\n"
1300 "Developer Build, not performing version check...\n",
1301 resp->params.hba_attribs
1302 .flashrom_version_string,
1303 resp->params.hba_attribs.
1304 firmware_version_string);
1305
1306 phba->fw_config.iscsi_features =
1307 resp->params.hba_attribs.iscsi_features;
1308 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1309 "BM_%d : phba->fw_config.iscsi_features = %d\n",
1310 phba->fw_config.iscsi_features);
1311 memcpy(phba->fw_ver_str, resp->params.hba_attribs.
1312 firmware_version_string, BEISCSI_VER_STRLEN);
1313 } else
1314 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1315 "BG_%d : Failed in beiscsi_check_supported_fw\n");
1316 mutex_unlock(&ctrl->mbox_lock);
1317 if (nonemb_cmd.va)
1318 pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
1319 nonemb_cmd.va, nonemb_cmd.dma);
1320
1321 return status;
1322}
1323
1324/**
1325 * beiscsi_get_fw_config()- Get the FW config for the function
1326 * @ctrl: ptr to Ctrl Info
1327 * @phba: ptr to the dev priv structure
1328 *
1329 * Get the FW config and resources available for the function.
1330 * The resources are created based on the count received here.
1331 *
1332 * return
1333 * Success: 0
1334 * Failure: Non-Zero Value
1335 **/
int beiscsi_get_fw_config(struct be_ctrl_info *ctrl,
			  struct beiscsi_hba *phba)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
	uint32_t cid_count, icd_count;
	/* default to failure; flipped to 0 only after all checks pass */
	int status = -EINVAL;
	uint8_t ulp_num = 0;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);

	be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);

	if (be_mbox_notify(ctrl)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : Failed in beiscsi_get_fw_config\n");
		goto fail_init;
	}

	/* FW response formats depend on port id */
	phba->fw_config.phys_port = pfw_cfg->phys_port;
	if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : invalid physical port id %d\n",
			    phba->fw_config.phys_port);
		goto fail_init;
	}

	/* populate and check FW config against min and max values */
	if (!is_chip_be2_be3r(phba)) {
		/* EQ/CQ id counts are only reported by SkyHawk-class FW */
		phba->fw_config.eqid_count = pfw_cfg->eqid_count;
		phba->fw_config.cqid_count = pfw_cfg->cqid_count;
		if (phba->fw_config.eqid_count == 0 ||
		    phba->fw_config.eqid_count > 2048) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BG_%d : invalid EQ count %d\n",
				    phba->fw_config.eqid_count);
			goto fail_init;
		}
		if (phba->fw_config.cqid_count == 0 ||
		    phba->fw_config.cqid_count > 4096) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BG_%d : invalid CQ count %d\n",
				    phba->fw_config.cqid_count);
			goto fail_init;
		}
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : EQ_Count : %d CQ_Count : %d\n",
			    phba->fw_config.eqid_count,
			    phba->fw_config.cqid_count);
	}

	/**
	 * Check on which all ULP iSCSI Protocol is loaded.
	 * Set the Bit for those ULP. This set flag is used
	 * at all places in the code to check on which ULP
	 * iSCSI Protocol is loaded
	 **/
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (pfw_cfg->ulp[ulp_num].ulp_mode &
		    BEISCSI_ULP_ISCSI_INI_MODE) {
			set_bit(ulp_num, &phba->fw_config.ulp_supported);

			/* Get the CID, ICD and Chain count for each ULP */
			phba->fw_config.iscsi_cid_start[ulp_num] =
				pfw_cfg->ulp[ulp_num].sq_base;
			phba->fw_config.iscsi_cid_count[ulp_num] =
				pfw_cfg->ulp[ulp_num].sq_count;

			phba->fw_config.iscsi_icd_start[ulp_num] =
				pfw_cfg->ulp[ulp_num].icd_base;
			phba->fw_config.iscsi_icd_count[ulp_num] =
				pfw_cfg->ulp[ulp_num].icd_count;

			phba->fw_config.iscsi_chain_start[ulp_num] =
				pfw_cfg->chain_icd[ulp_num].chain_base;
			phba->fw_config.iscsi_chain_count[ulp_num] =
				pfw_cfg->chain_icd[ulp_num].chain_count;

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BG_%d : Function loaded on ULP : %d\n"
				    "\tiscsi_cid_count : %d\n"
				    "\tiscsi_cid_start : %d\n"
				    "\t iscsi_icd_count : %d\n"
				    "\t iscsi_icd_start : %d\n",
				    ulp_num,
				    phba->fw_config.
				    iscsi_cid_count[ulp_num],
				    phba->fw_config.
				    iscsi_cid_start[ulp_num],
				    phba->fw_config.
				    iscsi_icd_count[ulp_num],
				    phba->fw_config.
				    iscsi_icd_start[ulp_num]);
		}
	}

	/* this function is not usable if no ULP runs initiator mode */
	if (phba->fw_config.ulp_supported == 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
			    pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
			    pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
		goto fail_init;
	}

	/**
	 * ICD is shared among ULPs. Use icd_count of any one loaded ULP
	 **/
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			break;
	icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	if (icd_count == 0 || icd_count > 65536) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d: invalid ICD count %d\n", icd_count);
		goto fail_init;
	}

	/* CIDs are per-ULP; sanity-check the combined total */
	cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
		    BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
	if (cid_count == 0 || cid_count > 4096) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d: invalid CID count %d\n", cid_count);
		goto fail_init;
	}

	/**
	 * Check FW is dual ULP aware i.e. can handle either
	 * of the protocols.
	 */
	phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
					  BEISCSI_FUNC_DUA_MODE);

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BG_%d : DUA Mode : 0x%x\n",
		    phba->fw_config.dual_ulp_aware);

	/* all set, continue using this FW config */
	status = 0;
fail_init:
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
1483
1484/**
1485 * beiscsi_get_port_name()- Get port name for the function
1486 * @ctrl: ptr to Ctrl Info
1487 * @phba: ptr to the dev priv structure
1488 *
1489 * Get the alphanumeric character for port
1490 *
1491 **/
1492int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
1493{
1494 int ret = 0;
1495 struct be_mcc_wrb *wrb;
1496 struct be_cmd_get_port_name *ioctl;
1497
1498 mutex_lock(&ctrl->mbox_lock);
1499 wrb = wrb_from_mbox(&ctrl->mbox_mem);
1500 memset(wrb, 0, sizeof(*wrb));
1501 ioctl = embedded_payload(wrb);
1502
1503 be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
1504 be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
1505 OPCODE_COMMON_GET_PORT_NAME,
1506 EMBED_MBX_MAX_PAYLOAD_SIZE);
1507 ret = be_mbox_notify(ctrl);
1508 phba->port_name = 0;
1509 if (!ret) {
1510 phba->port_name = ioctl->p.resp.port_names >>
1511 (phba->fw_config.phys_port * 8) & 0xff;
1512 } else {
1513 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1514 "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
1515 ret, ioctl->h.resp_hdr.status);
1516 }
1517
1518 if (phba->port_name == 0)
1519 phba->port_name = '?';
1520
1521 mutex_unlock(&ctrl->mbox_lock);
1522 return ret;
1523}
1524
6694095b
JB
int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_cmd_set_features *ioctl;
	struct be_mcc_wrb *wrb;
	int ret = 0;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	ioctl = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_SET_FEATURES,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);
	/* request UER (unrecoverable-error recovery) support from FW */
	ioctl->feature = BE_CMD_SET_FEATURE_UER;
	ioctl->param_len = sizeof(ioctl->param.req);
	ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT;
	ret = be_mbox_notify(ctrl);
	if (!ret) {
		/* FW grants recovery: remember the UE-to-reset-pending value */
		phba->ue2rp = ioctl->param.resp.ue2rp;
		set_bit(BEISCSI_HBA_UER_SUPP, &phba->state);
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : HBA error recovery supported\n");
	} else {
		/**
		 * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
		 * Older FW versions return this error.
		 */
		if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
		    ret == MCC_STATUS_INVALID_LENGTH)
			__beiscsi_log(phba, KERN_INFO,
				      "BG_%d : HBA error recovery not supported\n");
	}

	mutex_unlock(&ctrl->mbox_lock);
	return ret;
}
4d2ee1e6
JB
1564
1565static u32 beiscsi_get_post_stage(struct beiscsi_hba *phba)
1566{
1567 u32 sem;
1568
1569 if (is_chip_be2_be3r(phba))
1570 sem = ioread32(phba->csr_va + SLIPORT_SEMAPHORE_OFFSET_BEx);
1571 else
1572 pci_read_config_dword(phba->pcidev,
1573 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
1574 return sem;
1575}
1576
1577int beiscsi_check_fw_rdy(struct beiscsi_hba *phba)
1578{
1579 u32 loop, post, rdy = 0;
1580
1581 loop = 1000;
1582 while (loop--) {
1583 post = beiscsi_get_post_stage(phba);
1584 if (post & POST_ERROR_BIT)
1585 break;
1586 if ((post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY) {
1587 rdy = 1;
1588 break;
1589 }
1590 msleep(60);
1591 }
1592
1593 if (!rdy) {
1594 __beiscsi_log(phba, KERN_ERR,
1595 "BC_%d : FW not ready 0x%x\n", post);
1596 }
1597
1598 return rdy;
1599}
1600
4ee1ec42 1601int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
4d2ee1e6
JB
1602{
1603 struct be_ctrl_info *ctrl = &phba->ctrl;
1604 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
fa1261c4 1605 struct be_post_sgl_pages_req *req;
4d2ee1e6
JB
1606 int status;
1607
1608 mutex_lock(&ctrl->mbox_lock);
1609
1610 req = embedded_payload(wrb);
1611 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1612 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1613 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1614 status = be_mbox_notify(ctrl);
1615
1616 mutex_unlock(&ctrl->mbox_lock);
1617 return status;
1618}
1619
1620int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load)
1621{
1622 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1623 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
1624 u8 *endian_check;
1625 int status;
1626
1627 mutex_lock(&ctrl->mbox_lock);
1628 memset(wrb, 0, sizeof(*wrb));
1629
1630 endian_check = (u8 *) wrb;
1631 if (load) {
1632 /* to start communicating */
1633 *endian_check++ = 0xFF;
1634 *endian_check++ = 0x12;
1635 *endian_check++ = 0x34;
1636 *endian_check++ = 0xFF;
1637 *endian_check++ = 0xFF;
1638 *endian_check++ = 0x56;
1639 *endian_check++ = 0x78;
1640 *endian_check++ = 0xFF;
1641 } else {
1642 /* to stop communicating */
1643 *endian_check++ = 0xFF;
1644 *endian_check++ = 0xAA;
1645 *endian_check++ = 0xBB;
1646 *endian_check++ = 0xFF;
1647 *endian_check++ = 0xFF;
1648 *endian_check++ = 0xCC;
1649 *endian_check++ = 0xDD;
1650 *endian_check = 0xFF;
1651 }
1652 be_dws_cpu_to_le(wrb, sizeof(*wrb));
1653
1654 status = be_mbox_notify(ctrl);
1655 if (status)
1656 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1657 "BC_%d : special WRB message failed\n");
1658 mutex_unlock(&ctrl->mbox_lock);
1659 return status;
1660}
1661
1662int beiscsi_init_sliport(struct beiscsi_hba *phba)
1663{
1664 int status;
1665
1666 /* check POST stage before talking to FW */
1667 status = beiscsi_check_fw_rdy(phba);
1668 if (!status)
1669 return -EIO;
1670
d1d5ca88
JB
1671 /* clear all error states after checking FW rdy */
1672 phba->state &= ~BEISCSI_HBA_IN_ERR;
1673
1674 /* check again UER support */
1675 phba->state &= ~BEISCSI_HBA_UER_SUPP;
1676
4d2ee1e6
JB
1677 /*
1678 * SLI COMMON_FUNCTION_RESET completion is indicated by BMBX RDY bit.
1679 * It should clean up any stale info in FW for this fn.
1680 */
1681 status = beiscsi_cmd_function_reset(phba);
1682 if (status) {
1683 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1684 "BC_%d : SLI Function Reset failed\n");
1685 return status;
1686 }
1687
1688 /* indicate driver is loading */
1689 return beiscsi_cmd_special_wrb(&phba->ctrl, 1);
1690}
f79929de
JB
1691
1692/**
1693 * beiscsi_cmd_iscsi_cleanup()- Inform FW to cleanup EP data structures.
1694 * @phba: pointer to dev priv structure
1695 * @ulp: ULP number.
1696 *
1697 * return
1698 * Success: 0
1699 * Failure: Non-Zero Value
1700 **/
int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct iscsi_cleanup_req_v1 *req_v1;
	struct iscsi_cleanup_req *req;
	u16 hdr_ring_id, data_ring_id;
	struct be_mcc_wrb *wrb;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);

	/* cleanup targets this ULP's default HDR and Data rings */
	hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
	data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
	/* request layout is versioned: v0 for BE2/BE3R, v1 for later chips */
	if (is_chip_be2_be3r(phba)) {
		req = embedded_payload(wrb);
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
		req->chute = (1 << ulp);
		/* BE2/BE3 FW creates 8-bit ring id */
		req->hdr_ring_id = hdr_ring_id;
		req->data_ring_id = data_ring_id;
	} else {
		req_v1 = embedded_payload(wrb);
		be_wrb_hdr_prepare(wrb, sizeof(*req_v1), true, 0);
		be_cmd_hdr_prepare(&req_v1->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CLEANUP,
				   sizeof(*req_v1));
		req_v1->hdr.version = 1;
		req_v1->chute = (1 << ulp);
		/* v1 carries 16-bit little-endian ring ids */
		req_v1->hdr_ring_id = cpu_to_le16(hdr_ring_id);
		req_v1->data_ring_id = cpu_to_le16(data_ring_id);
	}

	status = be_mbox_notify(ctrl);
	if (status)
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BG_%d : %s failed %d\n", __func__, ulp);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
d1d5ca88
JB
1743
1744/*
1745 * beiscsi_detect_ue()- Detect Unrecoverable Error on adapter
1746 * @phba: Driver priv structure
1747 *
1748 * Read registers linked to UE and check for the UE status
1749 **/
1750int beiscsi_detect_ue(struct beiscsi_hba *phba)
1751{
1752 uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
1753 uint32_t ue_hi = 0, ue_lo = 0;
1754 uint8_t i = 0;
1755 int ret = 0;
1756
1757 pci_read_config_dword(phba->pcidev,
1758 PCICFG_UE_STATUS_LOW, &ue_lo);
1759 pci_read_config_dword(phba->pcidev,
1760 PCICFG_UE_STATUS_MASK_LOW,
1761 &ue_mask_lo);
1762 pci_read_config_dword(phba->pcidev,
1763 PCICFG_UE_STATUS_HIGH,
1764 &ue_hi);
1765 pci_read_config_dword(phba->pcidev,
1766 PCICFG_UE_STATUS_MASK_HI,
1767 &ue_mask_hi);
1768
1769 ue_lo = (ue_lo & ~ue_mask_lo);
1770 ue_hi = (ue_hi & ~ue_mask_hi);
1771
1772
1773 if (ue_lo || ue_hi) {
1774 set_bit(BEISCSI_HBA_IN_UE, &phba->state);
1775 __beiscsi_log(phba, KERN_ERR,
1776 "BC_%d : HBA error detected\n");
1777 ret = 1;
1778 }
1779
1780 if (ue_lo) {
1781 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
1782 if (ue_lo & 1)
1783 __beiscsi_log(phba, KERN_ERR,
1784 "BC_%d : UE_LOW %s bit set\n",
1785 desc_ue_status_low[i]);
1786 }
1787 }
1788
1789 if (ue_hi) {
1790 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
1791 if (ue_hi & 1)
1792 __beiscsi_log(phba, KERN_ERR,
1793 "BC_%d : UE_HIGH %s bit set\n",
1794 desc_ue_status_hi[i]);
1795 }
1796 }
1797 return ret;
1798}
1799
1800/*
1801 * beiscsi_detect_tpe()- Detect Transient Parity Error on adapter
1802 * @phba: Driver priv structure
1803 *
1804 * Read SLIPORT SEMAPHORE register to check for UER
1805 *
1806 **/
1807int beiscsi_detect_tpe(struct beiscsi_hba *phba)
1808{
1809 u32 post, status;
1810 int ret = 0;
1811
1812 post = beiscsi_get_post_stage(phba);
1813 status = post & POST_STAGE_MASK;
1814 if ((status & POST_ERR_RECOVERY_CODE_MASK) ==
1815 POST_STAGE_RECOVERABLE_ERR) {
1816 set_bit(BEISCSI_HBA_IN_TPE, &phba->state);
1817 __beiscsi_log(phba, KERN_INFO,
1818 "BC_%d : HBA error recoverable: 0x%x\n", post);
1819 ret = 1;
1820 } else {
1821 __beiscsi_log(phba, KERN_INFO,
1822 "BC_%d : HBA in UE: 0x%x\n", post);
1823 }
1824
1825 return ret;
1826}