/**
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@avagotech.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <scsi/iscsi_proto.h>

#include "be_main.h"
#include "be.h"
#include "be_mgmt.h"

int be_chk_reset_complete(struct beiscsi_hba *phba)
{
	unsigned int num_loop;
	u8 *mpu_sem = NULL;
	u32 status;

	num_loop = 1000;
	mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
	msleep(5000);

	while (num_loop) {
		status = readl((void *)mpu_sem);

		if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
			break;
		msleep(60);
		num_loop--;
	}

	if ((status & 0x80000000) || (!num_loop)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : Failed in be_chk_reset_complete status = 0x%x\n",
			    status);
		return -EIO;
	}

	return 0;
}

struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
				 unsigned int *ref_tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb = NULL;
	unsigned int tag;

	spin_lock(&phba->ctrl.mcc_lock);
	if (mccq->used == mccq->len) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC queue full: WRB used %u tag avail %u\n",
			    mccq->used, phba->ctrl.mcc_tag_available);
		goto alloc_failed;
	}

	if (!phba->ctrl.mcc_tag_available)
		goto alloc_failed;

	tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
	if (!tag) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n",
			    phba->ctrl.mcc_tag_available,
			    phba->ctrl.mcc_alloc_index);
		goto alloc_failed;
	}

	/* return this tag for further reference */
	*ref_tag = tag;
	phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
	phba->ctrl.mcc_tag_status[tag] = 0;
	phba->ctrl.ptag_state[tag].tag_state = 0;
	phba->ctrl.ptag_state[tag].cbfn = NULL;
	phba->ctrl.mcc_tag_available--;
	if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
		phba->ctrl.mcc_alloc_index = 0;
	else
		phba->ctrl.mcc_alloc_index++;

	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	wrb->tag0 = tag;
	wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK;
	queue_head_inc(mccq);
	mccq->used++;

alloc_failed:
	spin_unlock(&phba->ctrl.mcc_lock);
	return wrb;
}

void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
{
	struct be_queue_info *mccq = &ctrl->mcc_obj.q;

	spin_lock(&ctrl->mcc_lock);
	tag = tag & MCC_Q_CMD_TAG_MASK;
	ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
	if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
		ctrl->mcc_free_index = 0;
	else
		ctrl->mcc_free_index++;
	ctrl->mcc_tag_available++;
	mccq->used--;
	spin_unlock(&ctrl->mcc_lock);
}
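
/*
 * Editor's note (illustrative sketch, not part of the original source): a
 * typical MCC command in this driver uses the tag/WRB pool above roughly as
 * follows; be_cmd_set_vlan() later in this file is a real instance of the
 * pattern.
 *
 *	unsigned int tag;
 *	struct be_mcc_wrb *wrb;
 *
 *	mutex_lock(&phba->ctrl.mbox_lock);
 *	wrb = alloc_mcc_wrb(phba, &tag);
 *	if (!wrb) {
 *		mutex_unlock(&phba->ctrl.mbox_lock);
 *		return 0;
 *	}
 *	// fill embedded_payload(wrb) with the request
 *	be_mcc_notify(phba, tag);	// sets MCC_TAG_STATE_RUNNING, rings doorbell
 *	mutex_unlock(&phba->ctrl.mbox_lock);
 *	// wait for completion; this also calls free_mcc_wrb() for the tag
 *	rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
 */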

/**
 * beiscsi_fail_session(): Close the session with an appropriate error
 * @cls_session: ptr to session
 **/
void beiscsi_fail_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}

/*
 * __beiscsi_mcc_compl_status - Return the status of MCC completion
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 */
int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
			       unsigned int tag,
			       struct be_mcc_wrb **wrb,
			       struct be_dma_mem *mbx_cmd_mem)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	uint16_t status = 0, addl_status = 0, wrb_num = 0;
	struct be_cmd_resp_hdr *mbx_resp_hdr;
	struct be_cmd_req_hdr *mbx_hdr;
	struct be_mcc_wrb *temp_wrb;
	uint32_t mcc_tag_status;
	int rc = 0;

	mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
	status = (mcc_tag_status & CQE_STATUS_MASK);
	addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
			CQE_STATUS_ADDL_SHIFT);

	if (mbx_cmd_mem) {
		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
	} else {
		wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
			  CQE_STATUS_WRB_SHIFT;
		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
		mbx_hdr = embedded_payload(temp_wrb);

		if (wrb)
			*wrb = temp_wrb;
	}

	if (status || addl_status) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n",
			    mbx_hdr->subsystem, mbx_hdr->opcode,
			    status, addl_status);
		rc = -EIO;
		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
			mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
				    BEISCSI_LOG_CONFIG,
				    "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n",
				    mbx_resp_hdr->response_length,
				    mbx_resp_hdr->actual_resp_len);
			rc = -EAGAIN;
		}
	}

	return rc;
}
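
/*
 * Editor's note (worked example, not part of the original source): using the
 * tag-status layout described in beiscsi_process_mcc_compl() below
 * ([31] = valid, [23:16] = wrb index, [15:8] = extd_status,
 * [7:0] = compl_status), a mcc_tag_status word of 0x80050102 decodes to
 * wrb index 5, extd_status 1 and compl_status 2; the function above would
 * log it as a failed command and return a negative error code.
 */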

/*
 * beiscsi_mccq_compl_wait()- Wait for completion of an MBX command in MCC CQ
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * Waits for MBX completion with the passed TAG.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
			    unsigned int tag,
			    struct be_mcc_wrb **wrb,
			    struct be_dma_mem *mbx_cmd_mem)
{
	int rc = 0;

	if (beiscsi_hba_in_error(phba)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/* wait for the mccq completion */
	rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag],
					      phba->ctrl.mcc_tag_status[tag],
					      msecs_to_jiffies(
						BEISCSI_HOST_MBX_TIMEOUT));

	/**
	 * If the MBOX cmd timeout expires, the tag and resources allocated
	 * for the cmd are not freed until FW returns the completion.
	 */
	if (rc <= 0) {
		struct be_dma_mem *tag_mem;

		/**
		 * PCI/DMA memory allocated and posted in non-embedded mode
		 * will have mbx_cmd_mem != NULL.
		 * Save virtual and bus addresses for the command so that it
		 * can be freed later.
		 **/
		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
		if (mbx_cmd_mem) {
			tag_mem->size = mbx_cmd_mem->size;
			tag_mem->va = mbx_cmd_mem->va;
			tag_mem->dma = mbx_cmd_mem->dma;
		} else
			tag_mem->size = 0;

		/* first make tag_mem_state visible to all */
		wmb();
		set_bit(MCC_TAG_STATE_TIMEOUT,
			&phba->ctrl.ptag_state[tag].tag_state);

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Completion timed out\n");
		return -EBUSY;
	}

	rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem);

	free_mcc_wrb(&phba->ctrl, tag);
	return rc;
}
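
/*
 * Editor's note: on the timeout path above, the tag is deliberately not
 * recycled here; MCC_TAG_STATE_TIMEOUT is left set and the saved DMA region
 * (tag_mem_state) plus the WRB/tag are released only when the firmware
 * eventually returns the completion, in beiscsi_process_mcc_compl() below.
 */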

/*
 * beiscsi_process_mbox_compl()- Check the MBX completion status
 * @ctrl: Function specific MBX data structure
 * @compl: Completion status of MBX Command
 *
 * Check for the MBX completion status when BMBX method used
 *
 * return
 * Success: Zero
 * Failure: Non-Zero
 **/
static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
				      struct be_mcc_compl *compl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
	u16 compl_status, extd_status;

	/**
	 * To check if valid bit is set, check the entire word as we don't know
	 * the endianness of the data (old entry is host endian while a new
	 * entry is little endian)
	 */
	if (!compl->flags) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : BMBX busy, no completion\n");
		return -EBUSY;
	}
	compl->flags = le32_to_cpu(compl->flags);
	WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);

	/**
	 * Just swap the status to host endian;
	 * mcc tag is opaquely copied from mcc_wrb.
	 */
	be_dws_le_to_cpu(compl, 4);
	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		      CQE_STATUS_EXTD_MASK;
	/* Need to reset the entire word that houses the valid bit */
	compl->flags = 0;

	if (compl_status == MCC_STATUS_SUCCESS)
		return 0;

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
		    hdr->subsystem, hdr->opcode, compl_status, extd_status);
	return compl_status;
}

static void beiscsi_process_async_link(struct beiscsi_hba *phba,
				       struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt;

	evt = (struct be_async_event_link_state *)compl;

	phba->port_speed = evt->port_speed;
	/**
	 * Check logical link status in ASYNC event.
	 * This has been newly introduced in SKH-R Firmware 10.0.338.45.
	 **/
	if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
		set_bit(BEISCSI_HBA_LINK_UP, &phba->state);
		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Up on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
	} else {
		clear_bit(BEISCSI_HBA_LINK_UP, &phba->state);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Down on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
		iscsi_host_for_each_session(phba->shost,
					    beiscsi_fail_session);
	}
}

static char *beiscsi_port_misconf_event_msg[] = {
	"Physical Link is functional.",
	"Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
	"Optics of two types installed - Remove one optic or install matching pair of optics.",
	"Incompatible optics - Replace with compatible optics for card to function.",
	"Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics - Replace with Avago Certified optics to enable link operation."
};

static void beiscsi_process_async_sli(struct beiscsi_hba *phba,
				      struct be_mcc_compl *compl)
{
	struct be_async_event_sli *async_sli;
	u8 evt_type, state, old_state, le;
	char *sev = KERN_WARNING;
	char *msg = NULL;

	evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT;
	evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK;

	/* processing only MISCONFIGURED physical port event */
	if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
		return;

	async_sli = (struct be_async_event_sli *)compl;
	state = async_sli->event_data1 >>
		(phba->fw_config.phys_port * 8) & 0xff;
	le = async_sli->event_data2 >>
	     (phba->fw_config.phys_port * 8) & 0xff;

	old_state = phba->optic_state;
	phba->optic_state = state;

	if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) {
		/* fw is reporting a state we don't know, log and return */
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
			      phba->port_name, async_sli->event_data1);
		return;
	}

	if (ASYNC_SLI_LINK_EFFECT_VALID(le)) {
		/* log link effect for unqualified-4, uncertified-5 optics */
		if (state > 3)
			msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ?
				" Link is non-operational." :
				" Link is operational.";
		/* 1 - info */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1)
			sev = KERN_INFO;
		/* 2 - error */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2)
			sev = KERN_ERR;
	}

	if (old_state != phba->optic_state)
		__beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n",
			      phba->port_name,
			      beiscsi_port_misconf_event_msg[state],
			      !msg ? "" : msg);
}

void beiscsi_process_async_event(struct beiscsi_hba *phba,
				 struct be_mcc_compl *compl)
{
	char *sev = KERN_INFO;
	u8 evt_code;

	/* interpret flags as an async trailer */
	evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT;
	evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK;
	switch (evt_code) {
	case ASYNC_EVENT_CODE_LINK_STATE:
		beiscsi_process_async_link(phba, compl);
		break;
	case ASYNC_EVENT_CODE_ISCSI:
		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
		sev = KERN_ERR;
		break;
	case ASYNC_EVENT_CODE_SLI:
		beiscsi_process_async_sli(phba, compl);
		break;
	default:
		/* event not registered */
		sev = KERN_ERR;
	}

	beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n",
		    evt_code, compl->status, compl->flags);
}
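
/*
 * Editor's note: the three async event codes handled above (LINK_STATE,
 * ISCSI and SLI) are exactly the ones subscribed to via async_evt_bitmap in
 * beiscsi_cmd_mccq_create() later in this file; anything else falls through
 * to the default branch and is only logged.
 */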

int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
			      struct be_mcc_compl *compl)
{
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u16 compl_status, extd_status;
	struct be_dma_mem *tag_mem;
	unsigned int tag, wrb_idx;

	be_dws_le_to_cpu(compl, 4);
	tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK);
	wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT;

	if (!test_bit(MCC_TAG_STATE_RUNNING,
		      &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX |
			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX cmd completed but not posted\n");
		return 0;
	}

	/* end MCC with this tag */
	clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);

	if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Completion for timeout Command from FW\n");
		/**
		 * Check for the size before freeing resource.
		 * Only for non-embedded cmd, PCI resource is allocated.
		 **/
		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
		if (tag_mem->size) {
			pci_free_consistent(ctrl->pdev, tag_mem->size,
					    tag_mem->va, tag_mem->dma);
			tag_mem->size = 0;
		}
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		      CQE_STATUS_EXTD_MASK;
	/* The ctrl.mcc_tag_status[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	ctrl->mcc_tag_status[tag] = CQE_VALID_MASK;
	ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT);
	ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) &
				     CQE_STATUS_ADDL_MASK;
	ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);

	if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) {
		if (ctrl->ptag_state[tag].cbfn)
			ctrl->ptag_state[tag].cbfn(phba, tag);
		else
			__beiscsi_log(phba, KERN_ERR,
				      "BC_%d : MBX ASYNC command with no callback\n");
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) {
		/* just check completion status and free wrb */
		__beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	wake_up_interruptible(&ctrl->mcc_wait[tag]);
	return 0;
}
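
/*
 * Editor's note: beiscsi_process_mcc_compl() above distinguishes four tag
 * states: a TIMEOUT tag has its saved DMA memory and WRB freed here, an
 * ASYNC tag invokes its registered callback, an IGNORE tag only has its
 * status checked before the WRB is freed, and a normal synchronous tag just
 * wakes the waiter in beiscsi_mccq_compl_wait(), which does the freeing.
 */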

/*
 * be_mcc_compl_poll()- Wait for MBX completion
 * @phba: driver private structure
 * @tag: Tag for the MBX command
 *
 * Wait till no more pending mcc requests are present
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 *
 **/
int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	int i;

	if (!test_bit(MCC_TAG_STATE_RUNNING,
		      &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : tag %u state not running\n", tag);
		return 0;
	}
	for (i = 0; i < mcc_timeout; i++) {
		if (beiscsi_hba_in_error(phba))
			return -EIO;

		beiscsi_process_mcc_cq(phba);
		/* after polling, wrb and tag need to be released */
		if (!test_bit(MCC_TAG_STATE_RUNNING,
			      &ctrl->ptag_state[tag].tag_state)) {
			free_mcc_wrb(ctrl, tag);
			break;
		}
		udelay(100);
	}

	if (i < mcc_timeout)
		return 0;

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : FW Timed Out\n");
	set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state);
	beiscsi_ue_detect(phba);
	return -EBUSY;
}

void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	u32 val = 0;

	set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state);
	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	/* make request available for DMA */
	wmb();
	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}

/*
 * be_mbox_db_ready_poll()- Check ready status
 * @ctrl: Function specific MBX data structure
 *
 * Check for the ready status of FW to send BMBX
 * commands to adapter.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
{
	/* wait 30s for generic non-flash MBOX operation */
#define BEISCSI_MBX_RDY_BIT_TIMEOUT 30000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	unsigned long timeout;
	u32 ready;

	/*
	 * This BMBX busy wait path is used during init only.
	 * For the commands executed during init, this timeout
	 * is more than sufficient.
	 */
	timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
	do {
		if (beiscsi_hba_in_error(phba))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -EIO;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			return 0;

		if (time_after(jiffies, timeout))
			break;
		/* 1ms sleep is enough in most cases */
		schedule_timeout_uninterruptible(msecs_to_jiffies(1));
	} while (!ready);

	beiscsi_log(phba, KERN_ERR,
		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : FW Timed Out\n");
	set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state);
	beiscsi_ue_detect(phba);
	return -EBUSY;
}

/*
 * be_mbox_notify: Notify adapter of new BMBX command
 * @ctrl: Function specific MBX data structure
 *
 * Ring doorbell to inform adapter of a BMBX command
 * to process
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* RDY is set; small delay before CQE read. */
	udelay(1);

	status = beiscsi_process_mbox_compl(ctrl, &mbox->compl);
	return status;
}
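
/*
 * Editor's note (illustrative, not part of the original source): the BMBX
 * handshake in be_mbox_notify() is a two-phase doorbell write. The upper 32
 * bits of the mailbox DMA address are written first with the "HI" bit set,
 * then the lower address bits (the mailbox is 16-byte aligned, hence the
 * ">> 4") are written with "HI" clear; after each write the RDY bit is
 * polled via be_mbox_db_ready_poll() before the completion entry is read.
 */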

void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
			bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				 MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 8);
}

void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
			u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages;
	u64 dma = (u64) mem->dma;

	buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32) 1023);
		}
	}
	return multiplier;
}
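
/*
 * Worked example (editor's note, not from the original source): with
 * usec_delay = 96, eq_delay_to_mult() computes interrupt_rate =
 * 1000000 / 96 = 10416, then multiplier = (651042 - 10416) * 10 / 10416
 * = 615, rounded to (615 + 5) / 10 = 62. A delay of 0 yields a multiplier
 * of 0 (no moderation), and the result is always capped at 1023.
 */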

struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
		      eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}

/**
 * be_cmd_fw_initialize()- Initialize FW
 * @ctrl: Pointer to function control structure
 *
 * Send FW initialize pattern for the function.
 *
 * return
 * Success: 0
 * Failure: Non-Zero value
 **/
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	u8 *endian_check;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	endian_check = (u8 *) wrb;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x12;
	*endian_check++ = 0x34;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x56;
	*endian_check++ = 0x78;
	*endian_check++ = 0xFF;
	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : be_cmd_fw_initialize Failed\n");

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
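
/*
 * Editor's note: be_cmd_fw_initialize() above and be_cmd_fw_uninit() below do
 * not build a normal command header; they write a fixed 8-byte signature
 * (FF 12 34 FF FF 56 78 FF to initialize, FF AA BB FF FF CC DD FF to
 * uninitialize) directly into the bootstrap mailbox WRB and ring the BMBX
 * doorbell, presumably the pattern the firmware recognizes for the
 * function-level init/uninit handshake.
 */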

/**
 * be_cmd_fw_uninit()- Uninitialize FW
 * @ctrl: Pointer to function control structure
 *
 * Send FW uninitialize pattern for the function
 *
 * return
 * Success: 0
 * Failure: Non-Zero value
 **/
int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	u8 *endian_check;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	endian_check = (u8 *) wrb;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xAA;
	*endian_check++ = 0xBB;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xCC;
	*endian_check++ = 0xDD;
	*endian_check = 0xFF;

	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : be_cmd_fw_uninit Failed\n");

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}

int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *cq, struct be_queue_info *eq,
			  bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_cq_context, coalescwm,
			      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
			      PCI_FUNC(ctrl->pdev->devfn));
	} else {
		req->hdr.version = MBX_CMD_VER2;
		req->page_size = 1;
		AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
			      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	} else
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : In be_cmd_cq_create, status=0x%08x\n",
			    status);

	mutex_unlock(&ctrl->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
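
/*
 * Editor's note (illustrative, not from the original source): for a queue of
 * 256 entries, fls(256) = 9, so be_encoded_q_len() returns 9 (log2 + 1); an
 * encoded value of 16 (e.g. a 32K-entry ring) is mapped to 0 by this
 * encoding.
 */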

int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
			    struct be_queue_info *mccq,
			    struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create_ext *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	struct be_ctrl_info *ctrl;
	void *ctxt;
	int status;

	mutex_lock(&phba->ctrl.mbox_lock);
	ctrl = &phba->ctrl;
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
		      PCI_FUNC(phba->pcidev->devfn));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&phba->ctrl.mbox_lock);

	return status;
}

int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u8 subsys = 0, opcode = 0;
	int status;

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BC_%d : In beiscsi_cmd_q_destroy "
		    "queue_type : %d\n", queue_type);

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		mutex_unlock(&ctrl->mbox_lock);
		BUG();
		return -ENXIO;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}

/**
 * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
 * @ctrl: ptr to ctrl_info
 * @cq: Completion Queue
 * @dq: Default Queue
 * @length: ring size
 * @entry_size: size of each entry in DEFQ
 * @is_header: Header or Data DEFQ
 * @ulp_num: Bind to which ULP
 *
 * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted
 * on this queue by the FW
 *
 * return
 * Success: 0
 * Failure: Non-Zero Value
 *
 **/
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size, uint8_t is_header,
				    uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	void *ctxt = &req->context;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
					       sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      cq_id_recv, ctxt, cq->id);
	} else {
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
					       sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      cq_id_recv, ctxt, cq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_ring *defq_ring;
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
		if (is_header)
			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
		else
			defq_ring = &phba->phwi_ctrlr->
				    default_pdu_data[ulp_num];

		defq_ring->id = dq->id;

		if (!phba->fw_config.dual_ulp_aware) {
			defq_ring->ulp_num = BEISCSI_ULP0;
			defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
		} else {
			defq_ring->ulp_num = resp->ulp_num;
			defq_ring->doorbell_offset = resp->doorbell_offset;
		}
	}
	mutex_unlock(&ctrl->mbox_lock);

	return status;
}

/**
 * be_cmd_wrbq_create()- Create WRBQ
 * @ctrl: ptr to ctrl_info
 * @q_mem: memory details for the queue
 * @wrbq: queue info
 * @pwrb_context: ptr to wrb_context
 * @ulp_num: ULP on which the WRBQ is to be created
 *
 * Create WRBQ on the passed ULP_NUM.
 *
 **/
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
		       struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq,
		       struct hwi_wrb_context *pwrb_context,
		       uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;

		pwrb_context->cid = wrbq->id;
		if (!phba->fw_config.dual_ulp_aware) {
			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
			pwrb_context->ulp_num = BEISCSI_ULP0;
		} else {
			pwrb_context->ulp_num = resp->ulp_num;
			pwrb_context->doorbell_offset = resp->doorbell_offset;
		}
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
				   struct be_dma_mem *q_mem)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_template_pages_req *req = embedded_payload(wrb);
	int status;

	mutex_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_remove_template_pages_req *req = embedded_payload(wrb);
	int status;

	mutex_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));

	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;

	status = be_mbox_notify(ctrl);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	if (num_pages == 0xff)
		num_pages = 1;

	mutex_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BC_%d : FW CMD to map iscsi frags failed.\n");

			goto error;
		}
	} while (num_pages > 0);
error:
	mutex_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}

int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	int status;

	mutex_lock(&ctrl->mbox_lock);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
	status = be_mbox_notify(ctrl);

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}

/**
 * be_cmd_set_vlan()- Configure VLAN parameters on the adapter
 * @phba: device priv structure instance
 * @vlan_tag: TAG to be set
 *
 * Set the VLAN_TAG for the adapter or Disable VLAN on adapter
 *
 * returns
 * TAG for the MBX Cmd, or 0 if the WRB/tag could not be allocated
 **/
int be_cmd_set_vlan(struct beiscsi_hba *phba,
		    uint16_t vlan_tag)
{
	unsigned int tag;
	struct be_mcc_wrb *wrb;
	struct be_cmd_set_vlan_req *req;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	if (mutex_lock_interruptible(&ctrl->mbox_lock))
		return 0;
	wrb = alloc_mcc_wrb(phba, &tag);
	if (!wrb) {
		mutex_unlock(&ctrl->mbox_lock);
		return 0;
	}

	req = embedded_payload(wrb);
	be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
			   sizeof(*req));

	req->interface_hndl = phba->interface_handle;
	req->vlan_priority = vlan_tag;

	be_mcc_notify(phba, tag);
	mutex_unlock(&ctrl->mbox_lock);

	return tag;
}
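
/*
 * Editor's note (usage sketch, not from the original source): like other
 * MCC-based commands in this file, be_cmd_set_vlan() only posts the request;
 * the caller is expected to wait on the returned tag, e.g.:
 *
 *	tag = be_cmd_set_vlan(phba, vlan_tag);
 *	if (!tag)
 *		return -EBUSY;
 *	return beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
 */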

int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_cmd_set_features *ioctl;
	struct be_mcc_wrb *wrb;
	int ret = 0;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	ioctl = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_SET_FEATURES,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);
	ioctl->feature = BE_CMD_SET_FEATURE_UER;
	ioctl->param_len = sizeof(ioctl->param.req);
	ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT;
	ret = be_mbox_notify(ctrl);
	if (!ret) {
		phba->ue2rp = ioctl->param.resp.ue2rp;
		set_bit(BEISCSI_HBA_UER_SUPP, &phba->state);
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : HBA error recovery supported\n");
	} else {
		/**
		 * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
		 * Older FW versions return this error.
		 */
		if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
		    ret == MCC_STATUS_INVALID_LENGTH)
			__beiscsi_log(phba, KERN_INFO,
				      "BG_%d : HBA error recovery not supported\n");
	}

	mutex_unlock(&ctrl->mbox_lock);
	return ret;
}