/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <scsi/scsi_tcq.h>
#include <linux/utsname.h>
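/*
 * Overview (descriptive note): this file carries the ISPFx00 (QLAFX00)
 * specific entry points of the qla2xxx driver -- mailbox command handling,
 * adapter/firmware bring-up, SOC reset, request/response ring setup and the
 * FX00 flavours of the discovery and IOCB completion helpers defined below.
 */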
/* QLAFX00 specific Mailbox implementation functions */

/*
 * qlafx00_mailbox_command
 *	Issue mailbox command and wait for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS - cmd performed successfully
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 */
static int
qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
{
    int rval;
    unsigned long flags = 0;
    device_reg_t __iomem *reg;
    uint8_t abort_active;
    uint8_t io_lock_on;
    uint16_t command = 0;
    uint32_t *iptr;
    uint32_t __iomem *optr;
    uint32_t cnt;
    uint32_t mboxes;
    unsigned long wait_time;
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

    if (ha->pdev->error_state > pci_channel_io_frozen) {
        ql_log(ql_log_warn, vha, 0x115c,
            "error_state is greater than pci_channel_io_frozen, "
            "exiting.\n");
        return QLA_FUNCTION_TIMEOUT;
    }

    if (vha->device_flags & DFLG_DEV_FAILED) {
        ql_log(ql_log_warn, vha, 0x115f,
            "Device in failed state, exiting.\n");
        return QLA_FUNCTION_TIMEOUT;
    }

    reg = ha->iobase;
    io_lock_on = base_vha->flags.init_done;

    rval = QLA_SUCCESS;
    abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

    if (ha->flags.pci_channel_io_perm_failure) {
        ql_log(ql_log_warn, vha, 0x1175,
            "Perm failure on EEH timeout MBX, exiting.\n");
        return QLA_FUNCTION_TIMEOUT;
    }

    if (ha->flags.isp82xx_fw_hung) {
        /* Setting Link-Down error */
        mcp->mb[0] = MBS_LINK_DOWN_ERROR;
        ql_log(ql_log_warn, vha, 0x1176,
            "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
        rval = QLA_FUNCTION_FAILED;
        goto premature_exit;
    }

    /*
     * Wait for active mailbox commands to finish by waiting at most tov
     * seconds. This is to serialize actual issuing of mailbox cmds during
     * non ISP abort time.
     */
    if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
        /* Timeout occurred. Return error. */
        ql_log(ql_log_warn, vha, 0x1177,
            "Cmd access timeout, cmd=0x%x, Exiting.\n",
            mcp->mb[0]);
        return QLA_FUNCTION_TIMEOUT;
    }

    ha->flags.mbox_busy = 1;
    /* Save mailbox command for debug */

    ql_dbg(ql_dbg_mbx, vha, 0x1178,
        "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Load mailbox registers. */
    optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;

    iptr = mcp->mb;
    command = mcp->mb[0];
    mboxes = mcp->out_mb;

    for (cnt = 0; cnt < ha->mbx_count; cnt++) {
        if (mboxes & BIT_0)
            WRT_REG_DWORD(optr, *iptr);

        mboxes >>= 1;
        optr++;
        iptr++;
    }

    /* Issue set host interrupt command to send cmd out. */
    ha->flags.mbox_int = 0;
    clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

    ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
        (uint8_t *)mcp->mb, 16);
    ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
        ((uint8_t *)mcp->mb + 0x10), 16);
    ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
        ((uint8_t *)mcp->mb + 0x20), 8);

    /* Unlock mbx registers and wait for interrupt */
    ql_dbg(ql_dbg_mbx, vha, 0x1179,
        "Going to unlock irq & waiting for interrupts. "
        "jiffies=%lx.\n", jiffies);

    /* Wait for mbx cmd completion until timeout */
    if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
        set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

        QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
    } else {
        ql_dbg(ql_dbg_mbx, vha, 0x112c,
            "Cmd=%x Polling Mode.\n", command);

        QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        wait_time = jiffies + mcp->tov * HZ;	/* wait at most tov secs */
        while (!ha->flags.mbox_int) {
            if (time_after(jiffies, wait_time))
                break;

            /* Check for pending interrupts. */
            qla2x00_poll(ha->rsp_q_map[0]);

            if (!ha->flags.mbox_int &&
                !(IS_QLA2200(ha) &&
                command == MBC_LOAD_RISC_RAM_EXTENDED))
                usleep_range(10000, 11000);
        } /* while */
        ql_dbg(ql_dbg_mbx, vha, 0x112d,
            "Waited %d sec.\n",
            (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
    }

    /* Check whether we timed out */
    if (ha->flags.mbox_int) {
        uint32_t *iptr2;

        ql_dbg(ql_dbg_mbx, vha, 0x112e,
            "Cmd=%x completed.\n", command);

        /* Got interrupt. Clear the flag. */
        ha->flags.mbox_int = 0;
        clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

        if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
            rval = QLA_FUNCTION_FAILED;

        /* Load return mailbox registers. */
        iptr2 = mcp->mb;
        iptr = (uint32_t *)&ha->mailbox_out32[0];
        mboxes = mcp->in_mb;
        for (cnt = 0; cnt < ha->mbx_count; cnt++) {
            if (mboxes & BIT_0)
                *iptr2 = *iptr;

            mboxes >>= 1;
            iptr2++;
            iptr++;
        }
    } else {
        rval = QLA_FUNCTION_TIMEOUT;
    }

    ha->flags.mbox_busy = 0;

    if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
        ql_dbg(ql_dbg_mbx, vha, 0x113a,
            "checking for additional resp interrupt.\n");

        /* polling mode for non isp_abort commands. */
        qla2x00_poll(ha->rsp_q_map[0]);
    }

    if (rval == QLA_FUNCTION_TIMEOUT &&
        mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
        if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
            ha->flags.eeh_busy) {
            /* not in dpc. schedule it for dpc to take over. */
            ql_dbg(ql_dbg_mbx, vha, 0x115d,
                "Timeout, schedule isp_abort_needed.\n");

            if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
                !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
                !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

                ql_log(ql_log_info, base_vha, 0x115e,
                    "Mailbox cmd timeout occurred, cmd=0x%x, "
                    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
                    "abort.\n", command, mcp->mb[0],
                    ha->flags.eeh_busy);
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                qla2xxx_wake_dpc(vha);
            }
        } else if (!abort_active) {
            /* call abort directly since we are in the DPC thread */
            ql_dbg(ql_dbg_mbx, vha, 0x1160,
                "Timeout, calling abort_isp.\n");

            if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
                !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
                !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

                ql_log(ql_log_info, base_vha, 0x1161,
                    "Mailbox cmd timeout occurred, cmd=0x%x, "
                    "mb[0]=0x%x. Scheduling ISP abort ",
                    command, mcp->mb[0]);

                set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
                clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                if (ha->isp_ops->abort_isp(vha)) {
                    /* Failed. retry later. */
                    set_bit(ISP_ABORT_NEEDED,
                        &vha->dpc_flags);
                }
                clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
                ql_dbg(ql_dbg_mbx, vha, 0x1162,
                    "Finished abort_isp.\n");
            }
        }
    }

premature_exit:
    /* Allow next mbx cmd to come in. */
    complete(&ha->mbx_cmd_comp);

    if (rval) {
        ql_log(ql_log_warn, base_vha, 0x1163,
            "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
            "mb[3]=%x, cmd=%x ****.\n",
            mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
    } else {
        ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
    }

    return rval;
}
/*
 * qlafx00_driver_shutdown
 *	Indicate a driver shutdown to firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	local function return status code.
 */
int
qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
{
    int rval;
    struct mbx_cmd_32 mc;
    struct mbx_cmd_32 *mcp = &mc;

    ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
        "Entered %s.\n", __func__);

    mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
    mcp->tov = MBX_TOV_SECONDS;

    rval = qlafx00_mailbox_command(vha, mcp);

    if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_mbx, vha, 0x1167,
            "Failed=%x.\n", rval);
    } else {
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
            "Done %s.\n", __func__);
    }

    return rval;
}
/*
 * qlafx00_get_firmware_state
 *	Get adapter firmware state.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 */
static int
qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
{
    int rval;
    struct mbx_cmd_32 mc;
    struct mbx_cmd_32 *mcp = &mc;

    ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
        "Entered %s.\n", __func__);

    mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
    mcp->in_mb = MBX_1|MBX_0;
    mcp->tov = MBX_TOV_SECONDS;

    rval = qlafx00_mailbox_command(vha, mcp);

    /* Return firmware states. */
    states[0] = mcp->mb[1];

    if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_mbx, vha, 0x116a,
            "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
    } else {
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
            "Done %s.\n", __func__);
    }
    return rval;
}
/*
 * qlafx00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 */
int
qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
    int rval;
    struct mbx_cmd_32 mc;
    struct mbx_cmd_32 *mcp = &mc;
    struct qla_hw_data *ha = vha->hw;

    ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
        "Entered %s.\n", __func__);

    mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
    mcp->mb[2] = MSD(ha->init_cb_dma);
    mcp->mb[3] = LSD(ha->init_cb_dma);

    mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
    mcp->buf_size = size;
    mcp->flags = MBX_DMA_OUT;
    mcp->tov = MBX_TOV_SECONDS;
    rval = qlafx00_mailbox_command(vha, mcp);

    if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_mbx, vha, 0x116d,
            "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
    } else {
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
            "Done %s.\n", __func__);
    }
    return rval;
}
/*
 * qlafx00_mbx_reg_test
 */
static int
qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
{
    int rval;
    struct mbx_cmd_32 mc;
    struct mbx_cmd_32 *mcp = &mc;

    ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
        "Entered %s.\n", __func__);

    mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
    mcp->mb[10] = 0xBB66;
    mcp->mb[11] = 0x66BB;
    mcp->mb[12] = 0xB6B6;
    mcp->mb[13] = 0x6B6B;
    mcp->mb[14] = 0x3636;
    mcp->mb[15] = 0xCCCC;

    mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
            MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
    mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
            MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;

    mcp->flags = MBX_DMA_OUT;
    mcp->tov = MBX_TOV_SECONDS;
    rval = qlafx00_mailbox_command(vha, mcp);
    if (rval == QLA_SUCCESS) {
        if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
            mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
            rval = QLA_FUNCTION_FAILED;
        if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
            mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
            rval = QLA_FUNCTION_FAILED;
        if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
            mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
            rval = QLA_FUNCTION_FAILED;
        if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
            mcp->mb[31] != 0xCCCC)
            rval = QLA_FUNCTION_FAILED;
    }

    if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_mbx, vha, 0x1170,
            "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
    } else {
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
            "Done %s.\n", __func__);
    }
    return rval;
}
/*
 * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
 *
 * Returns 0 on success.
 */
int
qlafx00_pci_config(scsi_qla_host_t *vha)
{
    uint16_t w;
    struct qla_hw_data *ha = vha->hw;

    pci_set_master(ha->pdev);
    pci_try_set_mwi(ha->pdev);

    pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
    w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
    w &= ~PCI_COMMAND_INTX_DISABLE;
    pci_write_config_word(ha->pdev, PCI_COMMAND, w);

    /* PCIe -- adjust Maximum Read Request Size (2048). */
    if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
        pcie_set_readrq(ha->pdev, 2048);

    ha->chip_revision = ha->pdev->revision;

    return QLA_SUCCESS;
}
/*
 * qlafx00_warm_reset() - Perform warm reset of iSA (CPUs being reset on SOC).
 */
static void
qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
{
    int i, core, cnt;
    unsigned long flags = 0;
    struct qla_hw_data *ha = vha->hw;

    /* Set all 4 cores in reset */
    for (i = 0; i < 4; i++) {
        QLAFX00_SET_HBA_SOC_REG(ha,
            (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
    }

    /* Set all 4 core Clock gating control */
    for (i = 0; i < 4; i++) {
        QLAFX00_SET_HBA_SOC_REG(ha,
            (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
    }

    /* Reset all units in Fabric */
    QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101));

    /* Reset all interrupt control registers */
    for (i = 0; i < 115; i++) {
        QLAFX00_SET_HBA_SOC_REG(ha,
            (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
    }

    /* Reset Timers control registers. per core */
    for (core = 0; core < 4; core++)
        for (i = 0; i < 8; i++)
            QLAFX00_SET_HBA_SOC_REG(ha,
                (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));

    /* Reset per core IRQ ack register */
    for (core = 0; core < 4; core++)
        QLAFX00_SET_HBA_SOC_REG(ha,
            (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));

    /* Set Fabric control and config to defaults */
    QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
    QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));

    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Kick in Fabric units */
    QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));

    /* Kick in Core0 to start boot process */
    QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));

    /* Wait 10secs for soft-reset to complete. */
    for (cnt = 10; cnt; cnt--) {
    }

    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/*
 * qlafx00_soft_reset() - Soft Reset ISPFx00.
 *
 * Returns 0 on success.
 */
void
qlafx00_soft_reset(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;

    if (unlikely(pci_channel_offline(ha->pdev) &&
        ha->flags.pci_channel_io_perm_failure))
        return;

    ha->isp_ops->disable_intrs(ha);
    qlafx00_soc_cpu_reset(vha);
    ha->isp_ops->enable_intrs(ha);
}
/*
 * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
 *
 * Returns 0 on success.
 */
int
qlafx00_chip_diag(scsi_qla_host_t *vha)
{
    int rval;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = ha->req_q_map[0];

    ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

    rval = qlafx00_mbx_reg_test(vha);
    if (rval) {
        ql_log(ql_log_warn, vha, 0x1165,
            "Failed mailbox send register test\n");
    } else {
        /* Flag a successful rval */
        rval = QLA_SUCCESS;
    }
    return rval;
}
void
qlafx00_config_rings(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
    struct init_cb_fx *icb;
    struct req_que *req = ha->req_q_map[0];
    struct rsp_que *rsp = ha->rsp_q_map[0];

    /* Setup ring parameters in initialization control block. */
    icb = (struct init_cb_fx *)ha->init_cb;
    icb->request_q_outpointer = __constant_cpu_to_le16(0);
    icb->response_q_inpointer = __constant_cpu_to_le16(0);
    icb->request_q_length = cpu_to_le16(req->length);
    icb->response_q_length = cpu_to_le16(rsp->length);
    icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
    icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
    icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
    icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

    WRT_REG_DWORD(&reg->req_q_in, 0);
    WRT_REG_DWORD(&reg->req_q_out, 0);

    WRT_REG_DWORD(&reg->rsp_q_in, 0);
    WRT_REG_DWORD(&reg->rsp_q_out, 0);

    RD_REG_DWORD(&reg->rsp_q_out);
}
char *
qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
    struct qla_hw_data *ha = vha->hw;
    int pcie_reg;

    pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
    if (pcie_reg) {
        strcpy(str, "PCIe iSA");
        return str;
    }
    return str;
}
char *
qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
{
    struct qla_hw_data *ha = vha->hw;

    sprintf(str, "%s", ha->mr.fw_version);
    return str;
}
void
qlafx00_enable_intrs(struct qla_hw_data *ha)
{
    unsigned long flags = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    ha->interrupts_on = 1;
    QLAFX00_ENABLE_ICNTRL_REG(ha);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
void
qlafx00_disable_intrs(struct qla_hw_data *ha)
{
    unsigned long flags = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    ha->interrupts_on = 0;
    QLAFX00_DISABLE_ICNTRL_REG(ha);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
qlafx00_tmf_iocb_timeout(void *data)
{
    srb_t *sp = (srb_t *)data;
    struct srb_iocb *tmf = &sp->u.iocb_cmd;

    tmf->u.tmf.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
    complete(&tmf->u.tmf.comp);
}
static void
qlafx00_tmf_sp_done(void *data, void *ptr, int res)
{
    srb_t *sp = (srb_t *)ptr;
    struct srb_iocb *tmf = &sp->u.iocb_cmd;

    complete(&tmf->u.tmf.comp);
}
static int
qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags,
    uint32_t lun, uint32_t tag)
{
    scsi_qla_host_t *vha = fcport->vha;
    struct srb_iocb *tm_iocb;
    srb_t *sp;
    int rval = QLA_FUNCTION_FAILED;

    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp)
        goto done;

    tm_iocb = &sp->u.iocb_cmd;
    sp->type = SRB_TM_CMD;
    qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
    tm_iocb->u.tmf.flags = flags;
    tm_iocb->u.tmf.lun = lun;
    tm_iocb->u.tmf.data = tag;
    sp->done = qlafx00_tmf_sp_done;
    tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
    init_completion(&tm_iocb->u.tmf.comp);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS)
        goto done_free_sp;

    ql_dbg(ql_dbg_async, vha, 0x507b,
        "Task management command issued target_id=%x\n",
        fcport->tgt_id);

    wait_for_completion(&tm_iocb->u.tmf.comp);

    rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
        QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
    sp->free(vha, sp);
done:
    return rval;
}

int
qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
{
    return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
}

int
qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
{
    return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
}
int
qlafx00_iospace_config(struct qla_hw_data *ha)
{
    if (pci_request_selected_regions(ha->pdev, ha->bars,
        QLA2XXX_DRIVER_NAME)) {
        ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
            "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    /* Use MMIO operations for all accesses. */
    if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
        ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
            "Invalid pci I/O region size (%s).\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }
    if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
        ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
            "Invalid PCI mem BAR0 region size (%s), aborting\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    ha->cregbase =
        ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
    if (!ha->cregbase) {
        ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
            "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
        ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
            "region #2 not an MMIO resource (%s), aborting\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }
    if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
        ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
            "Invalid PCI mem BAR2 region size (%s), aborting\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    ha->iobase =
        ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
    if (!ha->iobase) {
        ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
            "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    /* Determine queue resources */
    ha->max_req_queues = ha->max_rsp_queues = 1;

    ql_log_pci(ql_log_info, ha->pdev, 0x012c,
        "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
        ha->bars, ha->cregbase, ha->iobase);

    return 0;

iospace_error_exit:
    return -ENOMEM;
}
static void
qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = ha->req_q_map[0];
    struct rsp_que *rsp = ha->rsp_q_map[0];

    req->length_fx00 = req->length;
    req->ring_fx00 = req->ring;
    req->dma_fx00 = req->dma;

    rsp->length_fx00 = rsp->length;
    rsp->ring_fx00 = rsp->ring;
    rsp->dma_fx00 = rsp->dma;

    ql_dbg(ql_dbg_init, vha, 0x012d,
        "req: %p, ring_fx00: %p, length_fx00: 0x%x,"
        "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
        req->length_fx00, (u64)req->dma_fx00);

    ql_dbg(ql_dbg_init, vha, 0x012e,
        "rsp: %p, ring_fx00: %p, length_fx00: 0x%x,"
        "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
        rsp->length_fx00, (u64)rsp->dma_fx00);
}
static int
qlafx00_config_queues(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = ha->req_q_map[0];
    struct rsp_que *rsp = ha->rsp_q_map[0];
    dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);

    req->length = ha->req_que_len;
    req->ring = (void *)ha->iobase + ha->req_que_off;
    req->dma = bar2_hdl + ha->req_que_off;
    if ((!req->ring) || (req->length == 0)) {
        ql_log_pci(ql_log_info, ha->pdev, 0x012f,
            "Unable to allocate memory for req_ring\n");
        return QLA_FUNCTION_FAILED;
    }

    ql_dbg(ql_dbg_init, vha, 0x0130,
        "req: %p req_ring pointer %p req len 0x%x "
        "req off 0x%x\n, req->dma: 0x%llx",
        req, req->ring, req->length,
        ha->req_que_off, (u64)req->dma);

    rsp->length = ha->rsp_que_len;
    rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
    rsp->dma = bar2_hdl + ha->rsp_que_off;
    if ((!rsp->ring) || (rsp->length == 0)) {
        ql_log_pci(ql_log_info, ha->pdev, 0x0131,
            "Unable to allocate memory for rsp_ring\n");
        return QLA_FUNCTION_FAILED;
    }

    ql_dbg(ql_dbg_init, vha, 0x0132,
        "rsp: %p rsp_ring pointer %p rsp len 0x%x "
        "rsp off 0x%x, rsp->dma: 0x%llx\n",
        rsp, rsp->ring, rsp->length,
        ha->rsp_que_off, (u64)rsp->dma);

    return QLA_SUCCESS;
}
913 qlafx00_init_fw_ready(scsi_qla_host_t
*vha
)
917 uint16_t wait_time
; /* Wait time */
918 struct qla_hw_data
*ha
= vha
->hw
;
919 struct device_reg_fx00 __iomem
*reg
= &ha
->iobase
->ispfx00
;
920 uint32_t aenmbx
, aenmbx7
= 0;
924 /* 30 seconds wait - Adjust if required */
927 /* wait time before firmware ready */
928 wtime
= jiffies
+ (wait_time
* HZ
);
930 aenmbx
= RD_REG_DWORD(®
->aenmailbox0
);
932 ql_dbg(ql_dbg_mbx
, vha
, 0x0133,
933 "aenmbx: 0x%x\n", aenmbx
);
936 case MBA_FW_NOT_STARTED
:
937 case MBA_FW_STARTING
:
941 case MBA_REQ_TRANSFER_ERR
:
942 case MBA_RSP_TRANSFER_ERR
:
943 case MBA_FW_INIT_FAILURE
:
944 qlafx00_soft_reset(vha
);
947 case MBA_FW_RESTART_CMPLT
:
948 /* Set the mbx and rqstq intr code */
949 aenmbx7
= RD_REG_DWORD(®
->aenmailbox7
);
950 ha
->mbx_intr_code
= MSW(aenmbx7
);
951 ha
->rqstq_intr_code
= LSW(aenmbx7
);
952 ha
->req_que_off
= RD_REG_DWORD(®
->aenmailbox1
);
953 ha
->rsp_que_off
= RD_REG_DWORD(®
->aenmailbox3
);
954 ha
->req_que_len
= RD_REG_DWORD(®
->aenmailbox5
);
955 ha
->rsp_que_len
= RD_REG_DWORD(®
->aenmailbox6
);
956 WRT_REG_DWORD(®
->aenmailbox0
, 0);
957 RD_REG_DWORD_RELAXED(®
->aenmailbox0
);
958 ql_dbg(ql_dbg_init
, vha
, 0x0134,
959 "f/w returned mbx_intr_code: 0x%x, "
960 "rqstq_intr_code: 0x%x\n",
961 ha
->mbx_intr_code
, ha
->rqstq_intr_code
);
962 QLAFX00_CLR_INTR_REG(ha
, QLAFX00_HST_INT_STS_BITS
);
968 /* If fw is apparently not ready. In order to continue,
969 * we might need to issue Mbox cmd, but the problem is
970 * that the DoorBell vector values that come with the
971 * 8060 AEN are most likely gone by now (and thus no
972 * bell would be rung on the fw side when mbox cmd is
973 * issued). We have to therefore grab the 8060 AEN
974 * shadow regs (filled in by FW when the last 8060
975 * AEN was being posted).
976 * Do the following to determine what is needed in
977 * order to get the FW ready:
978 * 1. reload the 8060 AEN values from the shadow regs
979 * 2. clear int status to get rid of possible pending
981 * 3. issue Get FW State Mbox cmd to determine fw state
982 * Set the mbx and rqstq intr code from Shadow Regs
984 aenmbx7
= RD_REG_DWORD(®
->initval7
);
985 ha
->mbx_intr_code
= MSW(aenmbx7
);
986 ha
->rqstq_intr_code
= LSW(aenmbx7
);
987 ha
->req_que_off
= RD_REG_DWORD(®
->initval1
);
988 ha
->rsp_que_off
= RD_REG_DWORD(®
->initval3
);
989 ha
->req_que_len
= RD_REG_DWORD(®
->initval5
);
990 ha
->rsp_que_len
= RD_REG_DWORD(®
->initval6
);
991 ql_dbg(ql_dbg_init
, vha
, 0x0135,
992 "f/w returned mbx_intr_code: 0x%x, "
993 "rqstq_intr_code: 0x%x\n",
994 ha
->mbx_intr_code
, ha
->rqstq_intr_code
);
995 QLAFX00_CLR_INTR_REG(ha
, QLAFX00_HST_INT_STS_BITS
);
997 /* Get the FW state */
998 rval
= qlafx00_get_firmware_state(vha
, state
);
999 if (rval
!= QLA_SUCCESS
) {
1000 /* Retry if timer has not expired */
1004 if (state
[0] == FSTATE_FX00_CONFIG_WAIT
) {
1005 /* Firmware is waiting to be
1006 * initialized by driver
1013 /* Issue driver shutdown and wait until f/w recovers.
1014 * Driver should continue to poll until 8060 AEN is
1015 * received indicating firmware recovery.
1017 ql_dbg(ql_dbg_init
, vha
, 0x0136,
1018 "Sending Driver shutdown fw_state 0x%x\n",
1021 rval
= qlafx00_driver_shutdown(vha
, 10);
1022 if (rval
!= QLA_SUCCESS
) {
1023 rval
= QLA_FUNCTION_FAILED
;
1028 wtime
= jiffies
+ (wait_time
* HZ
);
1033 if (time_after_eq(jiffies
, wtime
)) {
1034 ql_dbg(ql_dbg_init
, vha
, 0x0137,
1035 "Init f/w failed: aen[7]: 0x%x\n",
1036 RD_REG_DWORD(®
->aenmailbox7
));
1037 rval
= QLA_FUNCTION_FAILED
;
1041 /* Delay for a while */
1047 ql_dbg(ql_dbg_init
, vha
, 0x0138,
1048 "%s **** FAILED ****.\n", __func__
);
1050 ql_dbg(ql_dbg_init
, vha
, 0x0139,
1051 "%s **** SUCCESS ****.\n", __func__
);
1057 * qlafx00_fw_ready() - Waits for firmware ready.
1060 * Returns 0 on success.
1063 qlafx00_fw_ready(scsi_qla_host_t
*vha
)
1066 unsigned long wtime
;
1067 uint16_t wait_time
; /* Wait time if loop is coming ready */
1074 /* wait time before firmware ready */
1075 wtime
= jiffies
+ (wait_time
* HZ
);
1077 /* Wait for ISP to finish init */
1078 if (!vha
->flags
.init_done
)
1079 ql_dbg(ql_dbg_init
, vha
, 0x013a,
1080 "Waiting for init to complete...\n");
1083 rval
= qlafx00_get_firmware_state(vha
, state
);
1085 if (rval
== QLA_SUCCESS
) {
1086 if (state
[0] == FSTATE_FX00_INITIALIZED
) {
1087 ql_dbg(ql_dbg_init
, vha
, 0x013b,
1088 "fw_state=%x\n", state
[0]);
1093 rval
= QLA_FUNCTION_FAILED
;
1095 if (time_after_eq(jiffies
, wtime
))
1098 /* Delay for a while */
1101 ql_dbg(ql_dbg_init
, vha
, 0x013c,
1102 "fw_state=%x curr time=%lx.\n", state
[0], jiffies
);
1107 ql_dbg(ql_dbg_init
, vha
, 0x013d,
1108 "Firmware ready **** FAILED ****.\n");
1110 ql_dbg(ql_dbg_init
, vha
, 0x013e,
1111 "Firmware ready **** SUCCESS ****.\n");
1117 qlafx00_find_all_targets(scsi_qla_host_t
*vha
,
1118 struct list_head
*new_fcports
)
1122 fc_port_t
*fcport
, *new_fcport
;
1124 struct qla_hw_data
*ha
= vha
->hw
;
1128 if (!test_bit(LOOP_RESYNC_ACTIVE
, &vha
->dpc_flags
))
1129 return QLA_FUNCTION_FAILED
;
1131 if ((atomic_read(&vha
->loop_down_timer
) ||
1132 STATE_TRANSITION(vha
))) {
1133 atomic_set(&vha
->loop_down_timer
, 0);
1134 set_bit(LOOP_RESYNC_NEEDED
, &vha
->dpc_flags
);
1135 return QLA_FUNCTION_FAILED
;
1138 ql_dbg(ql_dbg_disc
+ ql_dbg_init
, vha
, 0x2088,
1139 "Listing Target bit map...\n");
1140 ql_dump_buffer(ql_dbg_disc
+ ql_dbg_init
, vha
,
1141 0x2089, (uint8_t *)ha
->gid_list
, 32);
1143 /* Allocate temporary rmtport for any new rmtports discovered. */
1144 new_fcport
= qla2x00_alloc_fcport(vha
, GFP_KERNEL
);
1145 if (new_fcport
== NULL
)
1146 return QLA_MEMORY_ALLOC_FAILED
;
1148 for_each_set_bit(tgt_id
, (void *)ha
->gid_list
,
1149 QLAFX00_TGT_NODE_LIST_SIZE
) {
1151 /* Send get target node info */
1152 new_fcport
->tgt_id
= tgt_id
;
1153 rval
= qlafx00_fx_disc(vha
, new_fcport
,
1154 FXDISC_GET_TGT_NODE_INFO
);
1155 if (rval
!= QLA_SUCCESS
) {
1156 ql_log(ql_log_warn
, vha
, 0x208a,
1157 "Target info scan failed -- assuming zero-entry "
1162 /* Locate matching device in database. */
1164 list_for_each_entry(fcport
, &vha
->vp_fcports
, list
) {
1165 if (memcmp(new_fcport
->port_name
,
1166 fcport
->port_name
, WWN_SIZE
))
1172 * If tgt_id is same and state FCS_ONLINE, nothing
1175 if (fcport
->tgt_id
== new_fcport
->tgt_id
&&
1176 atomic_read(&fcport
->state
) == FCS_ONLINE
)
1180 * Tgt ID changed or device was marked to be updated.
1182 ql_dbg(ql_dbg_disc
+ ql_dbg_init
, vha
, 0x208b,
1183 "TGT-ID Change(%s): Present tgt id: "
1185 "wwnn = %llx wwpn = %llx.\n",
1186 __func__
, fcport
->tgt_id
,
1187 atomic_read(&fcport
->state
),
1188 (unsigned long long)wwn_to_u64(fcport
->node_name
),
1189 (unsigned long long)wwn_to_u64(fcport
->port_name
));
1191 ql_log(ql_log_info
, vha
, 0x208c,
1192 "TGT-ID Announce(%s): Discovered tgt "
1193 "id 0x%x wwnn = %llx "
1194 "wwpn = %llx.\n", __func__
, new_fcport
->tgt_id
,
1195 (unsigned long long)
1196 wwn_to_u64(new_fcport
->node_name
),
1197 (unsigned long long)
1198 wwn_to_u64(new_fcport
->port_name
));
1200 if (atomic_read(&fcport
->state
) != FCS_ONLINE
) {
1201 fcport
->old_tgt_id
= fcport
->tgt_id
;
1202 fcport
->tgt_id
= new_fcport
->tgt_id
;
1203 ql_log(ql_log_info
, vha
, 0x208d,
1204 "TGT-ID: New fcport Added: %p\n", fcport
);
1205 qla2x00_update_fcport(vha
, fcport
);
1207 ql_log(ql_log_info
, vha
, 0x208e,
1208 " Existing TGT-ID %x did not get "
1209 " offline event from firmware.\n",
1210 fcport
->old_tgt_id
);
1211 qla2x00_mark_device_lost(vha
, fcport
, 0, 0);
1212 set_bit(LOOP_RESYNC_NEEDED
, &vha
->dpc_flags
);
1222 /* If device was not in our fcports list, then add it. */
1223 list_add_tail(&new_fcport
->list
, new_fcports
);
1225 /* Allocate a new replacement fcport. */
1226 new_fcport
= qla2x00_alloc_fcport(vha
, GFP_KERNEL
);
1227 if (new_fcport
== NULL
)
1228 return QLA_MEMORY_ALLOC_FAILED
;
1236 * qlafx00_configure_all_targets
1237 * Setup target devices with node ID's.
1240 * ha = adapter block pointer.
1247 qlafx00_configure_all_targets(scsi_qla_host_t
*vha
)
1250 fc_port_t
*fcport
, *rmptemp
;
1251 LIST_HEAD(new_fcports
);
1253 rval
= qlafx00_fx_disc(vha
, &vha
->hw
->mr
.fcport
,
1254 FXDISC_GET_TGT_NODE_LIST
);
1255 if (rval
!= QLA_SUCCESS
) {
1256 set_bit(LOOP_RESYNC_NEEDED
, &vha
->dpc_flags
);
1260 rval
= qlafx00_find_all_targets(vha
, &new_fcports
);
1261 if (rval
!= QLA_SUCCESS
) {
1262 set_bit(LOOP_RESYNC_NEEDED
, &vha
->dpc_flags
);
1267 * Delete all previous devices marked lost.
1269 list_for_each_entry(fcport
, &vha
->vp_fcports
, list
) {
1270 if (test_bit(LOOP_RESYNC_NEEDED
, &vha
->dpc_flags
))
1273 if (atomic_read(&fcport
->state
) == FCS_DEVICE_LOST
) {
1274 if (fcport
->port_type
!= FCT_INITIATOR
)
1275 qla2x00_mark_device_lost(vha
, fcport
, 0, 0);
1280 * Add the new devices to our devices list.
1282 list_for_each_entry_safe(fcport
, rmptemp
, &new_fcports
, list
) {
1283 if (test_bit(LOOP_RESYNC_NEEDED
, &vha
->dpc_flags
))
1286 qla2x00_update_fcport(vha
, fcport
);
1287 list_move_tail(&fcport
->list
, &vha
->vp_fcports
);
1288 ql_log(ql_log_info
, vha
, 0x208f,
1289 "Attach new target id 0x%x wwnn = %llx "
1292 (unsigned long long)wwn_to_u64(fcport
->node_name
),
1293 (unsigned long long)wwn_to_u64(fcport
->port_name
));
1296 /* Free all new device structures not processed. */
1297 list_for_each_entry_safe(fcport
, rmptemp
, &new_fcports
, list
) {
1298 list_del(&fcport
->list
);
1306 * qlafx00_configure_devices
1307 * Updates Fibre Channel Device Database with what is actually on loop.
1310 * ha = adapter block pointer.
1315 * 2 = database was full and device was not configured.
1318 qlafx00_configure_devices(scsi_qla_host_t
*vha
)
1321 unsigned long flags
, save_flags
;
1324 save_flags
= flags
= vha
->dpc_flags
;
1326 ql_dbg(ql_dbg_disc
, vha
, 0x2090,
1327 "Configure devices -- dpc flags =0x%lx\n", flags
);
1329 rval
= qlafx00_configure_all_targets(vha
);
1331 if (rval
== QLA_SUCCESS
) {
1332 if (test_bit(LOOP_RESYNC_NEEDED
, &vha
->dpc_flags
)) {
1333 rval
= QLA_FUNCTION_FAILED
;
1335 atomic_set(&vha
->loop_state
, LOOP_READY
);
1336 ql_log(ql_log_info
, vha
, 0x2091,
1342 ql_dbg(ql_dbg_disc
, vha
, 0x2092,
1343 "%s *** FAILED ***.\n", __func__
);
1345 ql_dbg(ql_dbg_disc
, vha
, 0x2093,
1346 "%s: exiting normally.\n", __func__
);
1352 qlafx00_abort_isp_cleanup(scsi_qla_host_t
*vha
)
1354 struct qla_hw_data
*ha
= vha
->hw
;
1357 vha
->flags
.online
= 0;
1358 ha
->flags
.chip_reset_done
= 0;
1359 ha
->mr
.fw_hbt_en
= 0;
1360 clear_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
);
1361 vha
->qla_stats
.total_isp_aborts
++;
1363 ql_log(ql_log_info
, vha
, 0x013f,
1364 "Performing ISP error recovery - ha = %p.\n", ha
);
1366 ha
->isp_ops
->reset_chip(vha
);
1368 if (atomic_read(&vha
->loop_state
) != LOOP_DOWN
) {
1369 atomic_set(&vha
->loop_state
, LOOP_DOWN
);
1370 atomic_set(&vha
->loop_down_timer
,
1371 QLAFX00_LOOP_DOWN_TIME
);
1373 if (!atomic_read(&vha
->loop_down_timer
))
1374 atomic_set(&vha
->loop_down_timer
,
1375 QLAFX00_LOOP_DOWN_TIME
);
1378 /* Clear all async request states across all VPs. */
1379 list_for_each_entry(fcport
, &vha
->vp_fcports
, list
) {
1381 if (atomic_read(&fcport
->state
) == FCS_ONLINE
)
1382 qla2x00_set_fcport_state(fcport
, FCS_DEVICE_LOST
);
1385 if (!ha
->flags
.eeh_busy
) {
1386 /* Requeue all commands in outstanding command list. */
1387 qla2x00_abort_all_cmds(vha
, DID_RESET
<< 16);
1390 qla2x00_free_irqs(vha
);
1391 set_bit(FX00_RESET_RECOVERY
, &vha
->dpc_flags
);
1393 /* Clear the Interrupts */
1394 QLAFX00_CLR_INTR_REG(ha
, QLAFX00_HST_INT_STS_BITS
);
1396 ql_log(ql_log_info
, vha
, 0x0140,
1397 "%s Done done - ha=%p.\n", __func__
, ha
);
1401 * qlafx00_init_response_q_entries() - Initializes response queue entries.
1404 * Beginning of request ring has initialization control block already built
1405 * by nvram config routine.
1407 * Returns 0 on success.
1410 qlafx00_init_response_q_entries(struct rsp_que
*rsp
)
1415 rsp
->ring_ptr
= rsp
->ring
;
1416 rsp
->ring_index
= 0;
1417 rsp
->status_srb
= NULL
;
1418 pkt
= rsp
->ring_ptr
;
1419 for (cnt
= 0; cnt
< rsp
->length
; cnt
++) {
1420 pkt
->signature
= RESPONSE_PROCESSED
;
1421 WRT_REG_DWORD((void __iomem
*)&pkt
->signature
,
1422 RESPONSE_PROCESSED
);
1428 qlafx00_rescan_isp(scsi_qla_host_t
*vha
)
1430 uint32_t status
= QLA_FUNCTION_FAILED
;
1431 struct qla_hw_data
*ha
= vha
->hw
;
1432 struct device_reg_fx00 __iomem
*reg
= &ha
->iobase
->ispfx00
;
1435 qla2x00_request_irqs(ha
, ha
->rsp_q_map
[0]);
1437 aenmbx7
= RD_REG_DWORD(®
->aenmailbox7
);
1438 ha
->mbx_intr_code
= MSW(aenmbx7
);
1439 ha
->rqstq_intr_code
= LSW(aenmbx7
);
1440 ha
->req_que_off
= RD_REG_DWORD(®
->aenmailbox1
);
1441 ha
->rsp_que_off
= RD_REG_DWORD(®
->aenmailbox3
);
1442 ha
->req_que_len
= RD_REG_DWORD(®
->aenmailbox5
);
1443 ha
->rsp_que_len
= RD_REG_DWORD(®
->aenmailbox6
);
1445 ql_dbg(ql_dbg_disc
, vha
, 0x2094,
1446 "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
1447 " Req que offset 0x%x Rsp que offset 0x%x\n",
1448 ha
->mbx_intr_code
, ha
->rqstq_intr_code
,
1449 ha
->req_que_off
, ha
->rsp_que_len
);
1451 /* Clear the Interrupts */
1452 QLAFX00_CLR_INTR_REG(ha
, QLAFX00_HST_INT_STS_BITS
);
1454 status
= qla2x00_init_rings(vha
);
1456 vha
->flags
.online
= 1;
1458 /* if no cable then assume it's good */
1459 if ((vha
->device_flags
& DFLG_NO_CABLE
))
1461 /* Register system information */
1462 if (qlafx00_fx_disc(vha
,
1463 &vha
->hw
->mr
.fcport
, FXDISC_REG_HOST_INFO
))
1464 ql_dbg(ql_dbg_disc
, vha
, 0x2095,
1465 "failed to register host info\n");
1467 scsi_unblock_requests(vha
->host
);
1472 qlafx00_timer_routine(scsi_qla_host_t
*vha
)
1474 struct qla_hw_data
*ha
= vha
->hw
;
1475 uint32_t fw_heart_beat
;
1477 struct device_reg_fx00 __iomem
*reg
= &ha
->iobase
->ispfx00
;
1479 /* Check firmware health */
1480 if (ha
->mr
.fw_hbt_cnt
)
1481 ha
->mr
.fw_hbt_cnt
--;
1483 if ((!ha
->flags
.mr_reset_hdlr_active
) &&
1484 (!test_bit(UNLOADING
, &vha
->dpc_flags
)) &&
1485 (!test_bit(ABORT_ISP_ACTIVE
, &vha
->dpc_flags
)) &&
1486 (ha
->mr
.fw_hbt_en
)) {
1487 fw_heart_beat
= RD_REG_DWORD(®
->fwheartbeat
);
1488 if (fw_heart_beat
!= ha
->mr
.old_fw_hbt_cnt
) {
1489 ha
->mr
.old_fw_hbt_cnt
= fw_heart_beat
;
1490 ha
->mr
.fw_hbt_miss_cnt
= 0;
1492 ha
->mr
.fw_hbt_miss_cnt
++;
1493 if (ha
->mr
.fw_hbt_miss_cnt
==
1494 QLAFX00_HEARTBEAT_MISS_CNT
) {
1495 set_bit(ISP_ABORT_NEEDED
,
1497 qla2xxx_wake_dpc(vha
);
1498 ha
->mr
.fw_hbt_miss_cnt
= 0;
1502 ha
->mr
.fw_hbt_cnt
= QLAFX00_HEARTBEAT_INTERVAL
;
1505 if (test_bit(FX00_RESET_RECOVERY
, &vha
->dpc_flags
)) {
1506 /* Reset recovery to be performed in timer routine */
1507 aenmbx0
= RD_REG_DWORD(®
->aenmailbox0
);
1508 if (ha
->mr
.fw_reset_timer_exp
) {
1509 set_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
);
1510 qla2xxx_wake_dpc(vha
);
1511 ha
->mr
.fw_reset_timer_exp
= 0;
1512 } else if (aenmbx0
== MBA_FW_RESTART_CMPLT
) {
1513 /* Wake up DPC to rescan the targets */
1514 set_bit(FX00_TARGET_SCAN
, &vha
->dpc_flags
);
1515 clear_bit(FX00_RESET_RECOVERY
, &vha
->dpc_flags
);
1516 qla2xxx_wake_dpc(vha
);
1517 ha
->mr
.fw_reset_timer_tick
= QLAFX00_RESET_INTERVAL
;
1518 } else if ((aenmbx0
== MBA_FW_STARTING
) &&
1519 (!ha
->mr
.fw_hbt_en
)) {
1520 ha
->mr
.fw_hbt_en
= 1;
1521 } else if (!ha
->mr
.fw_reset_timer_tick
) {
1522 if (aenmbx0
== ha
->mr
.old_aenmbx0_state
)
1523 ha
->mr
.fw_reset_timer_exp
= 1;
1524 ha
->mr
.fw_reset_timer_tick
= QLAFX00_RESET_INTERVAL
;
1525 } else if (aenmbx0
== 0xFFFFFFFF) {
1526 uint32_t data0
, data1
;
1528 data0
= QLAFX00_RD_REG(ha
,
1529 QLAFX00_BAR1_BASE_ADDR_REG
);
1530 data1
= QLAFX00_RD_REG(ha
,
1531 QLAFX00_PEX0_WIN0_BASE_ADDR_REG
);
1533 data0
&= 0xffff0000;
1534 data1
&= 0x0000ffff;
1537 QLAFX00_PEX0_WIN0_BASE_ADDR_REG
,
1539 } else if ((aenmbx0
& 0xFF00) == MBA_FW_POLL_STATE
) {
1540 ha
->mr
.fw_reset_timer_tick
=
1541 QLAFX00_MAX_RESET_INTERVAL
;
1543 ha
->mr
.old_aenmbx0_state
= aenmbx0
;
1544 ha
->mr
.fw_reset_timer_tick
--;
1549 * qlfx00a_reset_initialize
1550 * Re-initialize after a iSA device reset.
1553 * ha = adapter block pointer.
1559 qlafx00_reset_initialize(scsi_qla_host_t
*vha
)
1561 struct qla_hw_data
*ha
= vha
->hw
;
1563 if (vha
->device_flags
& DFLG_DEV_FAILED
) {
1564 ql_dbg(ql_dbg_init
, vha
, 0x0142,
1565 "Device in failed state\n");
1569 ha
->flags
.mr_reset_hdlr_active
= 1;
1571 if (vha
->flags
.online
) {
1572 scsi_block_requests(vha
->host
);
1573 qlafx00_abort_isp_cleanup(vha
);
1576 ql_log(ql_log_info
, vha
, 0x0143,
1577 "(%s): succeeded.\n", __func__
);
1578 ha
->flags
.mr_reset_hdlr_active
= 0;
1584 * Resets ISP and aborts all outstanding commands.
1587 * ha = adapter block pointer.
1593 qlafx00_abort_isp(scsi_qla_host_t
*vha
)
1595 struct qla_hw_data
*ha
= vha
->hw
;
1597 if (vha
->flags
.online
) {
1598 if (unlikely(pci_channel_offline(ha
->pdev
) &&
1599 ha
->flags
.pci_channel_io_perm_failure
)) {
1600 clear_bit(ISP_ABORT_RETRY
, &vha
->dpc_flags
);
1604 scsi_block_requests(vha
->host
);
1605 qlafx00_abort_isp_cleanup(vha
);
1608 ql_log(ql_log_info
, vha
, 0x0145,
1609 "(%s): succeeded.\n", __func__
);
1614 static inline fc_port_t
*
1615 qlafx00_get_fcport(struct scsi_qla_host
*vha
, int tgt_id
)
1619 /* Check for matching device in remote port list. */
1621 list_for_each_entry(fcport
, &vha
->vp_fcports
, list
) {
1622 if (fcport
->tgt_id
== tgt_id
) {
1623 ql_dbg(ql_dbg_async
, vha
, 0x5072,
1624 "Matching fcport(%p) found with TGT-ID: 0x%x "
1625 "and Remote TGT_ID: 0x%x\n",
1626 fcport
, fcport
->tgt_id
, tgt_id
);
1634 qlafx00_tgt_detach(struct scsi_qla_host
*vha
, int tgt_id
)
1638 ql_log(ql_log_info
, vha
, 0x5073,
1639 "Detach TGT-ID: 0x%x\n", tgt_id
);
1641 fcport
= qlafx00_get_fcport(vha
, tgt_id
);
1645 qla2x00_mark_device_lost(vha
, fcport
, 0, 0);
1651 qlafx00_process_aen(struct scsi_qla_host
*vha
, struct qla_work_evt
*evt
)
1654 uint32_t aen_code
, aen_data
;
1656 aen_code
= FCH_EVT_VENDOR_UNIQUE
;
1657 aen_data
= evt
->u
.aenfx
.evtcode
;
1659 switch (evt
->u
.aenfx
.evtcode
) {
1660 case QLAFX00_MBA_PORT_UPDATE
: /* Port database update */
1661 if (evt
->u
.aenfx
.mbx
[1] == 0) {
1662 if (evt
->u
.aenfx
.mbx
[2] == 1) {
1663 if (!vha
->flags
.fw_tgt_reported
)
1664 vha
->flags
.fw_tgt_reported
= 1;
1665 atomic_set(&vha
->loop_down_timer
, 0);
1666 atomic_set(&vha
->loop_state
, LOOP_UP
);
1667 set_bit(LOOP_RESYNC_NEEDED
, &vha
->dpc_flags
);
1668 qla2xxx_wake_dpc(vha
);
1669 } else if (evt
->u
.aenfx
.mbx
[2] == 2) {
1670 qlafx00_tgt_detach(vha
, evt
->u
.aenfx
.mbx
[3]);
1672 } else if (evt
->u
.aenfx
.mbx
[1] == 0xffff) {
1673 if (evt
->u
.aenfx
.mbx
[2] == 1) {
1674 if (!vha
->flags
.fw_tgt_reported
)
1675 vha
->flags
.fw_tgt_reported
= 1;
1676 set_bit(LOOP_RESYNC_NEEDED
, &vha
->dpc_flags
);
1677 } else if (evt
->u
.aenfx
.mbx
[2] == 2) {
1678 vha
->device_flags
|= DFLG_NO_CABLE
;
1679 qla2x00_mark_all_devices_lost(vha
, 1);
1683 case QLAFX00_MBA_LINK_UP
:
1684 aen_code
= FCH_EVT_LINKUP
;
1687 case QLAFX00_MBA_LINK_DOWN
:
1688 aen_code
= FCH_EVT_LINKDOWN
;
1693 fc_host_post_event(vha
->host
, fc_get_event_number(),
1694 aen_code
, aen_data
);
1700 qlafx00_update_host_attr(scsi_qla_host_t
*vha
, struct port_info_data
*pinfo
)
1702 u64 port_name
= 0, node_name
= 0;
1704 port_name
= (unsigned long long)wwn_to_u64(pinfo
->port_name
);
1705 node_name
= (unsigned long long)wwn_to_u64(pinfo
->node_name
);
1707 fc_host_node_name(vha
->host
) = node_name
;
1708 fc_host_port_name(vha
->host
) = port_name
;
1709 if (!pinfo
->port_type
)
1710 vha
->hw
->current_topology
= ISP_CFG_F
;
1711 if (pinfo
->link_status
== QLAFX00_LINK_STATUS_UP
)
1712 atomic_set(&vha
->loop_state
, LOOP_READY
);
1713 else if (pinfo
->link_status
== QLAFX00_LINK_STATUS_DOWN
)
1714 atomic_set(&vha
->loop_state
, LOOP_DOWN
);
1715 vha
->hw
->link_data_rate
= (uint16_t)pinfo
->link_config
;
1719 qla2x00_fxdisc_iocb_timeout(void *data
)
1721 srb_t
*sp
= (srb_t
*)data
;
1722 struct srb_iocb
*lio
= &sp
->u
.iocb_cmd
;
1724 complete(&lio
->u
.fxiocb
.fxiocb_comp
);
1728 qla2x00_fxdisc_sp_done(void *data
, void *ptr
, int res
)
1730 srb_t
*sp
= (srb_t
*)ptr
;
1731 struct srb_iocb
*lio
= &sp
->u
.iocb_cmd
;
1733 complete(&lio
->u
.fxiocb
.fxiocb_comp
);
1737 qlafx00_fx_disc(scsi_qla_host_t
*vha
, fc_port_t
*fcport
, uint16_t fx_type
)
1740 struct srb_iocb
*fdisc
;
1741 int rval
= QLA_FUNCTION_FAILED
;
1742 struct qla_hw_data
*ha
= vha
->hw
;
1743 struct host_system_info
*phost_info
;
1744 struct register_host_info
*preg_hsi
;
1745 struct new_utsname
*p_sysid
= NULL
;
1748 sp
= qla2x00_get_sp(vha
, fcport
, GFP_KERNEL
);
1752 fdisc
= &sp
->u
.iocb_cmd
;
1754 case FXDISC_GET_CONFIG_INFO
:
1755 fdisc
->u
.fxiocb
.flags
=
1756 SRB_FXDISC_RESP_DMA_VALID
;
1757 fdisc
->u
.fxiocb
.rsp_len
= sizeof(struct config_info_data
);
1759 case FXDISC_GET_PORT_INFO
:
1760 fdisc
->u
.fxiocb
.flags
=
1761 SRB_FXDISC_RESP_DMA_VALID
| SRB_FXDISC_REQ_DWRD_VALID
;
1762 fdisc
->u
.fxiocb
.rsp_len
= QLAFX00_PORT_DATA_INFO
;
1763 fdisc
->u
.fxiocb
.req_data
= cpu_to_le32(fcport
->port_id
);
1765 case FXDISC_GET_TGT_NODE_INFO
:
1766 fdisc
->u
.fxiocb
.flags
=
1767 SRB_FXDISC_RESP_DMA_VALID
| SRB_FXDISC_REQ_DWRD_VALID
;
1768 fdisc
->u
.fxiocb
.rsp_len
= QLAFX00_TGT_NODE_INFO
;
1769 fdisc
->u
.fxiocb
.req_data
= cpu_to_le32(fcport
->tgt_id
);
1771 case FXDISC_GET_TGT_NODE_LIST
:
1772 fdisc
->u
.fxiocb
.flags
=
1773 SRB_FXDISC_RESP_DMA_VALID
| SRB_FXDISC_REQ_DWRD_VALID
;
1774 fdisc
->u
.fxiocb
.rsp_len
= QLAFX00_TGT_NODE_LIST_SIZE
;
1776 case FXDISC_REG_HOST_INFO
:
1777 fdisc
->u
.fxiocb
.flags
= SRB_FXDISC_REQ_DMA_VALID
;
1778 fdisc
->u
.fxiocb
.req_len
= sizeof(struct register_host_info
);
1779 p_sysid
= utsname();
1781 ql_log(ql_log_warn
, vha
, 0x303c,
1782 "Not able to get the system informtion\n");
1790 if (fdisc
->u
.fxiocb
.flags
& SRB_FXDISC_REQ_DMA_VALID
) {
1791 fdisc
->u
.fxiocb
.req_addr
= dma_alloc_coherent(&ha
->pdev
->dev
,
1792 fdisc
->u
.fxiocb
.req_len
,
1793 &fdisc
->u
.fxiocb
.req_dma_handle
, GFP_KERNEL
);
1794 if (!fdisc
->u
.fxiocb
.req_addr
)
1797 if (fx_type
== FXDISC_REG_HOST_INFO
) {
1798 preg_hsi
= (struct register_host_info
*)
1799 fdisc
->u
.fxiocb
.req_addr
;
1800 phost_info
= &preg_hsi
->hsi
;
1801 memset(preg_hsi
, 0, sizeof(struct register_host_info
));
1802 phost_info
->os_type
= OS_TYPE_LINUX
;
1803 strncpy(phost_info
->sysname
,
1804 p_sysid
->sysname
, SYSNAME_LENGTH
);
1805 strncpy(phost_info
->nodename
,
1806 p_sysid
->nodename
, NODENAME_LENGTH
);
1807 strncpy(phost_info
->release
,
1808 p_sysid
->release
, RELEASE_LENGTH
);
1809 strncpy(phost_info
->version
,
1810 p_sysid
->version
, VERSION_LENGTH
);
1811 strncpy(phost_info
->machine
,
1812 p_sysid
->machine
, MACHINE_LENGTH
);
1813 strncpy(phost_info
->domainname
,
1814 p_sysid
->domainname
, DOMNAME_LENGTH
);
1815 strncpy(phost_info
->hostdriver
,
1816 QLA2XXX_VERSION
, VERSION_LENGTH
);
1817 do_gettimeofday(&tv
);
1818 preg_hsi
->utc
= (uint64_t)tv
.tv_sec
;
1819 ql_dbg(ql_dbg_init
, vha
, 0x0149,
1820 "ISP%04X: Host registration with firmware\n",
1822 ql_dbg(ql_dbg_init
, vha
, 0x014a,
1823 "os_type = '%d', sysname = '%s', nodname = '%s'\n",
1824 phost_info
->os_type
,
1825 phost_info
->sysname
,
1826 phost_info
->nodename
);
1827 ql_dbg(ql_dbg_init
, vha
, 0x014b,
1828 "release = '%s', version = '%s'\n",
1829 phost_info
->release
,
1830 phost_info
->version
);
1831 ql_dbg(ql_dbg_init
, vha
, 0x014c,
1833 "domainname = '%s', hostdriver = '%s'\n",
1834 phost_info
->machine
,
1835 phost_info
->domainname
,
1836 phost_info
->hostdriver
);
1837 ql_dump_buffer(ql_dbg_init
+ ql_dbg_disc
, vha
, 0x014d,
1838 (uint8_t *)phost_info
,
1839 sizeof(struct host_system_info
));
1843 if (fdisc
->u
.fxiocb
.flags
& SRB_FXDISC_RESP_DMA_VALID
) {
1844 fdisc
->u
.fxiocb
.rsp_addr
= dma_alloc_coherent(&ha
->pdev
->dev
,
1845 fdisc
->u
.fxiocb
.rsp_len
,
1846 &fdisc
->u
.fxiocb
.rsp_dma_handle
, GFP_KERNEL
);
1847 if (!fdisc
->u
.fxiocb
.rsp_addr
)
1848 goto done_unmap_req
;
1851 sp
->type
= SRB_FXIOCB_DCMD
;
1852 sp
->name
= "fxdisc";
1853 qla2x00_init_timer(sp
, FXDISC_TIMEOUT
);
1854 fdisc
->timeout
= qla2x00_fxdisc_iocb_timeout
;
1855 fdisc
->u
.fxiocb
.req_func_type
= cpu_to_le16(fx_type
);
1856 sp
->done
= qla2x00_fxdisc_sp_done
;
1858 rval
= qla2x00_start_sp(sp
);
1859 if (rval
!= QLA_SUCCESS
)
1860 goto done_unmap_dma
;
1862 wait_for_completion(&fdisc
->u
.fxiocb
.fxiocb_comp
);
1864 if (fx_type
== FXDISC_GET_CONFIG_INFO
) {
1865 struct config_info_data
*pinfo
=
1866 (struct config_info_data
*) fdisc
->u
.fxiocb
.rsp_addr
;
1867 memcpy(&vha
->hw
->mr
.product_name
, pinfo
->product_name
,
1868 sizeof(vha
->hw
->mr
.product_name
));
1869 memcpy(&vha
->hw
->mr
.symbolic_name
, pinfo
->symbolic_name
,
1870 sizeof(vha
->hw
->mr
.symbolic_name
));
1871 memcpy(&vha
->hw
->mr
.serial_num
, pinfo
->serial_num
,
1872 sizeof(vha
->hw
->mr
.serial_num
));
1873 memcpy(&vha
->hw
->mr
.hw_version
, pinfo
->hw_version
,
1874 sizeof(vha
->hw
->mr
.hw_version
));
1875 memcpy(&vha
->hw
->mr
.fw_version
, pinfo
->fw_version
,
1876 sizeof(vha
->hw
->mr
.fw_version
));
1877 strim(vha
->hw
->mr
.fw_version
);
1878 memcpy(&vha
->hw
->mr
.uboot_version
, pinfo
->uboot_version
,
1879 sizeof(vha
->hw
->mr
.uboot_version
));
1880 memcpy(&vha
->hw
->mr
.fru_serial_num
, pinfo
->fru_serial_num
,
1881 sizeof(vha
->hw
->mr
.fru_serial_num
));
1882 } else if (fx_type
== FXDISC_GET_PORT_INFO
) {
1883 struct port_info_data
*pinfo
=
1884 (struct port_info_data
*) fdisc
->u
.fxiocb
.rsp_addr
;
1885 memcpy(vha
->node_name
, pinfo
->node_name
, WWN_SIZE
);
1886 memcpy(vha
->port_name
, pinfo
->port_name
, WWN_SIZE
);
1887 vha
->d_id
.b
.domain
= pinfo
->port_id
[0];
1888 vha
->d_id
.b
.area
= pinfo
->port_id
[1];
1889 vha
->d_id
.b
.al_pa
= pinfo
->port_id
[2];
1890 qlafx00_update_host_attr(vha
, pinfo
);
1891 ql_dump_buffer(ql_dbg_init
+ ql_dbg_buffer
, vha
, 0x0141,
1892 (uint8_t *)pinfo
, 16);
1893 } else if (fx_type
== FXDISC_GET_TGT_NODE_INFO
) {
1894 struct qlafx00_tgt_node_info
*pinfo
=
1895 (struct qlafx00_tgt_node_info
*) fdisc
->u
.fxiocb
.rsp_addr
;
1896 memcpy(fcport
->node_name
, pinfo
->tgt_node_wwnn
, WWN_SIZE
);
1897 memcpy(fcport
->port_name
, pinfo
->tgt_node_wwpn
, WWN_SIZE
);
1898 fcport
->port_type
= FCT_TARGET
;
1899 ql_dump_buffer(ql_dbg_init
+ ql_dbg_buffer
, vha
, 0x0144,
1900 (uint8_t *)pinfo
, 16);
1901 } else if (fx_type
== FXDISC_GET_TGT_NODE_LIST
) {
1902 struct qlafx00_tgt_node_info
*pinfo
=
1903 (struct qlafx00_tgt_node_info
*) fdisc
->u
.fxiocb
.rsp_addr
;
1904 ql_dump_buffer(ql_dbg_init
+ ql_dbg_buffer
, vha
, 0x0146,
1905 (uint8_t *)pinfo
, 16);
1906 memcpy(vha
->hw
->gid_list
, pinfo
, QLAFX00_TGT_NODE_LIST_SIZE
);
1908 rval
= le32_to_cpu(fdisc
->u
.fxiocb
.result
);
1911 if (fdisc
->u
.fxiocb
.rsp_addr
)
1912 dma_free_coherent(&ha
->pdev
->dev
, fdisc
->u
.fxiocb
.rsp_len
,
1913 fdisc
->u
.fxiocb
.rsp_addr
, fdisc
->u
.fxiocb
.rsp_dma_handle
);
1916 if (fdisc
->u
.fxiocb
.req_addr
)
1917 dma_free_coherent(&ha
->pdev
->dev
, fdisc
->u
.fxiocb
.req_len
,
1918 fdisc
->u
.fxiocb
.req_addr
, fdisc
->u
.fxiocb
.req_dma_handle
);
1926 qlafx00_abort_iocb_timeout(void *data
)
1928 srb_t
*sp
= (srb_t
*)data
;
1929 struct srb_iocb
*abt
= &sp
->u
.iocb_cmd
;
1931 abt
->u
.abt
.comp_status
= cpu_to_le16((uint16_t)CS_TIMEOUT
);
1932 complete(&abt
->u
.abt
.comp
);
1936 qlafx00_abort_sp_done(void *data
, void *ptr
, int res
)
1938 srb_t
*sp
= (srb_t
*)ptr
;
1939 struct srb_iocb
*abt
= &sp
->u
.iocb_cmd
;
1941 complete(&abt
->u
.abt
.comp
);
1945 qlafx00_async_abt_cmd(srb_t
*cmd_sp
)
1947 scsi_qla_host_t
*vha
= cmd_sp
->fcport
->vha
;
1948 fc_port_t
*fcport
= cmd_sp
->fcport
;
1949 struct srb_iocb
*abt_iocb
;
1951 int rval
= QLA_FUNCTION_FAILED
;
1953 sp
= qla2x00_get_sp(vha
, fcport
, GFP_KERNEL
);
1957 abt_iocb
= &sp
->u
.iocb_cmd
;
1958 sp
->type
= SRB_ABT_CMD
;
1960 qla2x00_init_timer(sp
, FXDISC_TIMEOUT
);
1961 abt_iocb
->u
.abt
.cmd_hndl
= cmd_sp
->handle
;
1962 sp
->done
= qlafx00_abort_sp_done
;
1963 abt_iocb
->timeout
= qlafx00_abort_iocb_timeout
;
1964 init_completion(&abt_iocb
->u
.abt
.comp
);
1966 rval
= qla2x00_start_sp(sp
);
1967 if (rval
!= QLA_SUCCESS
)
1970 ql_dbg(ql_dbg_async
, vha
, 0x507c,
1971 "Abort command issued - hdl=%x, target_id=%x\n",
1972 cmd_sp
->handle
, fcport
->tgt_id
);
1974 wait_for_completion(&abt_iocb
->u
.abt
.comp
);
1976 rval
= abt_iocb
->u
.abt
.comp_status
== CS_COMPLETE
?
1977 QLA_SUCCESS
: QLA_FUNCTION_FAILED
;
1986 qlafx00_abort_command(srb_t
*sp
)
1988 unsigned long flags
= 0;
1991 fc_port_t
*fcport
= sp
->fcport
;
1992 struct scsi_qla_host
*vha
= fcport
->vha
;
1993 struct qla_hw_data
*ha
= vha
->hw
;
1994 struct req_que
*req
= vha
->req
;
1996 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1997 for (handle
= 1; handle
< DEFAULT_OUTSTANDING_COMMANDS
; handle
++) {
1998 if (req
->outstanding_cmds
[handle
] == sp
)
2001 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
2002 if (handle
== DEFAULT_OUTSTANDING_COMMANDS
) {
2003 /* Command not found. */
2004 return QLA_FUNCTION_FAILED
;
2006 return qlafx00_async_abt_cmd(sp
);
2010 * qlafx00_initialize_adapter
2014 * ha = adapter block pointer.
2020 qlafx00_initialize_adapter(scsi_qla_host_t
*vha
)
2023 struct qla_hw_data
*ha
= vha
->hw
;
2025 /* Clear adapter flags. */
2026 vha
->flags
.online
= 0;
2027 ha
->flags
.chip_reset_done
= 0;
2028 vha
->flags
.reset_active
= 0;
2029 ha
->flags
.pci_channel_io_perm_failure
= 0;
2030 ha
->flags
.eeh_busy
= 0;
2031 ha
->thermal_support
= 0;
2032 atomic_set(&vha
->loop_down_timer
, LOOP_DOWN_TIME
);
2033 atomic_set(&vha
->loop_state
, LOOP_DOWN
);
2034 vha
->device_flags
= DFLG_NO_CABLE
;
2036 vha
->flags
.management_server_logged_in
= 0;
2037 vha
->marker_needed
= 0;
2038 ha
->isp_abort_cnt
= 0;
2039 ha
->beacon_blink_led
= 0;
2041 set_bit(0, ha
->req_qid_map
);
2042 set_bit(0, ha
->rsp_qid_map
);
2044 ql_dbg(ql_dbg_init
, vha
, 0x0147,
2045 "Configuring PCI space...\n");
2047 rval
= ha
->isp_ops
->pci_config(vha
);
2049 ql_log(ql_log_warn
, vha
, 0x0148,
2050 "Unable to configure PCI space.\n");
2054 rval
= qlafx00_init_fw_ready(vha
);
2055 if (rval
!= QLA_SUCCESS
)
2058 qlafx00_save_queue_ptrs(vha
);
2060 rval
= qlafx00_config_queues(vha
);
2061 if (rval
!= QLA_SUCCESS
)
2065 * Allocate the array of outstanding commands
2066 * now that we know the firmware resources.
2068 rval
= qla2x00_alloc_outstanding_cmds(ha
, vha
->req
);
2069 if (rval
!= QLA_SUCCESS
)
2072 rval
= qla2x00_init_rings(vha
);
2073 ha
->flags
.chip_reset_done
= 1;
2079 qlafx00_fw_state_show(struct device
*dev
, struct device_attribute
*attr
,
2082 scsi_qla_host_t
*vha
= shost_priv(class_to_shost(dev
));
2083 int rval
= QLA_FUNCTION_FAILED
;
2086 if (qla2x00_reset_active(vha
))
2087 ql_log(ql_log_warn
, vha
, 0x70ce,
2088 "ISP reset active.\n");
2089 else if (!vha
->hw
->flags
.eeh_busy
) {
2090 rval
= qlafx00_get_firmware_state(vha
, state
);
2092 if (rval
!= QLA_SUCCESS
)
2093 memset(state
, -1, sizeof(state
));
2099 qlafx00_get_host_speed(struct Scsi_Host
*shost
)
2101 struct qla_hw_data
*ha
= ((struct scsi_qla_host
*)
2102 (shost_priv(shost
)))->hw
;
2103 u32 speed
= FC_PORTSPEED_UNKNOWN
;
2105 switch (ha
->link_data_rate
) {
2106 case QLAFX00_PORT_SPEED_2G
:
2107 speed
= FC_PORTSPEED_2GBIT
;
2109 case QLAFX00_PORT_SPEED_4G
:
2110 speed
= FC_PORTSPEED_4GBIT
;
2112 case QLAFX00_PORT_SPEED_8G
:
2113 speed
= FC_PORTSPEED_8GBIT
;
2115 case QLAFX00_PORT_SPEED_10G
:
2116 speed
= FC_PORTSPEED_10GBIT
;
2119 fc_host_speed(shost
) = speed
;
2122 /** QLAFX00 specific ISR implementation functions */
2125 qlafx00_handle_sense(srb_t
*sp
, uint8_t *sense_data
, uint32_t par_sense_len
,
2126 uint32_t sense_len
, struct rsp_que
*rsp
, int res
)
2128 struct scsi_qla_host
*vha
= sp
->fcport
->vha
;
2129 struct scsi_cmnd
*cp
= GET_CMD_SP(sp
);
2130 uint32_t track_sense_len
;
2132 SET_FW_SENSE_LEN(sp
, sense_len
);
2134 if (sense_len
>= SCSI_SENSE_BUFFERSIZE
)
2135 sense_len
= SCSI_SENSE_BUFFERSIZE
;
2137 SET_CMD_SENSE_LEN(sp
, sense_len
);
2138 SET_CMD_SENSE_PTR(sp
, cp
->sense_buffer
);
2139 track_sense_len
= sense_len
;
2141 if (sense_len
> par_sense_len
)
2142 sense_len
= par_sense_len
;
2144 memcpy(cp
->sense_buffer
, sense_data
, sense_len
);
2146 SET_FW_SENSE_LEN(sp
, GET_FW_SENSE_LEN(sp
) - sense_len
);
2148 SET_CMD_SENSE_PTR(sp
, cp
->sense_buffer
+ sense_len
);
2149 track_sense_len
-= sense_len
;
2150 SET_CMD_SENSE_LEN(sp
, track_sense_len
);
2152 ql_dbg(ql_dbg_io
, vha
, 0x304d,
2153 "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
2154 sense_len
, par_sense_len
, track_sense_len
);
2155 if (GET_FW_SENSE_LEN(sp
) > 0) {
2156 rsp
->status_srb
= sp
;
2161 ql_dbg(ql_dbg_io
+ ql_dbg_buffer
, vha
, 0x3039,
2162 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
2163 sp
->fcport
->vha
->host_no
, cp
->device
->id
, cp
->device
->lun
,
2165 ql_dump_buffer(ql_dbg_io
+ ql_dbg_buffer
, vha
, 0x3049,
2166 cp
->sense_buffer
, sense_len
);
2171 qlafx00_tm_iocb_entry(scsi_qla_host_t
*vha
, struct req_que
*req
,
2172 struct tsk_mgmt_entry_fx00
*pkt
, srb_t
*sp
,
2173 __le16 sstatus
, __le16 cpstatus
)
2175 struct srb_iocb
*tmf
;
2177 tmf
= &sp
->u
.iocb_cmd
;
2178 if (cpstatus
!= cpu_to_le16((uint16_t)CS_COMPLETE
) ||
2179 (sstatus
& cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID
)))
2180 cpstatus
= cpu_to_le16((uint16_t)CS_INCOMPLETE
);
2181 tmf
->u
.tmf
.comp_status
= cpstatus
;
2182 sp
->done(vha
, sp
, 0);
2186 qlafx00_abort_iocb_entry(scsi_qla_host_t
*vha
, struct req_que
*req
,
2187 struct abort_iocb_entry_fx00
*pkt
)
2189 const char func
[] = "ABT_IOCB";
2191 struct srb_iocb
*abt
;
2193 sp
= qla2x00_get_sp_from_handle(vha
, func
, req
, pkt
);
2197 abt
= &sp
->u
.iocb_cmd
;
2198 abt
->u
.abt
.comp_status
= pkt
->tgt_id_sts
;
2199 sp
->done(vha
, sp
, 0);
static void
qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct ioctl_iocb_entry_fx00 *pkt)
{
	const char func[] = "IOSB_IOCB";
	srb_t *sp;
	int res = 0;
	struct fc_bsg_job *bsg_job;
	struct srb_iocb *iocb_job;
	struct qla_mt_iocb_rsp_fx00 fstatus;
	uint8_t	*fw_sts_ptr;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (sp->type == SRB_FXIOCB_DCMD) {
		iocb_job = &sp->u.iocb_cmd;
		iocb_job->u.fxiocb.seq_number = pkt->seq_no;
		iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags;
		iocb_job->u.fxiocb.result = pkt->status;
		if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
			iocb_job->u.fxiocb.req_data = pkt->dataword_r;
	} else {
		bsg_job = sp->u.bsg_job;

		memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));

		fstatus.reserved_1 = pkt->reserved_0;
		fstatus.func_type = pkt->comp_func_num;
		fstatus.ioctl_flags = pkt->fw_iotcl_flags;
		fstatus.ioctl_data = pkt->dataword_r;
		fstatus.adapid = pkt->adapid;
		fstatus.adapid_hi = pkt->adapid_hi;
		fstatus.reserved_2 = pkt->reserved_1;
		fstatus.res_count = pkt->residuallen;
		fstatus.status = pkt->status;
		fstatus.seq_number = pkt->seq_no;
		memcpy(fstatus.reserved_3,
		    pkt->reserved_2, 20 * sizeof(uint8_t));

		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);

		memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
		    sizeof(struct qla_mt_iocb_rsp_fx00));
		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
		    sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);

		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
		    sp->fcport->vha, 0x5080,
		    (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));

		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
		    sp->fcport->vha, 0x5074,
		    (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));

		res = bsg_job->reply->result = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
	}
	sp->done(vha, sp, res);
}

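/*
 * Note: the status IOCB carries a packed handle word.  LSW() of it indexes
 * the request queue's outstanding_cmds[] array and MSW() selects the request
 * queue itself via ha->req_q_map[] (the same encoding is used by the
 * multi-status and error entry handlers below).
 */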
/**
 * qlafx00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	struct sts_entry_fx00 *sts;
	__le16 comp_status;
	__le16 scsi_status;
	__le16 lscsi_status;
	int32_t resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t *rsp_info = NULL, *sense_data = NULL;
	struct qla_hw_data *ha = vha->hw;
	uint32_t hindex, handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;

	sts = (struct sts_entry_fx00 *) pkt;

	comp_status = sts->comp_status;
	scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK);
	hindex = sts->handle;
	handle = LSW(hindex);

	que = MSW(hindex);
	req = ha->req_q_map[que];

	/* Validate handle. */
	if (handle < req->num_outstanding_cmds)
		sp = req->outstanding_cmds[handle];
	else
		sp = NULL;

	if (sp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3034,
		    "Invalid status handle (0x%x).\n", handle);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	if (sp->type == SRB_TM_CMD) {
		req->outstanding_cmds[handle] = NULL;
		qlafx00_tm_iocb_entry(vha, req, pkt, sp,
		    scsi_status, comp_status);
		return;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_do_host_ramp_up(vha);
		qla2x00_process_completed_request(vha, req, handle);
		return;
	}

	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3048,
		    "Command already returned (0x%x/%p).\n",
		    handle, sp);
		return;
	}

	lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK);

	fcport = sp->fcport;

	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
		sense_len = sts->sense_len;
	if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
	    | (uint16_t)SS_RESIDUAL_OVER)))
		resid_len = le32_to_cpu(sts->residual_len);
	if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN))
		fw_resid_len = le32_to_cpu(sts->residual_len);
	rsp_info = sense_data = sts->data;
	par_sense_len = sizeof(sts->data);

	/* Check for overrun. */
	if (comp_status == CS_COMPLETE &&
	    scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER))
		comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN);

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (le16_to_cpu(comp_status)) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
		    | (uint16_t)SS_RESIDUAL_OVER))) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | le16_to_cpu(lscsi_status);

		if (lscsi_status ==
		    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
			break;

		qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
			resid = fw_resid_len;
		else
			resid = resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) {
			if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
			    && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 |
				    le16_to_cpu(lscsi_status);
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes, "
				    "cp->underflow: 0x%x).\n",
				    resid, scsi_bufflen(cp), cp->underflow);

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status !=
		    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) &&
		    lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) {
			/*
			 * scsi status of task set and busy are considered
			 * to be task not completed.
			 */
			ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | le16_to_cpu(lscsi_status);
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | le16_to_cpu(lscsi_status);
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status ==
			    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status !=
			    cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status &
			    cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
				break;

			qlafx00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:
		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
		    "FCP command status: 0x%x-0x%x (0x%x) "
		    "nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x"
		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x "
		    "sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->tgt_id,
		    lscsi_status, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2],
		    cp->cmnd[3], cp->cmnd[4], cp->cmnd[5], cp->cmnd[6],
		    cp->cmnd[7], cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp),
		    rsp_info_len, resid_len, fw_resid_len, sense_len,
		    par_sense_len, rsp_info_len);

	if (!res)
		qla2x00_do_host_ramp_up(vha);

	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}

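/*
 * Note: sp->done() is deliberately skipped above while rsp->status_srb is
 * set; the command is completed from qlafx00_status_cont_entry() once the
 * remaining firmware sense bytes have arrived.
 */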
/**
 * qlafx00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp) {
		ql_dbg(ql_dbg_io, vha, 0x3037,
		    "no SP, sp = %p\n", sp);
		return;
	}

	if (!GET_FW_SENSE_LEN(sp)) {
		ql_dbg(ql_dbg_io, vha, 0x304b,
		    "no fw sense data, sp = %p\n", sp);
		return;
	}
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x303b,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (!GET_CMD_SENSE_LEN(sp)) {
		ql_dbg(ql_dbg_io, vha, 0x304c,
		    "no sense data, sp = %p\n", sp);
	} else {
		sense_len = GET_CMD_SENSE_LEN(sp);
		sense_ptr = GET_CMD_SENSE_PTR(sp);
		ql_dbg(ql_dbg_io, vha, 0x304f,
		    "sp=%p sense_len=0x%x sense_ptr=%p.\n",
		    sp, sense_len, sense_ptr);

		if (sense_len > sizeof(pkt->data))
			sense_sz = sizeof(pkt->data);
		else
			sense_sz = sense_len;

		/* Move sense data. */
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
		    (uint8_t *)pkt, sizeof(sts_cont_entry_t));
		memcpy(sense_ptr, pkt->data, sense_sz);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
		    sense_ptr, sense_sz);

		sense_len -= sense_sz;
		sense_ptr += sense_sz;

		SET_CMD_SENSE_PTR(sp, sense_ptr);
		SET_CMD_SENSE_LEN(sp, sense_len);
	}
	sense_len = GET_FW_SENSE_LEN(sp);
	sense_len = (sense_len > sizeof(pkt->data)) ?
	    (sense_len - sizeof(pkt->data)) : 0;
	SET_FW_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}

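/*
 * Note: a multi-status entry batches up to MAX_HANDLE_COUNT handles whose
 * commands all completed cleanly; each one is retired through the fast-path
 * qla2x00_process_completed_request() without building a full status entry.
 */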
/**
 * qlafx00_multistatus_entry() - Process Multi response queue entries.
 * @ha: SCSI driver HA context
 */
static void
qlafx00_multistatus_entry(struct scsi_qla_host *vha,
	struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	struct multi_sts_entry_fx00 *stsmfx;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle, hindex, handle_count, i;
	uint16_t que;
	struct req_que *req;
	__le32 *handle_ptr;

	stsmfx = (struct multi_sts_entry_fx00 *) pkt;

	handle_count = stsmfx->handle_count;

	if (handle_count > MAX_HANDLE_COUNT) {
		ql_dbg(ql_dbg_io, vha, 0x3035,
		    "Invalid handle count (0x%x).\n", handle_count);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	handle_ptr = &stsmfx->handles[0];

	for (i = 0; i < handle_count; i++) {
		hindex = le32_to_cpu(*handle_ptr);
		handle = LSW(hindex);
		que = MSW(hindex);
		req = ha->req_q_map[que];

		/* Validate handle. */
		if (handle < req->num_outstanding_cmds)
			sp = req->outstanding_cmds[handle];
		else
			sp = NULL;

		if (sp == NULL) {
			ql_dbg(ql_dbg_io, vha, 0x3044,
			    "Invalid status handle (0x%x).\n", handle);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			return;
		}

		qla2x00_process_completed_request(vha, req, handle);
		handle_ptr++;
	}
}

/**
 * qlafx00_error_entry() - Process an error entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
	struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x507f,
	    "type of error status in response: 0x%x\n", estatus);

	req = ha->req_q_map[que];

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}

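/*
 * Note: response ring entries are consumed until the current ring slot's
 * signature reads RESPONSE_PROCESSED.  Each handled entry is copied out of
 * the I/O-mapped ring with memcpy_fromio() and then stamped
 * RESPONSE_PROCESSED so it is not replayed on the next pass.
 */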
/**
 * qlafx00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
static void
qlafx00_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_fx00 *pkt;
	response_t *lptr;

	if (!vha->flags.online)
		return;

	while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) !=
	    RESPONSE_PROCESSED) {
		lptr = rsp->ring_ptr;
		memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
		    sizeof(rsp->rsp_pkt));
		pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0 &&
		    pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
			qlafx00_error_entry(vha, rsp,
			    (struct sts_entry_fx00 *)pkt, pkt->entry_status,
			    pkt->entry_type);
			goto next_iter;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE_FX00:
			qlafx00_status_entry(vha, rsp, pkt);
			break;

		case STATUS_CONT_TYPE_FX00:
			qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;

		case MULTI_STATUS_TYPE_FX00:
			qlafx00_multistatus_entry(vha, rsp, pkt);
			break;

		case ABORT_IOCB_TYPE_FX00:
			qlafx00_abort_iocb_entry(vha, rsp->req,
			    (struct abort_iocb_entry_fx00 *)pkt);
			break;

		case IOCTL_IOSB_TYPE_FX00:
			qlafx00_ioctl_iosb_entry(vha, rsp->req,
			    (struct ioctl_iocb_entry_fx00 *)pkt);
			break;

		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5081,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
next_iter:
		WRT_REG_DWORD((void __iomem *)&lptr->signature,
		    RESPONSE_PROCESSED);
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}

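/*
 * Note: async events latch the AEN mailbox registers into ha->aenmb[] and
 * hand the data off to a work item via qlafx00_post_aenfx_work(); only the
 * number of mailbox words forwarded (data_size) differs per event type.
 */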
/**
 * qlafx00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 */
static void
qlafx00_async_event(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg;
	int data_size = 1;

	reg = &ha->iobase->ispfx00;
	/* Setup to process RIO completion. */
	switch (ha->aenmb[0]) {
	case QLAFX00_MBA_SYSTEM_ERR:		/* System Error */
		ql_log(ql_log_warn, vha, 0x5079,
		    "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		break;

	case QLAFX00_MBA_SHUTDOWN_RQSTD:	/* Shutdown requested */
		ql_dbg(ql_dbg_async, vha, 0x5076,
		    "Asynchronous FW shutdown requested.\n");
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		break;

	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
		ql_dbg(ql_dbg_async, vha, 0x5077,
		    "Asynchronous port Update received "
		    "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
		data_size = 4;
		break;

	default:
		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
		ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
		ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
		ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
		ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
		ql_dbg(ql_dbg_async, vha, 0x5078,
		    "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
		    ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
		data_size = 8;
	}
	qlafx00_post_aenfx_work(vha, ha->aenmb[0],
	    (uint32_t *)ha->aenmb, data_size);
}

/**
 * qlafx00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb16: Mailbox16 register
 */
static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
{
	uint16_t	cnt;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;

	if (!ha->mcp32)
		ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out32[0] = mb0;
	wptr = (uint16_t __iomem *)&reg->mailbox17;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out32[cnt] = RD_REG_WORD(wptr);
		wptr++;
	}
}

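/*
 * Note: the interrupt handler below polls the aggregate interrupt status
 * register up to 50 times per invocation, dispatching mailbox, async-event
 * and response-queue work from a single ISR and clearing only the bits it
 * actually serviced via QLAFX00_CLR_INTR_REG().
 */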
/**
 * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qlafx00_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_fx00 __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	mb[8];
	struct rsp_que *rsp;
	unsigned long	flags;
	uint32_t clr_intr = 0;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x507d,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->ispfx00;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; clr_intr = 0) {
		stat = QLAFX00_RD_INTR_REG(ha);
		if ((stat & QLAFX00_HST_INT_STS_BITS) == 0)
			break;

		switch (stat & QLAFX00_HST_INT_STS_BITS) {
		case QLAFX00_INTR_MB_CMPLT:
		case QLAFX00_INTR_MB_RSP_CMPLT:
		case QLAFX00_INTR_MB_ASYNC_CMPLT:
		case QLAFX00_INTR_ALL_CMPLT:
			mb[0] = RD_REG_WORD(&reg->mailbox16);
			qlafx00_mbx_completion(vha, mb[0]);
			status |= MBX_INTERRUPT;
			clr_intr |= QLAFX00_INTR_MB_CMPLT;
			break;
		case QLAFX00_INTR_ASYNC_CMPLT:
		case QLAFX00_INTR_RSP_ASYNC_CMPLT:
			ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
			qlafx00_async_event(vha);
			clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
			break;
		case QLAFX00_INTR_RSP_CMPLT:
			qlafx00_process_response_queue(vha, rsp);
			clr_intr |= QLAFX00_INTR_RSP_CMPLT;
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x507a,
			    "Unrecognized interrupt type (%d).\n", stat);
			break;
		}
		QLAFX00_CLR_INTR_REG(ha, clr_intr);
		QLAFX00_RD_INTR_REG(ha);
	}

	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/** QLAFX00 specific IOCB implementation functions */
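
/*
 * Note: a continuation type 1 IOCB extends a command IOCB when its
 * scatter/gather list needs more DSDs than fit in the base entry.
 * qlafx00_prep_cont_type1_iocb() advances the request ring and returns the
 * ring slot that the locally built continuation packet is later copied into
 * with memcpy_toio().
 */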
static inline cont_a64_entry_t *
qlafx00_prep_cont_type1_iocb(struct req_que *req,
    cont_a64_entry_t *lcont_pkt)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00;

	return cont_pkt;
}

static void
qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
    uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
{
	uint16_t	avail_dsds;
	__le32 *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i, cont;
	struct req_que *req;
	cont_a64_entry_t lcont_pkt;
	cont_a64_entry_t *cont_pkt;

	vha = sp->fcport->vha;
	req = vha->req;

	cmd = GET_CMD_SP(sp);
	cont = 0;
	cont_pkt = NULL;

	/* Update entry type to indicate Command Type 3 IOCB */
	lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7;

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		lcmd_pkt->cntrl_flags = TMF_READ_DATA;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
			cont_pkt =
			    qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
			cur_dsd = (__le32 *)lcont_pkt.dseg_0_address;
			avail_dsds = 5;
			cont = 1;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		if (avail_dsds == 0 && cont == 1) {
			cont = 0;
			memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
			    REQUEST_ENTRY_SIZE);
		}
	}
	if (avail_dsds != 0 && cont == 1) {
		memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
		    REQUEST_ENTRY_SIZE);
	}
}

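/*
 * Note: qlafx00_start_scsi() builds the command IOCB in a local
 * cmd_type_7_fx00 (lcmd_pkt) on the stack and only copies it into the
 * I/O-mapped request ring with memcpy_toio() once it is complete, then
 * rings the doorbell by writing req_q_in and raising the host interrupt.
 */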
/**
 * qlafx00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qlafx00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	uint32_t        index;
	uint32_t	handle;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_7_fx00 *cmd_pkt;
	struct cmd_type_7_fx00 lcmd_pkt;
	struct scsi_lun llun;
	char		tag[2];

	/* Setup device pointers. */
	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Forcing marker needed for now */
	vha->marker_needed = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;

	memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);

	lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
	lcmd_pkt.handle_hi = 0;
	lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
	lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);

	int_to_scsilun(cmd->device->lun, &llun);
	host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
	    sizeof(lcmd_pkt.lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			lcmd_pkt.task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			lcmd_pkt.task = TSK_ORDERED;
			break;
		}
	}

	/* Load SCSI command packet. */
	host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
	lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);

	/* Set total data segment count. */
	lcmd_pkt.entry_count = (uint8_t)req_cnt;

	/* Specify response queue number where completion should happen */
	lcmd_pkt.entry_status = (uint8_t) rsp->id;

	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
	    (uint8_t *)cmd->cmnd, cmd->cmd_len);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
	    (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);

	memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

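/*
 * Note: the TM, abort and FXDISC builders below follow the same pattern:
 * fill a zeroed local IOCB image on the stack and then memcpy() it into the
 * packet the caller has already reserved on the request ring.
 */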
static void
qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;
	struct tsk_mgmt_entry_fx00 tm_iocb;
	struct scsi_lun llun;

	memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
	tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
	tm_iocb.entry_count = 1;
	tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	tm_iocb.handle_hi = 0;
	tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
	tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
	tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
	if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
		int_to_scsilun(fxio->u.tmf.lun, &llun);
		host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
		    sizeof(struct scsi_lun));
	}

	memcpy((void *)ptm_iocb, &tm_iocb,
	    sizeof(struct tsk_mgmt_entry_fx00));
}

static void
qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;
	struct abort_iocb_entry_fx00 abt_iocb;

	memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
	abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
	abt_iocb.entry_count = 1;
	abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb.abort_handle =
	    cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
	abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
	abt_iocb.req_que_no = cpu_to_le16(req->id);

	memcpy((void *)pabt_iocb, &abt_iocb,
	    sizeof(struct abort_iocb_entry_fx00));
}

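/*
 * Note: FXDISC requests come from two sources.  For driver-internal
 * SRB_FXIOCB_DCMD requests the single request/response buffer is already
 * DMA-mapped, so at most one DSD per direction is filled in.  For
 * bsg-originated requests the scatter/gather lists of the request and reply
 * payloads are walked, spilling into continuation type 1 IOCBs whenever the
 * base entry's DSDs are exhausted.
 */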
static void
qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	struct fc_bsg_job *bsg_job;
	struct fxdisc_entry_fx00 fx_iocb;
	uint8_t entry_cnt = 1;

	memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
	fx_iocb.entry_type = FX00_IOCB_TYPE;
	fx_iocb.handle = cpu_to_le32(sp->handle);
	fx_iocb.entry_count = entry_cnt;

	if (sp->type == SRB_FXIOCB_DCMD) {
		fx_iocb.func_num =
		    sp->u.iocb_cmd.u.fxiocb.req_func_type;
		fx_iocb.adapid = fxio->u.fxiocb.adapter_id;
		fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi;
		fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0;
		fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1;
		fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra;

		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
			fx_iocb.req_dsdcnt = cpu_to_le16(1);
			fx_iocb.req_xfrcnt =
			    cpu_to_le16(fxio->u.fxiocb.req_len);
			fx_iocb.dseg_rq_address[0] =
			    cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
			fx_iocb.dseg_rq_address[1] =
			    cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
			fx_iocb.dseg_rq_len =
			    cpu_to_le32(fxio->u.fxiocb.req_len);
		}

		if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
			fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
			fx_iocb.rsp_xfrcnt =
			    cpu_to_le16(fxio->u.fxiocb.rsp_len);
			fx_iocb.dseg_rsp_address[0] =
			    cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
			fx_iocb.dseg_rsp_address[1] =
			    cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
			fx_iocb.dseg_rsp_len =
			    cpu_to_le32(fxio->u.fxiocb.rsp_len);
		}

		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) {
			fx_iocb.dataword = fxio->u.fxiocb.req_data;
		}
		fx_iocb.flags = fxio->u.fxiocb.flags;
	} else {
		struct scatterlist *sg;
		bsg_job = sp->u.bsg_job;
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

		fx_iocb.func_num = piocb_rqst->func_type;
		fx_iocb.adapid = piocb_rqst->adapid;
		fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
		fx_iocb.reserved_0 = piocb_rqst->reserved_0;
		fx_iocb.reserved_1 = piocb_rqst->reserved_1;
		fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
		fx_iocb.dataword = piocb_rqst->dataword;
		fx_iocb.req_xfrcnt = piocb_rqst->req_len;
		fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len;

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
			int avail_dsds, tot_dsds;
			cont_a64_entry_t lcont_pkt;
			cont_a64_entry_t *cont_pkt = NULL;
			__le32 *cur_dsd;
			int index = 0, cont = 0;

			fx_iocb.req_dsdcnt =
			    cpu_to_le16(bsg_job->request_payload.sg_cnt);
			tot_dsds =
			    bsg_job->request_payload.sg_cnt;
			cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0];
			avail_dsds = 1;
			for_each_sg(bsg_job->request_payload.sg_list, sg,
			    tot_dsds, index) {
				dma_addr_t sle_dma;

				/* Allocate additional continuation packets? */
				if (avail_dsds == 0) {
					/*
					 * Five DSDs are available in the Cont.
					 * Type 1 IOCB.
					 */
					memset(&lcont_pkt, 0,
					    REQUEST_ENTRY_SIZE);
					cont_pkt =
					    qlafx00_prep_cont_type1_iocb(
						sp->fcport->vha->req,
						&lcont_pkt);
					cur_dsd = (__le32 *)
					    lcont_pkt.dseg_0_address;
					avail_dsds = 5;
					cont = 1;
					entry_cnt++;
				}

				sle_dma = sg_dma_address(sg);
				*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
				avail_dsds--;

				if (avail_dsds == 0 && cont == 1) {
					cont = 0;
					memcpy_toio(
					    (void __iomem *)cont_pkt,
					    &lcont_pkt, REQUEST_ENTRY_SIZE);
					ql_dump_buffer(
					    ql_dbg_user + ql_dbg_verbose,
					    sp->fcport->vha, 0x3042,
					    (uint8_t *)&lcont_pkt,
					    REQUEST_ENTRY_SIZE);
				}
			}
			if (avail_dsds != 0 && cont == 1) {
				memcpy_toio((void __iomem *)cont_pkt,
				    &lcont_pkt, REQUEST_ENTRY_SIZE);
				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
				    sp->fcport->vha, 0x3043,
				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
			}
		}

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
			int avail_dsds, tot_dsds;
			cont_a64_entry_t lcont_pkt;
			cont_a64_entry_t *cont_pkt = NULL;
			__le32 *cur_dsd;
			int index = 0, cont = 0;

			fx_iocb.rsp_dsdcnt =
			    cpu_to_le16(bsg_job->reply_payload.sg_cnt);
			tot_dsds = bsg_job->reply_payload.sg_cnt;
			cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0];
			avail_dsds = 1;

			for_each_sg(bsg_job->reply_payload.sg_list, sg,
			    tot_dsds, index) {
				dma_addr_t sle_dma;

				/* Allocate additional continuation packets? */
				if (avail_dsds == 0) {
					/*
					 * Five DSDs are available in the Cont.
					 * Type 1 IOCB.
					 */
					memset(&lcont_pkt, 0,
					    REQUEST_ENTRY_SIZE);
					cont_pkt =
					    qlafx00_prep_cont_type1_iocb(
						sp->fcport->vha->req,
						&lcont_pkt);
					cur_dsd = (__le32 *)
					    lcont_pkt.dseg_0_address;
					avail_dsds = 5;
					cont = 1;
					entry_cnt++;
				}

				sle_dma = sg_dma_address(sg);
				*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
				avail_dsds--;

				if (avail_dsds == 0 && cont == 1) {
					cont = 0;
					memcpy_toio((void __iomem *)cont_pkt,
					    &lcont_pkt,
					    REQUEST_ENTRY_SIZE);
					ql_dump_buffer(
					    ql_dbg_user + ql_dbg_verbose,
					    sp->fcport->vha, 0x3045,
					    (uint8_t *)&lcont_pkt,
					    REQUEST_ENTRY_SIZE);
				}
			}
			if (avail_dsds != 0 && cont == 1) {
				memcpy_toio((void __iomem *)cont_pkt,
				    &lcont_pkt, REQUEST_ENTRY_SIZE);
				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
				    sp->fcport->vha, 0x3046,
				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
			}
		}

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
			fx_iocb.dataword = piocb_rqst->dataword;
		fx_iocb.flags = piocb_rqst->flags;
		fx_iocb.entry_count = entry_cnt;
	}

	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
	    sp->fcport->vha, 0x3047,
	    (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));

	memcpy((void *)pfxiocb, &fx_iocb,
	    sizeof(struct fxdisc_entry_fx00));
}