2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
13 #include <scsi/scsi_tcq.h>
16 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
19 * Returns the proper CF_* direction based on CDB.
21 static inline uint16_t
22 qla2x00_get_cmd_direction(srb_t
*sp
)
25 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
26 struct scsi_qla_host
*vha
= sp
->fcport
->vha
;
30 /* Set transfer direction */
31 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
33 vha
->qla_stats
.output_bytes
+= scsi_bufflen(cmd
);
34 vha
->qla_stats
.output_requests
++;
35 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
37 vha
->qla_stats
.input_bytes
+= scsi_bufflen(cmd
);
38 vha
->qla_stats
.input_requests
++;
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * The Command Type 2 IOCB holds 3 DSDs; each Continuation Type 0 IOCB
 * holds 7 more, so one continuation is added per 7 (or fraction of 7)
 * descriptors beyond the first 3.
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * The Command Type 3 IOCB holds 2 (64-bit) DSDs; each Continuation
 * Type 1 IOCB holds 5 more.
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
88 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
91 * Returns a pointer to the Continuation Type 0 IOCB packet.
93 static inline cont_entry_t
*
94 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host
*vha
)
96 cont_entry_t
*cont_pkt
;
97 struct req_que
*req
= vha
->req
;
98 /* Adjust ring index. */
100 if (req
->ring_index
== req
->length
) {
102 req
->ring_ptr
= req
->ring
;
107 cont_pkt
= (cont_entry_t
*)req
->ring_ptr
;
109 /* Load packet defaults. */
110 *((uint32_t *)(&cont_pkt
->entry_type
)) = cpu_to_le32(CONTINUE_TYPE
);
116 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
119 * Returns a pointer to the continuation type 1 IOCB packet.
121 static inline cont_a64_entry_t
*
122 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t
*vha
, struct req_que
*req
)
124 cont_a64_entry_t
*cont_pkt
;
126 /* Adjust ring index. */
128 if (req
->ring_index
== req
->length
) {
130 req
->ring_ptr
= req
->ring
;
135 cont_pkt
= (cont_a64_entry_t
*)req
->ring_ptr
;
137 /* Load packet defaults. */
138 *((uint32_t *)(&cont_pkt
->entry_type
)) = IS_QLAFX00(vha
->hw
) ?
139 cpu_to_le32(CONTINUE_A64_TYPE_FX00
) :
140 cpu_to_le32(CONTINUE_A64_TYPE
);
146 qla24xx_configure_prot_mode(srb_t
*sp
, uint16_t *fw_prot_opts
)
148 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
149 uint8_t guard
= scsi_host_get_guard(cmd
->device
->host
);
151 /* We always use DIFF Bundling for best performance */
154 /* Translate SCSI opcode to a protection opcode */
155 switch (scsi_get_prot_op(cmd
)) {
156 case SCSI_PROT_READ_STRIP
:
157 *fw_prot_opts
|= PO_MODE_DIF_REMOVE
;
159 case SCSI_PROT_WRITE_INSERT
:
160 *fw_prot_opts
|= PO_MODE_DIF_INSERT
;
162 case SCSI_PROT_READ_INSERT
:
163 *fw_prot_opts
|= PO_MODE_DIF_INSERT
;
165 case SCSI_PROT_WRITE_STRIP
:
166 *fw_prot_opts
|= PO_MODE_DIF_REMOVE
;
168 case SCSI_PROT_READ_PASS
:
169 case SCSI_PROT_WRITE_PASS
:
170 if (guard
& SHOST_DIX_GUARD_IP
)
171 *fw_prot_opts
|= PO_MODE_DIF_TCP_CKSUM
;
173 *fw_prot_opts
|= PO_MODE_DIF_PASS
;
175 default: /* Normal Request */
176 *fw_prot_opts
|= PO_MODE_DIF_PASS
;
180 return scsi_prot_sg_count(cmd
);
184 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
185 * capable IOCB types.
187 * @sp: SRB command to process
188 * @cmd_pkt: Command type 2 IOCB
189 * @tot_dsds: Total number of segments to transfer
191 void qla2x00_build_scsi_iocbs_32(srb_t
*sp
, cmd_entry_t
*cmd_pkt
,
196 scsi_qla_host_t
*vha
;
197 struct scsi_cmnd
*cmd
;
198 struct scatterlist
*sg
;
201 cmd
= GET_CMD_SP(sp
);
203 /* Update entry type to indicate Command Type 2 IOCB */
204 *((uint32_t *)(&cmd_pkt
->entry_type
)) =
205 cpu_to_le32(COMMAND_TYPE
);
207 /* No data transfer */
208 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
209 cmd_pkt
->byte_count
= cpu_to_le32(0);
213 vha
= sp
->fcport
->vha
;
214 cmd_pkt
->control_flags
|= cpu_to_le16(qla2x00_get_cmd_direction(sp
));
216 /* Three DSDs are available in the Command Type 2 IOCB */
218 cur_dsd
= (uint32_t *)&cmd_pkt
->dseg_0_address
;
220 /* Load data segments */
221 scsi_for_each_sg(cmd
, sg
, tot_dsds
, i
) {
222 cont_entry_t
*cont_pkt
;
224 /* Allocate additional continuation packets? */
225 if (avail_dsds
== 0) {
227 * Seven DSDs are available in the Continuation
230 cont_pkt
= qla2x00_prep_cont_type0_iocb(vha
);
231 cur_dsd
= (uint32_t *)&cont_pkt
->dseg_0_address
;
235 *cur_dsd
++ = cpu_to_le32(sg_dma_address(sg
));
236 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
242 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
243 * capable IOCB types.
245 * @sp: SRB command to process
246 * @cmd_pkt: Command type 3 IOCB
247 * @tot_dsds: Total number of segments to transfer
249 void qla2x00_build_scsi_iocbs_64(srb_t
*sp
, cmd_entry_t
*cmd_pkt
,
254 scsi_qla_host_t
*vha
;
255 struct scsi_cmnd
*cmd
;
256 struct scatterlist
*sg
;
259 cmd
= GET_CMD_SP(sp
);
261 /* Update entry type to indicate Command Type 3 IOCB */
262 *((uint32_t *)(&cmd_pkt
->entry_type
)) = cpu_to_le32(COMMAND_A64_TYPE
);
264 /* No data transfer */
265 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
266 cmd_pkt
->byte_count
= cpu_to_le32(0);
270 vha
= sp
->fcport
->vha
;
271 cmd_pkt
->control_flags
|= cpu_to_le16(qla2x00_get_cmd_direction(sp
));
273 /* Two DSDs are available in the Command Type 3 IOCB */
275 cur_dsd
= (uint32_t *)&cmd_pkt
->dseg_0_address
;
277 /* Load data segments */
278 scsi_for_each_sg(cmd
, sg
, tot_dsds
, i
) {
280 cont_a64_entry_t
*cont_pkt
;
282 /* Allocate additional continuation packets? */
283 if (avail_dsds
== 0) {
285 * Five DSDs are available in the Continuation
288 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
, vha
->req
);
289 cur_dsd
= (uint32_t *)cont_pkt
->dseg_0_address
;
293 sle_dma
= sg_dma_address(sg
);
294 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
295 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
296 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
302 * qla2x00_start_scsi() - Send a SCSI command to the ISP
303 * @sp: command to send to the ISP
305 * Returns non-zero if a failure occurred, else zero.
308 qla2x00_start_scsi(srb_t
*sp
)
312 scsi_qla_host_t
*vha
;
313 struct scsi_cmnd
*cmd
;
317 cmd_entry_t
*cmd_pkt
;
321 struct device_reg_2xxx __iomem
*reg
;
322 struct qla_hw_data
*ha
;
326 /* Setup device pointers. */
327 vha
= sp
->fcport
->vha
;
329 reg
= &ha
->iobase
->isp
;
330 cmd
= GET_CMD_SP(sp
);
331 req
= ha
->req_q_map
[0];
332 rsp
= ha
->rsp_q_map
[0];
333 /* So we know we haven't pci_map'ed anything yet */
336 /* Send marker if required */
337 if (vha
->marker_needed
!= 0) {
338 if (qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
340 return (QLA_FUNCTION_FAILED
);
342 vha
->marker_needed
= 0;
345 /* Acquire ring specific lock */
346 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
348 /* Check for room in outstanding command list. */
349 handle
= req
->current_outstanding_cmd
;
350 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
352 if (handle
== req
->num_outstanding_cmds
)
354 if (!req
->outstanding_cmds
[handle
])
357 if (index
== req
->num_outstanding_cmds
)
360 /* Map the sg table so we have an accurate count of sg entries needed */
361 if (scsi_sg_count(cmd
)) {
362 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
363 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
371 /* Calculate the number of request entries needed. */
372 req_cnt
= ha
->isp_ops
->calc_req_entries(tot_dsds
);
373 if (req
->cnt
< (req_cnt
+ 2)) {
374 cnt
= RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha
, reg
));
375 if (req
->ring_index
< cnt
)
376 req
->cnt
= cnt
- req
->ring_index
;
378 req
->cnt
= req
->length
-
379 (req
->ring_index
- cnt
);
380 /* If still no head room then bail out */
381 if (req
->cnt
< (req_cnt
+ 2))
385 /* Build command packet */
386 req
->current_outstanding_cmd
= handle
;
387 req
->outstanding_cmds
[handle
] = sp
;
389 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
392 cmd_pkt
= (cmd_entry_t
*)req
->ring_ptr
;
393 cmd_pkt
->handle
= handle
;
394 /* Zero out remaining portion of packet. */
395 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
396 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
397 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
399 /* Set target ID and LUN number*/
400 SET_TARGET_ID(ha
, cmd_pkt
->target
, sp
->fcport
->loop_id
);
401 cmd_pkt
->lun
= cpu_to_le16(cmd
->device
->lun
);
402 cmd_pkt
->control_flags
= cpu_to_le16(CF_SIMPLE_TAG
);
404 /* Load SCSI command packet. */
405 memcpy(cmd_pkt
->scsi_cdb
, cmd
->cmnd
, cmd
->cmd_len
);
406 cmd_pkt
->byte_count
= cpu_to_le32((uint32_t)scsi_bufflen(cmd
));
408 /* Build IOCB segments */
409 ha
->isp_ops
->build_iocbs(sp
, cmd_pkt
, tot_dsds
);
411 /* Set total data segment count. */
412 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
415 /* Adjust ring index. */
417 if (req
->ring_index
== req
->length
) {
419 req
->ring_ptr
= req
->ring
;
423 sp
->flags
|= SRB_DMA_VALID
;
425 /* Set chip new ring index. */
426 WRT_REG_WORD(ISP_REQ_Q_IN(ha
, reg
), req
->ring_index
);
427 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha
, reg
)); /* PCI Posting. */
429 /* Manage unprocessed RIO/ZIO commands in response queue. */
430 if (vha
->flags
.process_response_queue
&&
431 rsp
->ring_ptr
->signature
!= RESPONSE_PROCESSED
)
432 qla2x00_process_response_queue(rsp
);
434 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
435 return (QLA_SUCCESS
);
441 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
443 return (QLA_FUNCTION_FAILED
);
447 * qla2x00_start_iocbs() - Execute the IOCB command
450 qla2x00_start_iocbs(struct scsi_qla_host
*vha
, struct req_que
*req
)
452 struct qla_hw_data
*ha
= vha
->hw
;
453 device_reg_t
*reg
= ISP_QUE_REG(ha
, req
->id
);
455 if (IS_P3P_TYPE(ha
)) {
456 qla82xx_start_iocbs(vha
);
458 /* Adjust ring index. */
460 if (req
->ring_index
== req
->length
) {
462 req
->ring_ptr
= req
->ring
;
466 /* Set chip new ring index. */
467 if (ha
->mqenable
|| IS_QLA83XX(ha
) || IS_QLA27XX(ha
)) {
468 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
469 RD_REG_DWORD_RELAXED(&ha
->iobase
->isp24
.hccr
);
470 } else if (IS_QLAFX00(ha
)) {
471 WRT_REG_DWORD(®
->ispfx00
.req_q_in
, req
->ring_index
);
472 RD_REG_DWORD_RELAXED(®
->ispfx00
.req_q_in
);
473 QLAFX00_SET_HST_INTR(ha
, ha
->rqstq_intr_code
);
474 } else if (IS_FWI2_CAPABLE(ha
)) {
475 WRT_REG_DWORD(®
->isp24
.req_q_in
, req
->ring_index
);
476 RD_REG_DWORD_RELAXED(®
->isp24
.req_q_in
);
478 WRT_REG_WORD(ISP_REQ_Q_IN(ha
, ®
->isp
),
480 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha
, ®
->isp
));
486 * qla2x00_marker() - Send a marker IOCB to the firmware.
490 * @type: marker modifier
492 * Can be called from both normal and interrupt context.
494 * Returns non-zero if a failure occurred, else zero.
497 __qla2x00_marker(struct scsi_qla_host
*vha
, struct req_que
*req
,
498 struct rsp_que
*rsp
, uint16_t loop_id
,
499 uint64_t lun
, uint8_t type
)
502 struct mrk_entry_24xx
*mrk24
= NULL
;
504 struct qla_hw_data
*ha
= vha
->hw
;
505 scsi_qla_host_t
*base_vha
= pci_get_drvdata(ha
->pdev
);
507 req
= ha
->req_q_map
[0];
508 mrk
= (mrk_entry_t
*)qla2x00_alloc_iocbs(vha
, NULL
);
510 ql_log(ql_log_warn
, base_vha
, 0x3026,
511 "Failed to allocate Marker IOCB.\n");
513 return (QLA_FUNCTION_FAILED
);
516 mrk
->entry_type
= MARKER_TYPE
;
517 mrk
->modifier
= type
;
518 if (type
!= MK_SYNC_ALL
) {
519 if (IS_FWI2_CAPABLE(ha
)) {
520 mrk24
= (struct mrk_entry_24xx
*) mrk
;
521 mrk24
->nport_handle
= cpu_to_le16(loop_id
);
522 int_to_scsilun(lun
, (struct scsi_lun
*)&mrk24
->lun
);
523 host_to_fcp_swap(mrk24
->lun
, sizeof(mrk24
->lun
));
524 mrk24
->vp_index
= vha
->vp_idx
;
525 mrk24
->handle
= MAKE_HANDLE(req
->id
, mrk24
->handle
);
527 SET_TARGET_ID(ha
, mrk
->target
, loop_id
);
528 mrk
->lun
= cpu_to_le16((uint16_t)lun
);
533 qla2x00_start_iocbs(vha
, req
);
535 return (QLA_SUCCESS
);
539 qla2x00_marker(struct scsi_qla_host
*vha
, struct req_que
*req
,
540 struct rsp_que
*rsp
, uint16_t loop_id
, uint64_t lun
,
544 unsigned long flags
= 0;
546 spin_lock_irqsave(&vha
->hw
->hardware_lock
, flags
);
547 ret
= __qla2x00_marker(vha
, req
, rsp
, loop_id
, lun
, type
);
548 spin_unlock_irqrestore(&vha
->hw
->hardware_lock
, flags
);
554 * qla2x00_issue_marker
557 * Caller CAN have hardware lock held as specified by ha_locked parameter.
558 * Might release it, then reaquire.
560 int qla2x00_issue_marker(scsi_qla_host_t
*vha
, int ha_locked
)
563 if (__qla2x00_marker(vha
, vha
->req
, vha
->req
->rsp
, 0, 0,
564 MK_SYNC_ALL
) != QLA_SUCCESS
)
565 return QLA_FUNCTION_FAILED
;
567 if (qla2x00_marker(vha
, vha
->req
, vha
->req
->rsp
, 0, 0,
568 MK_SYNC_ALL
) != QLA_SUCCESS
)
569 return QLA_FUNCTION_FAILED
;
571 vha
->marker_needed
= 0;
577 qla24xx_build_scsi_type_6_iocbs(srb_t
*sp
, struct cmd_type_6
*cmd_pkt
,
580 uint32_t *cur_dsd
= NULL
;
581 scsi_qla_host_t
*vha
;
582 struct qla_hw_data
*ha
;
583 struct scsi_cmnd
*cmd
;
584 struct scatterlist
*cur_seg
;
588 uint8_t first_iocb
= 1;
589 uint32_t dsd_list_len
;
590 struct dsd_dma
*dsd_ptr
;
593 cmd
= GET_CMD_SP(sp
);
595 /* Update entry type to indicate Command Type 3 IOCB */
596 *((uint32_t *)(&cmd_pkt
->entry_type
)) = cpu_to_le32(COMMAND_TYPE_6
);
598 /* No data transfer */
599 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
600 cmd_pkt
->byte_count
= cpu_to_le32(0);
604 vha
= sp
->fcport
->vha
;
607 /* Set transfer direction */
608 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
609 cmd_pkt
->control_flags
= cpu_to_le16(CF_WRITE_DATA
);
610 vha
->qla_stats
.output_bytes
+= scsi_bufflen(cmd
);
611 vha
->qla_stats
.output_requests
++;
612 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
613 cmd_pkt
->control_flags
= cpu_to_le16(CF_READ_DATA
);
614 vha
->qla_stats
.input_bytes
+= scsi_bufflen(cmd
);
615 vha
->qla_stats
.input_requests
++;
618 cur_seg
= scsi_sglist(cmd
);
619 ctx
= GET_CMD_CTX_SP(sp
);
622 avail_dsds
= (tot_dsds
> QLA_DSDS_PER_IOCB
) ?
623 QLA_DSDS_PER_IOCB
: tot_dsds
;
624 tot_dsds
-= avail_dsds
;
625 dsd_list_len
= (avail_dsds
+ 1) * QLA_DSD_SIZE
;
627 dsd_ptr
= list_first_entry(&ha
->gbl_dsd_list
,
628 struct dsd_dma
, list
);
629 next_dsd
= dsd_ptr
->dsd_addr
;
630 list_del(&dsd_ptr
->list
);
632 list_add_tail(&dsd_ptr
->list
, &ctx
->dsd_list
);
638 dsd_seg
= (uint32_t *)&cmd_pkt
->fcp_data_dseg_address
;
639 *dsd_seg
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
640 *dsd_seg
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
641 cmd_pkt
->fcp_data_dseg_len
= cpu_to_le32(dsd_list_len
);
643 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
644 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
645 *cur_dsd
++ = cpu_to_le32(dsd_list_len
);
647 cur_dsd
= (uint32_t *)next_dsd
;
651 sle_dma
= sg_dma_address(cur_seg
);
652 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
653 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
654 *cur_dsd
++ = cpu_to_le32(sg_dma_len(cur_seg
));
655 cur_seg
= sg_next(cur_seg
);
660 /* Null termination */
664 cmd_pkt
->control_flags
|= CF_DATA_SEG_DESCR_ENABLE
;
669 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
670 * for Command Type 6.
672 * @dsds: number of data segment decriptors needed
674 * Returns the number of dsd list needed to store @dsds.
676 static inline uint16_t
677 qla24xx_calc_dsd_lists(uint16_t dsds
)
679 uint16_t dsd_lists
= 0;
681 dsd_lists
= (dsds
/QLA_DSDS_PER_IOCB
);
682 if (dsds
% QLA_DSDS_PER_IOCB
)
689 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
692 * @sp: SRB command to process
693 * @cmd_pkt: Command type 3 IOCB
694 * @tot_dsds: Total number of segments to transfer
695 * @req: pointer to request queue
698 qla24xx_build_scsi_iocbs(srb_t
*sp
, struct cmd_type_7
*cmd_pkt
,
699 uint16_t tot_dsds
, struct req_que
*req
)
703 scsi_qla_host_t
*vha
;
704 struct scsi_cmnd
*cmd
;
705 struct scatterlist
*sg
;
708 cmd
= GET_CMD_SP(sp
);
710 /* Update entry type to indicate Command Type 3 IOCB */
711 *((uint32_t *)(&cmd_pkt
->entry_type
)) = cpu_to_le32(COMMAND_TYPE_7
);
713 /* No data transfer */
714 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
715 cmd_pkt
->byte_count
= cpu_to_le32(0);
719 vha
= sp
->fcport
->vha
;
721 /* Set transfer direction */
722 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
723 cmd_pkt
->task_mgmt_flags
= cpu_to_le16(TMF_WRITE_DATA
);
724 vha
->qla_stats
.output_bytes
+= scsi_bufflen(cmd
);
725 vha
->qla_stats
.output_requests
++;
726 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
727 cmd_pkt
->task_mgmt_flags
= cpu_to_le16(TMF_READ_DATA
);
728 vha
->qla_stats
.input_bytes
+= scsi_bufflen(cmd
);
729 vha
->qla_stats
.input_requests
++;
732 /* One DSD is available in the Command Type 3 IOCB */
734 cur_dsd
= (uint32_t *)&cmd_pkt
->dseg_0_address
;
736 /* Load data segments */
738 scsi_for_each_sg(cmd
, sg
, tot_dsds
, i
) {
740 cont_a64_entry_t
*cont_pkt
;
742 /* Allocate additional continuation packets? */
743 if (avail_dsds
== 0) {
745 * Five DSDs are available in the Continuation
748 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
, req
);
749 cur_dsd
= (uint32_t *)cont_pkt
->dseg_0_address
;
753 sle_dma
= sg_dma_address(sg
);
754 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
755 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
756 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
/*
 * Firmware view of the T10-DIF context fields (overlays the start of the
 * ref_tag area inside struct crc_context).
 */
struct fw_dif_context {
	uint32_t ref_tag;		/* Reference tag (LBA-derived) */
	uint16_t app_tag;		/* Application tag */
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};
769 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
773 qla24xx_set_t10dif_tags(srb_t
*sp
, struct fw_dif_context
*pkt
,
774 unsigned int protcnt
)
776 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
778 switch (scsi_get_prot_type(cmd
)) {
779 case SCSI_PROT_DIF_TYPE0
:
781 * No check for ql2xenablehba_err_chk, as it would be an
782 * I/O error if hba tag generation is not done.
784 pkt
->ref_tag
= cpu_to_le32((uint32_t)
785 (0xffffffff & scsi_get_lba(cmd
)));
787 if (!qla2x00_hba_err_chk_enabled(sp
))
790 pkt
->ref_tag_mask
[0] = 0xff;
791 pkt
->ref_tag_mask
[1] = 0xff;
792 pkt
->ref_tag_mask
[2] = 0xff;
793 pkt
->ref_tag_mask
[3] = 0xff;
797 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
798 * match LBA in CDB + N
800 case SCSI_PROT_DIF_TYPE2
:
801 pkt
->app_tag
= cpu_to_le16(0);
802 pkt
->app_tag_mask
[0] = 0x0;
803 pkt
->app_tag_mask
[1] = 0x0;
805 pkt
->ref_tag
= cpu_to_le32((uint32_t)
806 (0xffffffff & scsi_get_lba(cmd
)));
808 if (!qla2x00_hba_err_chk_enabled(sp
))
811 /* enable ALL bytes of the ref tag */
812 pkt
->ref_tag_mask
[0] = 0xff;
813 pkt
->ref_tag_mask
[1] = 0xff;
814 pkt
->ref_tag_mask
[2] = 0xff;
815 pkt
->ref_tag_mask
[3] = 0xff;
818 /* For Type 3 protection: 16 bit GUARD only */
819 case SCSI_PROT_DIF_TYPE3
:
820 pkt
->ref_tag_mask
[0] = pkt
->ref_tag_mask
[1] =
821 pkt
->ref_tag_mask
[2] = pkt
->ref_tag_mask
[3] =
826 * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
829 case SCSI_PROT_DIF_TYPE1
:
830 pkt
->ref_tag
= cpu_to_le32((uint32_t)
831 (0xffffffff & scsi_get_lba(cmd
)));
832 pkt
->app_tag
= cpu_to_le16(0);
833 pkt
->app_tag_mask
[0] = 0x0;
834 pkt
->app_tag_mask
[1] = 0x0;
836 if (!qla2x00_hba_err_chk_enabled(sp
))
839 /* enable ALL bytes of the ref tag */
840 pkt
->ref_tag_mask
[0] = 0xff;
841 pkt
->ref_tag_mask
[1] = 0xff;
842 pkt
->ref_tag_mask
[2] = 0xff;
843 pkt
->ref_tag_mask
[3] = 0xff;
849 qla24xx_get_one_block_sg(uint32_t blk_sz
, struct qla2_sgx
*sgx
,
852 struct scatterlist
*sg
;
853 uint32_t cumulative_partial
, sg_len
;
854 dma_addr_t sg_dma_addr
;
856 if (sgx
->num_bytes
== sgx
->tot_bytes
)
860 cumulative_partial
= sgx
->tot_partial
;
862 sg_dma_addr
= sg_dma_address(sg
);
863 sg_len
= sg_dma_len(sg
);
865 sgx
->dma_addr
= sg_dma_addr
+ sgx
->bytes_consumed
;
867 if ((cumulative_partial
+ (sg_len
- sgx
->bytes_consumed
)) >= blk_sz
) {
868 sgx
->dma_len
= (blk_sz
- cumulative_partial
);
869 sgx
->tot_partial
= 0;
870 sgx
->num_bytes
+= blk_sz
;
873 sgx
->dma_len
= sg_len
- sgx
->bytes_consumed
;
874 sgx
->tot_partial
+= sgx
->dma_len
;
878 sgx
->bytes_consumed
+= sgx
->dma_len
;
880 if (sg_len
== sgx
->bytes_consumed
) {
884 sgx
->bytes_consumed
= 0;
891 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data
*ha
, srb_t
*sp
,
892 uint32_t *dsd
, uint16_t tot_dsds
, struct qla_tgt_cmd
*tc
)
895 uint8_t avail_dsds
= 0;
896 uint32_t dsd_list_len
;
897 struct dsd_dma
*dsd_ptr
;
898 struct scatterlist
*sg_prot
;
899 uint32_t *cur_dsd
= dsd
;
900 uint16_t used_dsds
= tot_dsds
;
902 uint32_t prot_int
; /* protection interval */
906 uint32_t sle_dma_len
, tot_prot_dma_len
= 0;
907 struct scsi_cmnd
*cmd
;
909 memset(&sgx
, 0, sizeof(struct qla2_sgx
));
911 cmd
= GET_CMD_SP(sp
);
912 prot_int
= cmd
->device
->sector_size
;
914 sgx
.tot_bytes
= scsi_bufflen(cmd
);
915 sgx
.cur_sg
= scsi_sglist(cmd
);
918 sg_prot
= scsi_prot_sglist(cmd
);
920 prot_int
= tc
->blk_sz
;
921 sgx
.tot_bytes
= tc
->bufflen
;
923 sg_prot
= tc
->prot_sg
;
929 while (qla24xx_get_one_block_sg(prot_int
, &sgx
, &partial
)) {
931 sle_dma
= sgx
.dma_addr
;
932 sle_dma_len
= sgx
.dma_len
;
934 /* Allocate additional continuation packets? */
935 if (avail_dsds
== 0) {
936 avail_dsds
= (used_dsds
> QLA_DSDS_PER_IOCB
) ?
937 QLA_DSDS_PER_IOCB
: used_dsds
;
938 dsd_list_len
= (avail_dsds
+ 1) * 12;
939 used_dsds
-= avail_dsds
;
941 /* allocate tracking DS */
942 dsd_ptr
= kzalloc(sizeof(struct dsd_dma
), GFP_ATOMIC
);
946 /* allocate new list */
947 dsd_ptr
->dsd_addr
= next_dsd
=
948 dma_pool_alloc(ha
->dl_dma_pool
, GFP_ATOMIC
,
949 &dsd_ptr
->dsd_list_dma
);
953 * Need to cleanup only this dsd_ptr, rest
954 * will be done by sp_free_dma()
961 list_add_tail(&dsd_ptr
->list
,
962 &((struct crc_context
*)
963 sp
->u
.scmd
.ctx
)->dsd_list
);
965 sp
->flags
|= SRB_CRC_CTX_DSD_VALID
;
967 list_add_tail(&dsd_ptr
->list
,
968 &(tc
->ctx
->dsd_list
));
969 tc
->ctx_dsd_alloced
= 1;
973 /* add new list to cmd iocb or last list */
974 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
975 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
976 *cur_dsd
++ = dsd_list_len
;
977 cur_dsd
= (uint32_t *)next_dsd
;
979 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
980 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
981 *cur_dsd
++ = cpu_to_le32(sle_dma_len
);
985 /* Got a full protection interval */
986 sle_dma
= sg_dma_address(sg_prot
) + tot_prot_dma_len
;
989 tot_prot_dma_len
+= sle_dma_len
;
990 if (tot_prot_dma_len
== sg_dma_len(sg_prot
)) {
991 tot_prot_dma_len
= 0;
992 sg_prot
= sg_next(sg_prot
);
995 partial
= 1; /* So as to not re-enter this block */
999 /* Null termination */
1007 qla24xx_walk_and_build_sglist(struct qla_hw_data
*ha
, srb_t
*sp
, uint32_t *dsd
,
1008 uint16_t tot_dsds
, struct qla_tgt_cmd
*tc
)
1011 uint8_t avail_dsds
= 0;
1012 uint32_t dsd_list_len
;
1013 struct dsd_dma
*dsd_ptr
;
1014 struct scatterlist
*sg
, *sgl
;
1015 uint32_t *cur_dsd
= dsd
;
1017 uint16_t used_dsds
= tot_dsds
;
1018 struct scsi_cmnd
*cmd
;
1021 cmd
= GET_CMD_SP(sp
);
1022 sgl
= scsi_sglist(cmd
);
1031 for_each_sg(sgl
, sg
, tot_dsds
, i
) {
1034 /* Allocate additional continuation packets? */
1035 if (avail_dsds
== 0) {
1036 avail_dsds
= (used_dsds
> QLA_DSDS_PER_IOCB
) ?
1037 QLA_DSDS_PER_IOCB
: used_dsds
;
1038 dsd_list_len
= (avail_dsds
+ 1) * 12;
1039 used_dsds
-= avail_dsds
;
1041 /* allocate tracking DS */
1042 dsd_ptr
= kzalloc(sizeof(struct dsd_dma
), GFP_ATOMIC
);
1046 /* allocate new list */
1047 dsd_ptr
->dsd_addr
= next_dsd
=
1048 dma_pool_alloc(ha
->dl_dma_pool
, GFP_ATOMIC
,
1049 &dsd_ptr
->dsd_list_dma
);
1053 * Need to cleanup only this dsd_ptr, rest
1054 * will be done by sp_free_dma()
1061 list_add_tail(&dsd_ptr
->list
,
1062 &((struct crc_context
*)
1063 sp
->u
.scmd
.ctx
)->dsd_list
);
1065 sp
->flags
|= SRB_CRC_CTX_DSD_VALID
;
1067 list_add_tail(&dsd_ptr
->list
,
1068 &(tc
->ctx
->dsd_list
));
1069 tc
->ctx_dsd_alloced
= 1;
1072 /* add new list to cmd iocb or last list */
1073 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
1074 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
1075 *cur_dsd
++ = dsd_list_len
;
1076 cur_dsd
= (uint32_t *)next_dsd
;
1078 sle_dma
= sg_dma_address(sg
);
1080 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
1081 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
1082 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
1086 /* Null termination */
1094 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data
*ha
, srb_t
*sp
,
1095 uint32_t *dsd
, uint16_t tot_dsds
, struct qla_tgt_cmd
*tc
)
1098 uint8_t avail_dsds
= 0;
1099 uint32_t dsd_list_len
;
1100 struct dsd_dma
*dsd_ptr
;
1101 struct scatterlist
*sg
, *sgl
;
1103 struct scsi_cmnd
*cmd
;
1104 uint32_t *cur_dsd
= dsd
;
1105 uint16_t used_dsds
= tot_dsds
;
1106 struct scsi_qla_host
*vha
;
1109 cmd
= GET_CMD_SP(sp
);
1110 sgl
= scsi_prot_sglist(cmd
);
1111 vha
= sp
->fcport
->vha
;
1120 ql_dbg(ql_dbg_tgt
, vha
, 0xe021,
1121 "%s: enter\n", __func__
);
1123 for_each_sg(sgl
, sg
, tot_dsds
, i
) {
1126 /* Allocate additional continuation packets? */
1127 if (avail_dsds
== 0) {
1128 avail_dsds
= (used_dsds
> QLA_DSDS_PER_IOCB
) ?
1129 QLA_DSDS_PER_IOCB
: used_dsds
;
1130 dsd_list_len
= (avail_dsds
+ 1) * 12;
1131 used_dsds
-= avail_dsds
;
1133 /* allocate tracking DS */
1134 dsd_ptr
= kzalloc(sizeof(struct dsd_dma
), GFP_ATOMIC
);
1138 /* allocate new list */
1139 dsd_ptr
->dsd_addr
= next_dsd
=
1140 dma_pool_alloc(ha
->dl_dma_pool
, GFP_ATOMIC
,
1141 &dsd_ptr
->dsd_list_dma
);
1145 * Need to cleanup only this dsd_ptr, rest
1146 * will be done by sp_free_dma()
1153 list_add_tail(&dsd_ptr
->list
,
1154 &((struct crc_context
*)
1155 sp
->u
.scmd
.ctx
)->dsd_list
);
1157 sp
->flags
|= SRB_CRC_CTX_DSD_VALID
;
1159 list_add_tail(&dsd_ptr
->list
,
1160 &(tc
->ctx
->dsd_list
));
1161 tc
->ctx_dsd_alloced
= 1;
1164 /* add new list to cmd iocb or last list */
1165 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
1166 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
1167 *cur_dsd
++ = dsd_list_len
;
1168 cur_dsd
= (uint32_t *)next_dsd
;
1170 sle_dma
= sg_dma_address(sg
);
1172 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
1173 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
1174 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
1178 /* Null termination */
1186 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1187 * Type 6 IOCB types.
1189 * @sp: SRB command to process
1190 * @cmd_pkt: Command type 3 IOCB
1191 * @tot_dsds: Total number of segments to transfer
1194 qla24xx_build_scsi_crc_2_iocbs(srb_t
*sp
, struct cmd_type_crc_2
*cmd_pkt
,
1195 uint16_t tot_dsds
, uint16_t tot_prot_dsds
, uint16_t fw_prot_opts
)
1197 uint32_t *cur_dsd
, *fcp_dl
;
1198 scsi_qla_host_t
*vha
;
1199 struct scsi_cmnd
*cmd
;
1200 uint32_t total_bytes
= 0;
1201 uint32_t data_bytes
;
1203 uint8_t bundling
= 1;
1206 struct crc_context
*crc_ctx_pkt
= NULL
;
1207 struct qla_hw_data
*ha
;
1208 uint8_t additional_fcpcdb_len
;
1209 uint16_t fcp_cmnd_len
;
1210 struct fcp_cmnd
*fcp_cmnd
;
1211 dma_addr_t crc_ctx_dma
;
1213 cmd
= GET_CMD_SP(sp
);
1215 /* Update entry type to indicate Command Type CRC_2 IOCB */
1216 *((uint32_t *)(&cmd_pkt
->entry_type
)) = cpu_to_le32(COMMAND_TYPE_CRC_2
);
1218 vha
= sp
->fcport
->vha
;
1221 /* No data transfer */
1222 data_bytes
= scsi_bufflen(cmd
);
1223 if (!data_bytes
|| cmd
->sc_data_direction
== DMA_NONE
) {
1224 cmd_pkt
->byte_count
= cpu_to_le32(0);
1228 cmd_pkt
->vp_index
= sp
->fcport
->vha
->vp_idx
;
1230 /* Set transfer direction */
1231 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
1232 cmd_pkt
->control_flags
=
1233 cpu_to_le16(CF_WRITE_DATA
);
1234 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
1235 cmd_pkt
->control_flags
=
1236 cpu_to_le16(CF_READ_DATA
);
1239 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
1240 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
) ||
1241 (scsi_get_prot_op(cmd
) == SCSI_PROT_READ_STRIP
) ||
1242 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_INSERT
))
1245 /* Allocate CRC context from global pool */
1246 crc_ctx_pkt
= sp
->u
.scmd
.ctx
=
1247 dma_pool_alloc(ha
->dl_dma_pool
, GFP_ATOMIC
, &crc_ctx_dma
);
1250 goto crc_queuing_error
;
1252 /* Zero out CTX area. */
1253 clr_ptr
= (uint8_t *)crc_ctx_pkt
;
1254 memset(clr_ptr
, 0, sizeof(*crc_ctx_pkt
));
1256 crc_ctx_pkt
->crc_ctx_dma
= crc_ctx_dma
;
1258 sp
->flags
|= SRB_CRC_CTX_DMA_VALID
;
1261 crc_ctx_pkt
->handle
= cmd_pkt
->handle
;
1263 INIT_LIST_HEAD(&crc_ctx_pkt
->dsd_list
);
1265 qla24xx_set_t10dif_tags(sp
, (struct fw_dif_context
*)
1266 &crc_ctx_pkt
->ref_tag
, tot_prot_dsds
);
1268 cmd_pkt
->crc_context_address
[0] = cpu_to_le32(LSD(crc_ctx_dma
));
1269 cmd_pkt
->crc_context_address
[1] = cpu_to_le32(MSD(crc_ctx_dma
));
1270 cmd_pkt
->crc_context_len
= CRC_CONTEXT_LEN_FW
;
1272 /* Determine SCSI command length -- align to 4 byte boundary */
1273 if (cmd
->cmd_len
> 16) {
1274 additional_fcpcdb_len
= cmd
->cmd_len
- 16;
1275 if ((cmd
->cmd_len
% 4) != 0) {
1276 /* SCSI cmd > 16 bytes must be multiple of 4 */
1277 goto crc_queuing_error
;
1279 fcp_cmnd_len
= 12 + cmd
->cmd_len
+ 4;
1281 additional_fcpcdb_len
= 0;
1282 fcp_cmnd_len
= 12 + 16 + 4;
1285 fcp_cmnd
= &crc_ctx_pkt
->fcp_cmnd
;
1287 fcp_cmnd
->additional_cdb_len
= additional_fcpcdb_len
;
1288 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
)
1289 fcp_cmnd
->additional_cdb_len
|= 1;
1290 else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
)
1291 fcp_cmnd
->additional_cdb_len
|= 2;
1293 int_to_scsilun(cmd
->device
->lun
, &fcp_cmnd
->lun
);
1294 memcpy(fcp_cmnd
->cdb
, cmd
->cmnd
, cmd
->cmd_len
);
1295 cmd_pkt
->fcp_cmnd_dseg_len
= cpu_to_le16(fcp_cmnd_len
);
1296 cmd_pkt
->fcp_cmnd_dseg_address
[0] = cpu_to_le32(
1297 LSD(crc_ctx_dma
+ CRC_CONTEXT_FCPCMND_OFF
));
1298 cmd_pkt
->fcp_cmnd_dseg_address
[1] = cpu_to_le32(
1299 MSD(crc_ctx_dma
+ CRC_CONTEXT_FCPCMND_OFF
));
1300 fcp_cmnd
->task_management
= 0;
1301 fcp_cmnd
->task_attribute
= TSK_SIMPLE
;
1303 cmd_pkt
->fcp_rsp_dseg_len
= 0; /* Let response come in status iocb */
1305 /* Compute dif len and adjust data len to incude protection */
1307 blk_size
= cmd
->device
->sector_size
;
1308 dif_bytes
= (data_bytes
/ blk_size
) * 8;
1310 switch (scsi_get_prot_op(GET_CMD_SP(sp
))) {
1311 case SCSI_PROT_READ_INSERT
:
1312 case SCSI_PROT_WRITE_STRIP
:
1313 total_bytes
= data_bytes
;
1314 data_bytes
+= dif_bytes
;
1317 case SCSI_PROT_READ_STRIP
:
1318 case SCSI_PROT_WRITE_INSERT
:
1319 case SCSI_PROT_READ_PASS
:
1320 case SCSI_PROT_WRITE_PASS
:
1321 total_bytes
= data_bytes
+ dif_bytes
;
1327 if (!qla2x00_hba_err_chk_enabled(sp
))
1328 fw_prot_opts
|= 0x10; /* Disable Guard tag checking */
1329 /* HBA error checking enabled */
1330 else if (IS_PI_UNINIT_CAPABLE(ha
)) {
1331 if ((scsi_get_prot_type(GET_CMD_SP(sp
)) == SCSI_PROT_DIF_TYPE1
)
1332 || (scsi_get_prot_type(GET_CMD_SP(sp
)) ==
1333 SCSI_PROT_DIF_TYPE2
))
1334 fw_prot_opts
|= BIT_10
;
1335 else if (scsi_get_prot_type(GET_CMD_SP(sp
)) ==
1336 SCSI_PROT_DIF_TYPE3
)
1337 fw_prot_opts
|= BIT_11
;
1341 cur_dsd
= (uint32_t *) &crc_ctx_pkt
->u
.nobundling
.data_address
;
1344 * Configure Bundling if we need to fetch interlaving
1345 * protection PCI accesses
1347 fw_prot_opts
|= PO_ENABLE_DIF_BUNDLING
;
1348 crc_ctx_pkt
->u
.bundling
.dif_byte_count
= cpu_to_le32(dif_bytes
);
1349 crc_ctx_pkt
->u
.bundling
.dseg_count
= cpu_to_le16(tot_dsds
-
1351 cur_dsd
= (uint32_t *) &crc_ctx_pkt
->u
.bundling
.data_address
;
1354 /* Finish the common fields of CRC pkt */
1355 crc_ctx_pkt
->blk_size
= cpu_to_le16(blk_size
);
1356 crc_ctx_pkt
->prot_opts
= cpu_to_le16(fw_prot_opts
);
1357 crc_ctx_pkt
->byte_count
= cpu_to_le32(data_bytes
);
1358 crc_ctx_pkt
->guard_seed
= cpu_to_le16(0);
1359 /* Fibre channel byte count */
1360 cmd_pkt
->byte_count
= cpu_to_le32(total_bytes
);
1361 fcp_dl
= (uint32_t *)(crc_ctx_pkt
->fcp_cmnd
.cdb
+ 16 +
1362 additional_fcpcdb_len
);
1363 *fcp_dl
= htonl(total_bytes
);
1365 if (!data_bytes
|| cmd
->sc_data_direction
== DMA_NONE
) {
1366 cmd_pkt
->byte_count
= cpu_to_le32(0);
1369 /* Walks data segments */
1371 cmd_pkt
->control_flags
|= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE
);
1373 if (!bundling
&& tot_prot_dsds
) {
1374 if (qla24xx_walk_and_build_sglist_no_difb(ha
, sp
,
1375 cur_dsd
, tot_dsds
, NULL
))
1376 goto crc_queuing_error
;
1377 } else if (qla24xx_walk_and_build_sglist(ha
, sp
, cur_dsd
,
1378 (tot_dsds
- tot_prot_dsds
), NULL
))
1379 goto crc_queuing_error
;
1381 if (bundling
&& tot_prot_dsds
) {
1382 /* Walks dif segments */
1383 cmd_pkt
->control_flags
|= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE
);
1384 cur_dsd
= (uint32_t *) &crc_ctx_pkt
->u
.bundling
.dif_address
;
1385 if (qla24xx_walk_and_build_prot_sglist(ha
, sp
, cur_dsd
,
1386 tot_prot_dsds
, NULL
))
1387 goto crc_queuing_error
;
1392 /* Cleanup will be performed by the caller */
1394 return QLA_FUNCTION_FAILED
;
1398 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1399 * @sp: command to send to the ISP
1401 * Returns non-zero if a failure occurred, else zero.
1404 qla24xx_start_scsi(srb_t
*sp
)
1407 unsigned long flags
;
1411 struct cmd_type_7
*cmd_pkt
;
1415 struct req_que
*req
= NULL
;
1416 struct rsp_que
*rsp
= NULL
;
1417 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
1418 struct scsi_qla_host
*vha
= sp
->fcport
->vha
;
1419 struct qla_hw_data
*ha
= vha
->hw
;
1421 /* Setup device pointers. */
1425 /* So we know we haven't pci_map'ed anything yet */
1428 /* Send marker if required */
1429 if (vha
->marker_needed
!= 0) {
1430 if (qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
1432 return QLA_FUNCTION_FAILED
;
1433 vha
->marker_needed
= 0;
1436 /* Acquire ring specific lock */
1437 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1439 /* Check for room in outstanding command list. */
1440 handle
= req
->current_outstanding_cmd
;
1441 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
1443 if (handle
== req
->num_outstanding_cmds
)
1445 if (!req
->outstanding_cmds
[handle
])
1448 if (index
== req
->num_outstanding_cmds
)
1451 /* Map the sg table so we have an accurate count of sg entries needed */
1452 if (scsi_sg_count(cmd
)) {
1453 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
1454 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
1455 if (unlikely(!nseg
))
1461 req_cnt
= qla24xx_calc_iocbs(vha
, tot_dsds
);
1462 if (req
->cnt
< (req_cnt
+ 2)) {
1463 cnt
= IS_SHADOW_REG_CAPABLE(ha
) ? *req
->out_ptr
:
1464 RD_REG_DWORD_RELAXED(req
->req_q_out
);
1465 if (req
->ring_index
< cnt
)
1466 req
->cnt
= cnt
- req
->ring_index
;
1468 req
->cnt
= req
->length
-
1469 (req
->ring_index
- cnt
);
1470 if (req
->cnt
< (req_cnt
+ 2))
1474 /* Build command packet. */
1475 req
->current_outstanding_cmd
= handle
;
1476 req
->outstanding_cmds
[handle
] = sp
;
1477 sp
->handle
= handle
;
1478 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
1479 req
->cnt
-= req_cnt
;
1481 cmd_pkt
= (struct cmd_type_7
*)req
->ring_ptr
;
1482 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
1484 /* Zero out remaining portion of packet. */
1485 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1486 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
1487 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
1488 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
1490 /* Set NPORT-ID and LUN number*/
1491 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1492 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
1493 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
1494 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
1495 cmd_pkt
->vp_index
= sp
->fcport
->vha
->vp_idx
;
1497 int_to_scsilun(cmd
->device
->lun
, &cmd_pkt
->lun
);
1498 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
, sizeof(cmd_pkt
->lun
));
1500 cmd_pkt
->task
= TSK_SIMPLE
;
1502 /* Load SCSI command packet. */
1503 memcpy(cmd_pkt
->fcp_cdb
, cmd
->cmnd
, cmd
->cmd_len
);
1504 host_to_fcp_swap(cmd_pkt
->fcp_cdb
, sizeof(cmd_pkt
->fcp_cdb
));
1506 cmd_pkt
->byte_count
= cpu_to_le32((uint32_t)scsi_bufflen(cmd
));
1508 /* Build IOCB segments */
1509 qla24xx_build_scsi_iocbs(sp
, cmd_pkt
, tot_dsds
, req
);
1511 /* Set total data segment count. */
1512 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
1514 /* Adjust ring index. */
1516 if (req
->ring_index
== req
->length
) {
1517 req
->ring_index
= 0;
1518 req
->ring_ptr
= req
->ring
;
1522 sp
->flags
|= SRB_DMA_VALID
;
1524 /* Set chip new ring index. */
1525 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
1526 RD_REG_DWORD_RELAXED(&ha
->iobase
->isp24
.hccr
);
1528 /* Manage unprocessed RIO/ZIO commands in response queue. */
1529 if (vha
->flags
.process_response_queue
&&
1530 rsp
->ring_ptr
->signature
!= RESPONSE_PROCESSED
)
1531 qla24xx_process_response_queue(vha
, rsp
);
1533 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1538 scsi_dma_unmap(cmd
);
1540 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1542 return QLA_FUNCTION_FAILED
;
1546 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1547 * @sp: command to send to the ISP
1549 * Returns non-zero if a failure occurred, else zero.
1552 qla24xx_dif_start_scsi(srb_t
*sp
)
1555 unsigned long flags
;
1560 uint16_t req_cnt
= 0;
1562 uint16_t tot_prot_dsds
;
1563 uint16_t fw_prot_opts
= 0;
1564 struct req_que
*req
= NULL
;
1565 struct rsp_que
*rsp
= NULL
;
1566 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
1567 struct scsi_qla_host
*vha
= sp
->fcport
->vha
;
1568 struct qla_hw_data
*ha
= vha
->hw
;
1569 struct cmd_type_crc_2
*cmd_pkt
;
1570 uint32_t status
= 0;
1572 #define QDSS_GOT_Q_SPACE BIT_0
1574 /* Only process protection or >16 cdb in this routine */
1575 if (scsi_get_prot_op(cmd
) == SCSI_PROT_NORMAL
) {
1576 if (cmd
->cmd_len
<= 16)
1577 return qla24xx_start_scsi(sp
);
1580 /* Setup device pointers. */
1584 /* So we know we haven't pci_map'ed anything yet */
1587 /* Send marker if required */
1588 if (vha
->marker_needed
!= 0) {
1589 if (qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
1591 return QLA_FUNCTION_FAILED
;
1592 vha
->marker_needed
= 0;
1595 /* Acquire ring specific lock */
1596 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1598 /* Check for room in outstanding command list. */
1599 handle
= req
->current_outstanding_cmd
;
1600 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
1602 if (handle
== req
->num_outstanding_cmds
)
1604 if (!req
->outstanding_cmds
[handle
])
1608 if (index
== req
->num_outstanding_cmds
)
1611 /* Compute number of required data segments */
1612 /* Map the sg table so we have an accurate count of sg entries needed */
1613 if (scsi_sg_count(cmd
)) {
1614 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
1615 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
1616 if (unlikely(!nseg
))
1619 sp
->flags
|= SRB_DMA_VALID
;
1621 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
1622 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
)) {
1623 struct qla2_sgx sgx
;
1626 memset(&sgx
, 0, sizeof(struct qla2_sgx
));
1627 sgx
.tot_bytes
= scsi_bufflen(cmd
);
1628 sgx
.cur_sg
= scsi_sglist(cmd
);
1632 while (qla24xx_get_one_block_sg(
1633 cmd
->device
->sector_size
, &sgx
, &partial
))
1639 /* number of required data segments */
1642 /* Compute number of required protection segments */
1643 if (qla24xx_configure_prot_mode(sp
, &fw_prot_opts
)) {
1644 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_prot_sglist(cmd
),
1645 scsi_prot_sg_count(cmd
), cmd
->sc_data_direction
);
1646 if (unlikely(!nseg
))
1649 sp
->flags
|= SRB_CRC_PROT_DMA_VALID
;
1651 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
1652 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
)) {
1653 nseg
= scsi_bufflen(cmd
) / cmd
->device
->sector_size
;
1660 /* Total Data and protection sg segment(s) */
1661 tot_prot_dsds
= nseg
;
1663 if (req
->cnt
< (req_cnt
+ 2)) {
1664 cnt
= IS_SHADOW_REG_CAPABLE(ha
) ? *req
->out_ptr
:
1665 RD_REG_DWORD_RELAXED(req
->req_q_out
);
1666 if (req
->ring_index
< cnt
)
1667 req
->cnt
= cnt
- req
->ring_index
;
1669 req
->cnt
= req
->length
-
1670 (req
->ring_index
- cnt
);
1671 if (req
->cnt
< (req_cnt
+ 2))
1675 status
|= QDSS_GOT_Q_SPACE
;
1677 /* Build header part of command packet (excluding the OPCODE). */
1678 req
->current_outstanding_cmd
= handle
;
1679 req
->outstanding_cmds
[handle
] = sp
;
1680 sp
->handle
= handle
;
1681 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
1682 req
->cnt
-= req_cnt
;
1684 /* Fill-in common area */
1685 cmd_pkt
= (struct cmd_type_crc_2
*)req
->ring_ptr
;
1686 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
1688 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
1689 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
1691 /* Set NPORT-ID and LUN number*/
1692 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1693 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
1694 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
1695 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
1697 int_to_scsilun(cmd
->device
->lun
, &cmd_pkt
->lun
);
1698 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
, sizeof(cmd_pkt
->lun
));
1700 /* Total Data and protection segment(s) */
1701 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
1703 /* Build IOCB segments and adjust for data protection segments */
1704 if (qla24xx_build_scsi_crc_2_iocbs(sp
, (struct cmd_type_crc_2
*)
1705 req
->ring_ptr
, tot_dsds
, tot_prot_dsds
, fw_prot_opts
) !=
1709 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
1710 /* Specify response queue number where completion should happen */
1711 cmd_pkt
->entry_status
= (uint8_t) rsp
->id
;
1712 cmd_pkt
->timeout
= cpu_to_le16(0);
1715 /* Adjust ring index. */
1717 if (req
->ring_index
== req
->length
) {
1718 req
->ring_index
= 0;
1719 req
->ring_ptr
= req
->ring
;
1723 /* Set chip new ring index. */
1724 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
1725 RD_REG_DWORD_RELAXED(&ha
->iobase
->isp24
.hccr
);
1727 /* Manage unprocessed RIO/ZIO commands in response queue. */
1728 if (vha
->flags
.process_response_queue
&&
1729 rsp
->ring_ptr
->signature
!= RESPONSE_PROCESSED
)
1730 qla24xx_process_response_queue(vha
, rsp
);
1732 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1737 if (status
& QDSS_GOT_Q_SPACE
) {
1738 req
->outstanding_cmds
[handle
] = NULL
;
1739 req
->cnt
+= req_cnt
;
1741 /* Cleanup will be performed by the caller (queuecommand) */
1743 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1744 return QLA_FUNCTION_FAILED
;
1748 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1749 * @sp: command to send to the ISP
1751 * Returns non-zero if a failure occurred, else zero.
1754 qla2xxx_start_scsi_mq(srb_t
*sp
)
1757 unsigned long flags
;
1761 struct cmd_type_7
*cmd_pkt
;
1765 struct req_que
*req
= NULL
;
1766 struct rsp_que
*rsp
= NULL
;
1767 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
1768 struct scsi_qla_host
*vha
= sp
->fcport
->vha
;
1769 struct qla_hw_data
*ha
= vha
->hw
;
1770 struct qla_qpair
*qpair
= sp
->qpair
;
1772 /* Setup qpair pointers */
1776 /* So we know we haven't pci_map'ed anything yet */
1779 /* Send marker if required */
1780 if (vha
->marker_needed
!= 0) {
1781 if (qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
1783 return QLA_FUNCTION_FAILED
;
1784 vha
->marker_needed
= 0;
1787 /* Acquire qpair specific lock */
1788 spin_lock_irqsave(&qpair
->qp_lock
, flags
);
1790 /* Check for room in outstanding command list. */
1791 handle
= req
->current_outstanding_cmd
;
1792 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
1794 if (handle
== req
->num_outstanding_cmds
)
1796 if (!req
->outstanding_cmds
[handle
])
1799 if (index
== req
->num_outstanding_cmds
)
1802 /* Map the sg table so we have an accurate count of sg entries needed */
1803 if (scsi_sg_count(cmd
)) {
1804 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
1805 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
1806 if (unlikely(!nseg
))
1812 req_cnt
= qla24xx_calc_iocbs(vha
, tot_dsds
);
1813 if (req
->cnt
< (req_cnt
+ 2)) {
1814 cnt
= IS_SHADOW_REG_CAPABLE(ha
) ? *req
->out_ptr
:
1815 RD_REG_DWORD_RELAXED(req
->req_q_out
);
1816 if (req
->ring_index
< cnt
)
1817 req
->cnt
= cnt
- req
->ring_index
;
1819 req
->cnt
= req
->length
-
1820 (req
->ring_index
- cnt
);
1821 if (req
->cnt
< (req_cnt
+ 2))
1825 /* Build command packet. */
1826 req
->current_outstanding_cmd
= handle
;
1827 req
->outstanding_cmds
[handle
] = sp
;
1828 sp
->handle
= handle
;
1829 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
1830 req
->cnt
-= req_cnt
;
1832 cmd_pkt
= (struct cmd_type_7
*)req
->ring_ptr
;
1833 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
1835 /* Zero out remaining portion of packet. */
1836 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1837 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
1838 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
1839 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
1841 /* Set NPORT-ID and LUN number*/
1842 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1843 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
1844 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
1845 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
1846 cmd_pkt
->vp_index
= sp
->fcport
->vha
->vp_idx
;
1848 int_to_scsilun(cmd
->device
->lun
, &cmd_pkt
->lun
);
1849 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
, sizeof(cmd_pkt
->lun
));
1851 cmd_pkt
->task
= TSK_SIMPLE
;
1853 /* Load SCSI command packet. */
1854 memcpy(cmd_pkt
->fcp_cdb
, cmd
->cmnd
, cmd
->cmd_len
);
1855 host_to_fcp_swap(cmd_pkt
->fcp_cdb
, sizeof(cmd_pkt
->fcp_cdb
));
1857 cmd_pkt
->byte_count
= cpu_to_le32((uint32_t)scsi_bufflen(cmd
));
1859 /* Build IOCB segments */
1860 qla24xx_build_scsi_iocbs(sp
, cmd_pkt
, tot_dsds
, req
);
1862 /* Set total data segment count. */
1863 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
1865 /* Adjust ring index. */
1867 if (req
->ring_index
== req
->length
) {
1868 req
->ring_index
= 0;
1869 req
->ring_ptr
= req
->ring
;
1873 sp
->flags
|= SRB_DMA_VALID
;
1875 /* Set chip new ring index. */
1876 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
1878 /* Manage unprocessed RIO/ZIO commands in response queue. */
1879 if (vha
->flags
.process_response_queue
&&
1880 rsp
->ring_ptr
->signature
!= RESPONSE_PROCESSED
)
1881 qla24xx_process_response_queue(vha
, rsp
);
1883 spin_unlock_irqrestore(&qpair
->qp_lock
, flags
);
1888 scsi_dma_unmap(cmd
);
1890 spin_unlock_irqrestore(&qpair
->qp_lock
, flags
);
1892 return QLA_FUNCTION_FAILED
;
1897 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1898 * @sp: command to send to the ISP
1900 * Returns non-zero if a failure occurred, else zero.
1903 qla2xxx_dif_start_scsi_mq(srb_t
*sp
)
1906 unsigned long flags
;
1911 uint16_t req_cnt
= 0;
1913 uint16_t tot_prot_dsds
;
1914 uint16_t fw_prot_opts
= 0;
1915 struct req_que
*req
= NULL
;
1916 struct rsp_que
*rsp
= NULL
;
1917 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
1918 struct scsi_qla_host
*vha
= sp
->fcport
->vha
;
1919 struct qla_hw_data
*ha
= vha
->hw
;
1920 struct cmd_type_crc_2
*cmd_pkt
;
1921 uint32_t status
= 0;
1922 struct qla_qpair
*qpair
= sp
->qpair
;
1924 #define QDSS_GOT_Q_SPACE BIT_0
1926 /* Check for host side state */
1927 if (!qpair
->online
) {
1928 cmd
->result
= DID_NO_CONNECT
<< 16;
1929 return QLA_INTERFACE_ERROR
;
1932 if (!qpair
->difdix_supported
&&
1933 scsi_get_prot_op(cmd
) != SCSI_PROT_NORMAL
) {
1934 cmd
->result
= DID_NO_CONNECT
<< 16;
1935 return QLA_INTERFACE_ERROR
;
1938 /* Only process protection or >16 cdb in this routine */
1939 if (scsi_get_prot_op(cmd
) == SCSI_PROT_NORMAL
) {
1940 if (cmd
->cmd_len
<= 16)
1941 return qla2xxx_start_scsi_mq(sp
);
1944 /* Setup qpair pointers */
1948 /* So we know we haven't pci_map'ed anything yet */
1951 /* Send marker if required */
1952 if (vha
->marker_needed
!= 0) {
1953 if (qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
1955 return QLA_FUNCTION_FAILED
;
1956 vha
->marker_needed
= 0;
1959 /* Acquire ring specific lock */
1960 spin_lock_irqsave(&qpair
->qp_lock
, flags
);
1962 /* Check for room in outstanding command list. */
1963 handle
= req
->current_outstanding_cmd
;
1964 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
1966 if (handle
== req
->num_outstanding_cmds
)
1968 if (!req
->outstanding_cmds
[handle
])
1972 if (index
== req
->num_outstanding_cmds
)
1975 /* Compute number of required data segments */
1976 /* Map the sg table so we have an accurate count of sg entries needed */
1977 if (scsi_sg_count(cmd
)) {
1978 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
1979 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
1980 if (unlikely(!nseg
))
1983 sp
->flags
|= SRB_DMA_VALID
;
1985 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
1986 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
)) {
1987 struct qla2_sgx sgx
;
1990 memset(&sgx
, 0, sizeof(struct qla2_sgx
));
1991 sgx
.tot_bytes
= scsi_bufflen(cmd
);
1992 sgx
.cur_sg
= scsi_sglist(cmd
);
1996 while (qla24xx_get_one_block_sg(
1997 cmd
->device
->sector_size
, &sgx
, &partial
))
2003 /* number of required data segments */
2006 /* Compute number of required protection segments */
2007 if (qla24xx_configure_prot_mode(sp
, &fw_prot_opts
)) {
2008 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_prot_sglist(cmd
),
2009 scsi_prot_sg_count(cmd
), cmd
->sc_data_direction
);
2010 if (unlikely(!nseg
))
2013 sp
->flags
|= SRB_CRC_PROT_DMA_VALID
;
2015 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
2016 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
)) {
2017 nseg
= scsi_bufflen(cmd
) / cmd
->device
->sector_size
;
2024 /* Total Data and protection sg segment(s) */
2025 tot_prot_dsds
= nseg
;
2027 if (req
->cnt
< (req_cnt
+ 2)) {
2028 cnt
= IS_SHADOW_REG_CAPABLE(ha
) ? *req
->out_ptr
:
2029 RD_REG_DWORD_RELAXED(req
->req_q_out
);
2030 if (req
->ring_index
< cnt
)
2031 req
->cnt
= cnt
- req
->ring_index
;
2033 req
->cnt
= req
->length
-
2034 (req
->ring_index
- cnt
);
2035 if (req
->cnt
< (req_cnt
+ 2))
2039 status
|= QDSS_GOT_Q_SPACE
;
2041 /* Build header part of command packet (excluding the OPCODE). */
2042 req
->current_outstanding_cmd
= handle
;
2043 req
->outstanding_cmds
[handle
] = sp
;
2044 sp
->handle
= handle
;
2045 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
2046 req
->cnt
-= req_cnt
;
2048 /* Fill-in common area */
2049 cmd_pkt
= (struct cmd_type_crc_2
*)req
->ring_ptr
;
2050 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
2052 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
2053 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
2055 /* Set NPORT-ID and LUN number*/
2056 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2057 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2058 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2059 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2061 int_to_scsilun(cmd
->device
->lun
, &cmd_pkt
->lun
);
2062 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
, sizeof(cmd_pkt
->lun
));
2064 /* Total Data and protection segment(s) */
2065 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
2067 /* Build IOCB segments and adjust for data protection segments */
2068 if (qla24xx_build_scsi_crc_2_iocbs(sp
, (struct cmd_type_crc_2
*)
2069 req
->ring_ptr
, tot_dsds
, tot_prot_dsds
, fw_prot_opts
) !=
2073 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
2074 cmd_pkt
->timeout
= cpu_to_le16(0);
2077 /* Adjust ring index. */
2079 if (req
->ring_index
== req
->length
) {
2080 req
->ring_index
= 0;
2081 req
->ring_ptr
= req
->ring
;
2085 /* Set chip new ring index. */
2086 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
2088 /* Manage unprocessed RIO/ZIO commands in response queue. */
2089 if (vha
->flags
.process_response_queue
&&
2090 rsp
->ring_ptr
->signature
!= RESPONSE_PROCESSED
)
2091 qla24xx_process_response_queue(vha
, rsp
);
2093 spin_unlock_irqrestore(&qpair
->qp_lock
, flags
);
2098 if (status
& QDSS_GOT_Q_SPACE
) {
2099 req
->outstanding_cmds
[handle
] = NULL
;
2100 req
->cnt
+= req_cnt
;
2102 /* Cleanup will be performed by the caller (queuecommand) */
2104 spin_unlock_irqrestore(&qpair
->qp_lock
, flags
);
2105 return QLA_FUNCTION_FAILED
;
2108 /* Generic Control-SRB manipulation functions. */
2110 /* hardware_lock assumed to be held. */
2112 qla2x00_alloc_iocbs_ready(scsi_qla_host_t
*vha
, srb_t
*sp
)
2114 if (qla2x00_reset_active(vha
))
2117 return qla2x00_alloc_iocbs(vha
, sp
);
2121 qla2x00_alloc_iocbs(scsi_qla_host_t
*vha
, srb_t
*sp
)
2123 struct qla_hw_data
*ha
= vha
->hw
;
2124 struct req_que
*req
= ha
->req_q_map
[0];
2125 device_reg_t
*reg
= ISP_QUE_REG(ha
, req
->id
);
2126 uint32_t index
, handle
;
2128 uint16_t cnt
, req_cnt
;
2135 goto skip_cmd_array
;
2137 /* Check for room in outstanding command list. */
2138 handle
= req
->current_outstanding_cmd
;
2139 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
2141 if (handle
== req
->num_outstanding_cmds
)
2143 if (!req
->outstanding_cmds
[handle
])
2146 if (index
== req
->num_outstanding_cmds
) {
2147 ql_log(ql_log_warn
, vha
, 0x700b,
2148 "No room on outstanding cmd array.\n");
2152 /* Prep command array. */
2153 req
->current_outstanding_cmd
= handle
;
2154 req
->outstanding_cmds
[handle
] = sp
;
2155 sp
->handle
= handle
;
2157 /* Adjust entry-counts as needed. */
2158 if (sp
->type
!= SRB_SCSI_CMD
)
2159 req_cnt
= sp
->iocbs
;
2162 /* Check for room on request queue. */
2163 if (req
->cnt
< req_cnt
+ 2) {
2164 if (ha
->mqenable
|| IS_QLA83XX(ha
) || IS_QLA27XX(ha
))
2165 cnt
= RD_REG_DWORD(®
->isp25mq
.req_q_out
);
2166 else if (IS_P3P_TYPE(ha
))
2167 cnt
= RD_REG_DWORD(®
->isp82
.req_q_out
);
2168 else if (IS_FWI2_CAPABLE(ha
))
2169 cnt
= RD_REG_DWORD(®
->isp24
.req_q_out
);
2170 else if (IS_QLAFX00(ha
))
2171 cnt
= RD_REG_DWORD(®
->ispfx00
.req_q_out
);
2173 cnt
= qla2x00_debounce_register(
2174 ISP_REQ_Q_OUT(ha
, ®
->isp
));
2176 if (req
->ring_index
< cnt
)
2177 req
->cnt
= cnt
- req
->ring_index
;
2179 req
->cnt
= req
->length
-
2180 (req
->ring_index
- cnt
);
2182 if (req
->cnt
< req_cnt
+ 2)
2186 req
->cnt
-= req_cnt
;
2187 pkt
= req
->ring_ptr
;
2188 memset(pkt
, 0, REQUEST_ENTRY_SIZE
);
2189 if (IS_QLAFX00(ha
)) {
2190 WRT_REG_BYTE((void __iomem
*)&pkt
->entry_count
, req_cnt
);
2191 WRT_REG_WORD((void __iomem
*)&pkt
->handle
, handle
);
2193 pkt
->entry_count
= req_cnt
;
2194 pkt
->handle
= handle
;
2198 vha
->tgt_counters
.num_alloc_iocb_failed
++;
2203 qla24xx_login_iocb(srb_t
*sp
, struct logio_entry_24xx
*logio
)
2205 struct srb_iocb
*lio
= &sp
->u
.iocb_cmd
;
2207 logio
->entry_type
= LOGINOUT_PORT_IOCB_TYPE
;
2208 logio
->control_flags
= cpu_to_le16(LCF_COMMAND_PLOGI
);
2209 if (lio
->u
.logio
.flags
& SRB_LOGIN_COND_PLOGI
)
2210 logio
->control_flags
|= cpu_to_le16(LCF_COND_PLOGI
);
2211 if (lio
->u
.logio
.flags
& SRB_LOGIN_SKIP_PRLI
)
2212 logio
->control_flags
|= cpu_to_le16(LCF_SKIP_PRLI
);
2213 logio
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2214 logio
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2215 logio
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2216 logio
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2217 logio
->vp_index
= sp
->fcport
->vha
->vp_idx
;
2221 qla2x00_login_iocb(srb_t
*sp
, struct mbx_entry
*mbx
)
2223 struct qla_hw_data
*ha
= sp
->fcport
->vha
->hw
;
2224 struct srb_iocb
*lio
= &sp
->u
.iocb_cmd
;
2227 mbx
->entry_type
= MBX_IOCB_TYPE
;
2228 SET_TARGET_ID(ha
, mbx
->loop_id
, sp
->fcport
->loop_id
);
2229 mbx
->mb0
= cpu_to_le16(MBC_LOGIN_FABRIC_PORT
);
2230 opts
= lio
->u
.logio
.flags
& SRB_LOGIN_COND_PLOGI
? BIT_0
: 0;
2231 opts
|= lio
->u
.logio
.flags
& SRB_LOGIN_SKIP_PRLI
? BIT_1
: 0;
2232 if (HAS_EXTENDED_IDS(ha
)) {
2233 mbx
->mb1
= cpu_to_le16(sp
->fcport
->loop_id
);
2234 mbx
->mb10
= cpu_to_le16(opts
);
2236 mbx
->mb1
= cpu_to_le16((sp
->fcport
->loop_id
<< 8) | opts
);
2238 mbx
->mb2
= cpu_to_le16(sp
->fcport
->d_id
.b
.domain
);
2239 mbx
->mb3
= cpu_to_le16(sp
->fcport
->d_id
.b
.area
<< 8 |
2240 sp
->fcport
->d_id
.b
.al_pa
);
2241 mbx
->mb9
= cpu_to_le16(sp
->fcport
->vha
->vp_idx
);
2245 qla24xx_logout_iocb(srb_t
*sp
, struct logio_entry_24xx
*logio
)
2247 logio
->entry_type
= LOGINOUT_PORT_IOCB_TYPE
;
2248 logio
->control_flags
=
2249 cpu_to_le16(LCF_COMMAND_LOGO
|LCF_IMPL_LOGO
);
2250 if (!sp
->fcport
->tgt_session
||
2251 !sp
->fcport
->tgt_session
->keep_nport_handle
)
2252 logio
->control_flags
|= cpu_to_le16(LCF_FREE_NPORT
);
2253 logio
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2254 logio
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2255 logio
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2256 logio
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2257 logio
->vp_index
= sp
->fcport
->vha
->vp_idx
;
2261 qla2x00_logout_iocb(srb_t
*sp
, struct mbx_entry
*mbx
)
2263 struct qla_hw_data
*ha
= sp
->fcport
->vha
->hw
;
2265 mbx
->entry_type
= MBX_IOCB_TYPE
;
2266 SET_TARGET_ID(ha
, mbx
->loop_id
, sp
->fcport
->loop_id
);
2267 mbx
->mb0
= cpu_to_le16(MBC_LOGOUT_FABRIC_PORT
);
2268 mbx
->mb1
= HAS_EXTENDED_IDS(ha
) ?
2269 cpu_to_le16(sp
->fcport
->loop_id
):
2270 cpu_to_le16(sp
->fcport
->loop_id
<< 8);
2271 mbx
->mb2
= cpu_to_le16(sp
->fcport
->d_id
.b
.domain
);
2272 mbx
->mb3
= cpu_to_le16(sp
->fcport
->d_id
.b
.area
<< 8 |
2273 sp
->fcport
->d_id
.b
.al_pa
);
2274 mbx
->mb9
= cpu_to_le16(sp
->fcport
->vha
->vp_idx
);
2275 /* Implicit: mbx->mbx10 = 0. */
2279 qla24xx_adisc_iocb(srb_t
*sp
, struct logio_entry_24xx
*logio
)
2281 logio
->entry_type
= LOGINOUT_PORT_IOCB_TYPE
;
2282 logio
->control_flags
= cpu_to_le16(LCF_COMMAND_ADISC
);
2283 logio
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2284 logio
->vp_index
= sp
->fcport
->vha
->vp_idx
;
2288 qla2x00_adisc_iocb(srb_t
*sp
, struct mbx_entry
*mbx
)
2290 struct qla_hw_data
*ha
= sp
->fcport
->vha
->hw
;
2292 mbx
->entry_type
= MBX_IOCB_TYPE
;
2293 SET_TARGET_ID(ha
, mbx
->loop_id
, sp
->fcport
->loop_id
);
2294 mbx
->mb0
= cpu_to_le16(MBC_GET_PORT_DATABASE
);
2295 if (HAS_EXTENDED_IDS(ha
)) {
2296 mbx
->mb1
= cpu_to_le16(sp
->fcport
->loop_id
);
2297 mbx
->mb10
= cpu_to_le16(BIT_0
);
2299 mbx
->mb1
= cpu_to_le16((sp
->fcport
->loop_id
<< 8) | BIT_0
);
2301 mbx
->mb2
= cpu_to_le16(MSW(ha
->async_pd_dma
));
2302 mbx
->mb3
= cpu_to_le16(LSW(ha
->async_pd_dma
));
2303 mbx
->mb6
= cpu_to_le16(MSW(MSD(ha
->async_pd_dma
)));
2304 mbx
->mb7
= cpu_to_le16(LSW(MSD(ha
->async_pd_dma
)));
2305 mbx
->mb9
= cpu_to_le16(sp
->fcport
->vha
->vp_idx
);
2309 qla24xx_tm_iocb(srb_t
*sp
, struct tsk_mgmt_entry
*tsk
)
2313 struct fc_port
*fcport
= sp
->fcport
;
2314 scsi_qla_host_t
*vha
= fcport
->vha
;
2315 struct qla_hw_data
*ha
= vha
->hw
;
2316 struct srb_iocb
*iocb
= &sp
->u
.iocb_cmd
;
2317 struct req_que
*req
= vha
->req
;
2319 flags
= iocb
->u
.tmf
.flags
;
2320 lun
= iocb
->u
.tmf
.lun
;
2322 tsk
->entry_type
= TSK_MGMT_IOCB_TYPE
;
2323 tsk
->entry_count
= 1;
2324 tsk
->handle
= MAKE_HANDLE(req
->id
, tsk
->handle
);
2325 tsk
->nport_handle
= cpu_to_le16(fcport
->loop_id
);
2326 tsk
->timeout
= cpu_to_le16(ha
->r_a_tov
/ 10 * 2);
2327 tsk
->control_flags
= cpu_to_le32(flags
);
2328 tsk
->port_id
[0] = fcport
->d_id
.b
.al_pa
;
2329 tsk
->port_id
[1] = fcport
->d_id
.b
.area
;
2330 tsk
->port_id
[2] = fcport
->d_id
.b
.domain
;
2331 tsk
->vp_index
= fcport
->vha
->vp_idx
;
2333 if (flags
== TCF_LUN_RESET
) {
2334 int_to_scsilun(lun
, &tsk
->lun
);
2335 host_to_fcp_swap((uint8_t *)&tsk
->lun
,
2341 qla2x00_els_dcmd_sp_free(void *ptr
, void *data
)
2343 struct scsi_qla_host
*vha
= (scsi_qla_host_t
*)ptr
;
2344 struct qla_hw_data
*ha
= vha
->hw
;
2345 srb_t
*sp
= (srb_t
*)data
;
2346 struct srb_iocb
*elsio
= &sp
->u
.iocb_cmd
;
2350 if (elsio
->u
.els_logo
.els_logo_pyld
)
2351 dma_free_coherent(&ha
->pdev
->dev
, DMA_POOL_SIZE
,
2352 elsio
->u
.els_logo
.els_logo_pyld
,
2353 elsio
->u
.els_logo
.els_logo_pyld_dma
);
2355 del_timer(&elsio
->timer
);
2356 qla2x00_rel_sp(vha
, sp
);
2360 qla2x00_els_dcmd_iocb_timeout(void *data
)
2362 srb_t
*sp
= (srb_t
*)data
;
2363 struct srb_iocb
*lio
= &sp
->u
.iocb_cmd
;
2364 fc_port_t
*fcport
= sp
->fcport
;
2365 struct scsi_qla_host
*vha
= fcport
->vha
;
2366 struct qla_hw_data
*ha
= vha
->hw
;
2367 unsigned long flags
= 0;
2369 ql_dbg(ql_dbg_io
, vha
, 0x3069,
2370 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2371 sp
->name
, sp
->handle
, fcport
->d_id
.b
.domain
, fcport
->d_id
.b
.area
,
2372 fcport
->d_id
.b
.al_pa
);
2374 /* Abort the exchange */
2375 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
2376 if (ha
->isp_ops
->abort_command(sp
)) {
2377 ql_dbg(ql_dbg_io
, vha
, 0x3070,
2378 "mbx abort_command failed.\n");
2380 ql_dbg(ql_dbg_io
, vha
, 0x3071,
2381 "mbx abort_command success.\n");
2383 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
2385 complete(&lio
->u
.els_logo
.comp
);
2389 qla2x00_els_dcmd_sp_done(void *data
, void *ptr
, int res
)
2391 srb_t
*sp
= (srb_t
*)ptr
;
2392 fc_port_t
*fcport
= sp
->fcport
;
2393 struct srb_iocb
*lio
= &sp
->u
.iocb_cmd
;
2394 struct scsi_qla_host
*vha
= fcport
->vha
;
2396 ql_dbg(ql_dbg_io
, vha
, 0x3072,
2397 "%s hdl=%x, portid=%02x%02x%02x done\n",
2398 sp
->name
, sp
->handle
, fcport
->d_id
.b
.domain
,
2399 fcport
->d_id
.b
.area
, fcport
->d_id
.b
.al_pa
);
2401 complete(&lio
->u
.els_logo
.comp
);
2405 qla24xx_els_dcmd_iocb(scsi_qla_host_t
*vha
, int els_opcode
,
2406 port_id_t remote_did
)
2409 fc_port_t
*fcport
= NULL
;
2410 struct srb_iocb
*elsio
= NULL
;
2411 struct qla_hw_data
*ha
= vha
->hw
;
2412 struct els_logo_payload logo_pyld
;
2413 int rval
= QLA_SUCCESS
;
2415 fcport
= qla2x00_alloc_fcport(vha
, GFP_KERNEL
);
2417 ql_log(ql_log_info
, vha
, 0x70e5, "fcport allocation failed\n");
2421 /* Alloc SRB structure */
2422 sp
= qla2x00_get_sp(vha
, fcport
, GFP_KERNEL
);
2425 ql_log(ql_log_info
, vha
, 0x70e6,
2426 "SRB allocation failed\n");
2430 elsio
= &sp
->u
.iocb_cmd
;
2431 fcport
->loop_id
= 0xFFFF;
2432 fcport
->d_id
.b
.domain
= remote_did
.b
.domain
;
2433 fcport
->d_id
.b
.area
= remote_did
.b
.area
;
2434 fcport
->d_id
.b
.al_pa
= remote_did
.b
.al_pa
;
2436 ql_dbg(ql_dbg_io
, vha
, 0x3073, "portid=%02x%02x%02x done\n",
2437 fcport
->d_id
.b
.domain
, fcport
->d_id
.b
.area
, fcport
->d_id
.b
.al_pa
);
2439 sp
->type
= SRB_ELS_DCMD
;
2440 sp
->name
= "ELS_DCMD";
2441 sp
->fcport
= fcport
;
2442 qla2x00_init_timer(sp
, ELS_DCMD_TIMEOUT
);
2443 elsio
->timeout
= qla2x00_els_dcmd_iocb_timeout
;
2444 sp
->done
= qla2x00_els_dcmd_sp_done
;
2445 sp
->free
= qla2x00_els_dcmd_sp_free
;
2447 elsio
->u
.els_logo
.els_logo_pyld
= dma_alloc_coherent(&ha
->pdev
->dev
,
2448 DMA_POOL_SIZE
, &elsio
->u
.els_logo
.els_logo_pyld_dma
,
2451 if (!elsio
->u
.els_logo
.els_logo_pyld
) {
2453 return QLA_FUNCTION_FAILED
;
2456 memset(&logo_pyld
, 0, sizeof(struct els_logo_payload
));
2458 elsio
->u
.els_logo
.els_cmd
= els_opcode
;
2459 logo_pyld
.opcode
= els_opcode
;
2460 logo_pyld
.s_id
[0] = vha
->d_id
.b
.al_pa
;
2461 logo_pyld
.s_id
[1] = vha
->d_id
.b
.area
;
2462 logo_pyld
.s_id
[2] = vha
->d_id
.b
.domain
;
2463 host_to_fcp_swap(logo_pyld
.s_id
, sizeof(uint32_t));
2464 memcpy(&logo_pyld
.wwpn
, vha
->port_name
, WWN_SIZE
);
2466 memcpy(elsio
->u
.els_logo
.els_logo_pyld
, &logo_pyld
,
2467 sizeof(struct els_logo_payload
));
2469 rval
= qla2x00_start_sp(sp
);
2470 if (rval
!= QLA_SUCCESS
) {
2472 return QLA_FUNCTION_FAILED
;
2475 ql_dbg(ql_dbg_io
, vha
, 0x3074,
2476 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2477 sp
->name
, sp
->handle
, fcport
->loop_id
, fcport
->d_id
.b
.domain
,
2478 fcport
->d_id
.b
.area
, fcport
->d_id
.b
.al_pa
);
2480 wait_for_completion(&elsio
->u
.els_logo
.comp
);
2487 qla24xx_els_logo_iocb(srb_t
*sp
, struct els_entry_24xx
*els_iocb
)
2489 scsi_qla_host_t
*vha
= sp
->fcport
->vha
;
2490 struct srb_iocb
*elsio
= &sp
->u
.iocb_cmd
;
2492 els_iocb
->entry_type
= ELS_IOCB_TYPE
;
2493 els_iocb
->entry_count
= 1;
2494 els_iocb
->sys_define
= 0;
2495 els_iocb
->entry_status
= 0;
2496 els_iocb
->handle
= sp
->handle
;
2497 els_iocb
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2498 els_iocb
->tx_dsd_count
= 1;
2499 els_iocb
->vp_index
= vha
->vp_idx
;
2500 els_iocb
->sof_type
= EST_SOFI3
;
2501 els_iocb
->rx_dsd_count
= 0;
2502 els_iocb
->opcode
= elsio
->u
.els_logo
.els_cmd
;
2504 els_iocb
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2505 els_iocb
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2506 els_iocb
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2507 els_iocb
->control_flags
= 0;
2509 els_iocb
->tx_byte_count
= sizeof(struct els_logo_payload
);
2510 els_iocb
->tx_address
[0] =
2511 cpu_to_le32(LSD(elsio
->u
.els_logo
.els_logo_pyld_dma
));
2512 els_iocb
->tx_address
[1] =
2513 cpu_to_le32(MSD(elsio
->u
.els_logo
.els_logo_pyld_dma
));
2514 els_iocb
->tx_len
= cpu_to_le32(sizeof(struct els_logo_payload
));
2516 els_iocb
->rx_byte_count
= 0;
2517 els_iocb
->rx_address
[0] = 0;
2518 els_iocb
->rx_address
[1] = 0;
2519 els_iocb
->rx_len
= 0;
2521 sp
->fcport
->vha
->qla_stats
.control_requests
++;
2525 qla24xx_els_iocb(srb_t
*sp
, struct els_entry_24xx
*els_iocb
)
2527 struct bsg_job
*bsg_job
= sp
->u
.bsg_job
;
2528 struct fc_bsg_request
*bsg_request
= bsg_job
->request
;
2530 els_iocb
->entry_type
= ELS_IOCB_TYPE
;
2531 els_iocb
->entry_count
= 1;
2532 els_iocb
->sys_define
= 0;
2533 els_iocb
->entry_status
= 0;
2534 els_iocb
->handle
= sp
->handle
;
2535 els_iocb
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2536 els_iocb
->tx_dsd_count
= cpu_to_le16(bsg_job
->request_payload
.sg_cnt
);
2537 els_iocb
->vp_index
= sp
->fcport
->vha
->vp_idx
;
2538 els_iocb
->sof_type
= EST_SOFI3
;
2539 els_iocb
->rx_dsd_count
= cpu_to_le16(bsg_job
->reply_payload
.sg_cnt
);
2542 sp
->type
== SRB_ELS_CMD_RPT
?
2543 bsg_request
->rqst_data
.r_els
.els_code
:
2544 bsg_request
->rqst_data
.h_els
.command_code
;
2545 els_iocb
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2546 els_iocb
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2547 els_iocb
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2548 els_iocb
->control_flags
= 0;
2549 els_iocb
->rx_byte_count
=
2550 cpu_to_le32(bsg_job
->reply_payload
.payload_len
);
2551 els_iocb
->tx_byte_count
=
2552 cpu_to_le32(bsg_job
->request_payload
.payload_len
);
2554 els_iocb
->tx_address
[0] = cpu_to_le32(LSD(sg_dma_address
2555 (bsg_job
->request_payload
.sg_list
)));
2556 els_iocb
->tx_address
[1] = cpu_to_le32(MSD(sg_dma_address
2557 (bsg_job
->request_payload
.sg_list
)));
2558 els_iocb
->tx_len
= cpu_to_le32(sg_dma_len
2559 (bsg_job
->request_payload
.sg_list
));
2561 els_iocb
->rx_address
[0] = cpu_to_le32(LSD(sg_dma_address
2562 (bsg_job
->reply_payload
.sg_list
)));
2563 els_iocb
->rx_address
[1] = cpu_to_le32(MSD(sg_dma_address
2564 (bsg_job
->reply_payload
.sg_list
)));
2565 els_iocb
->rx_len
= cpu_to_le32(sg_dma_len
2566 (bsg_job
->reply_payload
.sg_list
));
2568 sp
->fcport
->vha
->qla_stats
.control_requests
++;
2572 qla2x00_ct_iocb(srb_t
*sp
, ms_iocb_entry_t
*ct_iocb
)
2574 uint16_t avail_dsds
;
2576 struct scatterlist
*sg
;
2579 scsi_qla_host_t
*vha
= sp
->fcport
->vha
;
2580 struct qla_hw_data
*ha
= vha
->hw
;
2581 struct bsg_job
*bsg_job
= sp
->u
.bsg_job
;
2582 int loop_iterartion
= 0;
2583 int entry_count
= 1;
2585 memset(ct_iocb
, 0, sizeof(ms_iocb_entry_t
));
2586 ct_iocb
->entry_type
= CT_IOCB_TYPE
;
2587 ct_iocb
->entry_status
= 0;
2588 ct_iocb
->handle1
= sp
->handle
;
2589 SET_TARGET_ID(ha
, ct_iocb
->loop_id
, sp
->fcport
->loop_id
);
2590 ct_iocb
->status
= cpu_to_le16(0);
2591 ct_iocb
->control_flags
= cpu_to_le16(0);
2592 ct_iocb
->timeout
= 0;
2593 ct_iocb
->cmd_dsd_count
=
2594 cpu_to_le16(bsg_job
->request_payload
.sg_cnt
);
2595 ct_iocb
->total_dsd_count
=
2596 cpu_to_le16(bsg_job
->request_payload
.sg_cnt
+ 1);
2597 ct_iocb
->req_bytecount
=
2598 cpu_to_le32(bsg_job
->request_payload
.payload_len
);
2599 ct_iocb
->rsp_bytecount
=
2600 cpu_to_le32(bsg_job
->reply_payload
.payload_len
);
2602 ct_iocb
->dseg_req_address
[0] = cpu_to_le32(LSD(sg_dma_address
2603 (bsg_job
->request_payload
.sg_list
)));
2604 ct_iocb
->dseg_req_address
[1] = cpu_to_le32(MSD(sg_dma_address
2605 (bsg_job
->request_payload
.sg_list
)));
2606 ct_iocb
->dseg_req_length
= ct_iocb
->req_bytecount
;
2608 ct_iocb
->dseg_rsp_address
[0] = cpu_to_le32(LSD(sg_dma_address
2609 (bsg_job
->reply_payload
.sg_list
)));
2610 ct_iocb
->dseg_rsp_address
[1] = cpu_to_le32(MSD(sg_dma_address
2611 (bsg_job
->reply_payload
.sg_list
)));
2612 ct_iocb
->dseg_rsp_length
= ct_iocb
->rsp_bytecount
;
2615 cur_dsd
= (uint32_t *)ct_iocb
->dseg_rsp_address
;
2617 tot_dsds
= bsg_job
->reply_payload
.sg_cnt
;
2619 for_each_sg(bsg_job
->reply_payload
.sg_list
, sg
, tot_dsds
, index
) {
2621 cont_a64_entry_t
*cont_pkt
;
2623 /* Allocate additional continuation packets? */
2624 if (avail_dsds
== 0) {
2626 * Five DSDs are available in the Cont.
2629 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
,
2630 vha
->hw
->req_q_map
[0]);
2631 cur_dsd
= (uint32_t *) cont_pkt
->dseg_0_address
;
2636 sle_dma
= sg_dma_address(sg
);
2637 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
2638 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
2639 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
2643 ct_iocb
->entry_count
= entry_count
;
2645 sp
->fcport
->vha
->qla_stats
.control_requests
++;
2649 qla24xx_ct_iocb(srb_t
*sp
, struct ct_entry_24xx
*ct_iocb
)
2651 uint16_t avail_dsds
;
2653 struct scatterlist
*sg
;
2656 scsi_qla_host_t
*vha
= sp
->fcport
->vha
;
2657 struct qla_hw_data
*ha
= vha
->hw
;
2658 struct bsg_job
*bsg_job
= sp
->u
.bsg_job
;
2659 int loop_iterartion
= 0;
2660 int entry_count
= 1;
2662 ct_iocb
->entry_type
= CT_IOCB_TYPE
;
2663 ct_iocb
->entry_status
= 0;
2664 ct_iocb
->sys_define
= 0;
2665 ct_iocb
->handle
= sp
->handle
;
2667 ct_iocb
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2668 ct_iocb
->vp_index
= sp
->fcport
->vha
->vp_idx
;
2669 ct_iocb
->comp_status
= cpu_to_le16(0);
2671 ct_iocb
->cmd_dsd_count
=
2672 cpu_to_le16(bsg_job
->request_payload
.sg_cnt
);
2673 ct_iocb
->timeout
= 0;
2674 ct_iocb
->rsp_dsd_count
=
2675 cpu_to_le16(bsg_job
->reply_payload
.sg_cnt
);
2676 ct_iocb
->rsp_byte_count
=
2677 cpu_to_le32(bsg_job
->reply_payload
.payload_len
);
2678 ct_iocb
->cmd_byte_count
=
2679 cpu_to_le32(bsg_job
->request_payload
.payload_len
);
2680 ct_iocb
->dseg_0_address
[0] = cpu_to_le32(LSD(sg_dma_address
2681 (bsg_job
->request_payload
.sg_list
)));
2682 ct_iocb
->dseg_0_address
[1] = cpu_to_le32(MSD(sg_dma_address
2683 (bsg_job
->request_payload
.sg_list
)));
2684 ct_iocb
->dseg_0_len
= cpu_to_le32(sg_dma_len
2685 (bsg_job
->request_payload
.sg_list
));
2688 cur_dsd
= (uint32_t *)ct_iocb
->dseg_1_address
;
2690 tot_dsds
= bsg_job
->reply_payload
.sg_cnt
;
2692 for_each_sg(bsg_job
->reply_payload
.sg_list
, sg
, tot_dsds
, index
) {
2694 cont_a64_entry_t
*cont_pkt
;
2696 /* Allocate additional continuation packets? */
2697 if (avail_dsds
== 0) {
2699 * Five DSDs are available in the Cont.
2702 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
,
2704 cur_dsd
= (uint32_t *) cont_pkt
->dseg_0_address
;
2709 sle_dma
= sg_dma_address(sg
);
2710 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
2711 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
2712 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
2716 ct_iocb
->entry_count
= entry_count
;
2720 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2721 * @sp: command to send to the ISP
2723 * Returns non-zero if a failure occurred, else zero.
2726 qla82xx_start_scsi(srb_t
*sp
)
2729 unsigned long flags
;
2730 struct scsi_cmnd
*cmd
;
2737 struct device_reg_82xx __iomem
*reg
;
2740 uint8_t additional_cdb_len
;
2741 struct ct6_dsd
*ctx
;
2742 struct scsi_qla_host
*vha
= sp
->fcport
->vha
;
2743 struct qla_hw_data
*ha
= vha
->hw
;
2744 struct req_que
*req
= NULL
;
2745 struct rsp_que
*rsp
= NULL
;
2747 /* Setup device pointers. */
2748 reg
= &ha
->iobase
->isp82
;
2749 cmd
= GET_CMD_SP(sp
);
2751 rsp
= ha
->rsp_q_map
[0];
2753 /* So we know we haven't pci_map'ed anything yet */
2756 dbval
= 0x04 | (ha
->portnum
<< 5);
2758 /* Send marker if required */
2759 if (vha
->marker_needed
!= 0) {
2760 if (qla2x00_marker(vha
, req
,
2761 rsp
, 0, 0, MK_SYNC_ALL
) != QLA_SUCCESS
) {
2762 ql_log(ql_log_warn
, vha
, 0x300c,
2763 "qla2x00_marker failed for cmd=%p.\n", cmd
);
2764 return QLA_FUNCTION_FAILED
;
2766 vha
->marker_needed
= 0;
2769 /* Acquire ring specific lock */
2770 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
2772 /* Check for room in outstanding command list. */
2773 handle
= req
->current_outstanding_cmd
;
2774 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
2776 if (handle
== req
->num_outstanding_cmds
)
2778 if (!req
->outstanding_cmds
[handle
])
2781 if (index
== req
->num_outstanding_cmds
)
2784 /* Map the sg table so we have an accurate count of sg entries needed */
2785 if (scsi_sg_count(cmd
)) {
2786 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
2787 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
2788 if (unlikely(!nseg
))
2795 if (tot_dsds
> ql2xshiftctondsd
) {
2796 struct cmd_type_6
*cmd_pkt
;
2797 uint16_t more_dsd_lists
= 0;
2798 struct dsd_dma
*dsd_ptr
;
2801 more_dsd_lists
= qla24xx_calc_dsd_lists(tot_dsds
);
2802 if ((more_dsd_lists
+ ha
->gbl_dsd_inuse
) >= NUM_DSD_CHAIN
) {
2803 ql_dbg(ql_dbg_io
, vha
, 0x300d,
2804 "Num of DSD list %d is than %d for cmd=%p.\n",
2805 more_dsd_lists
+ ha
->gbl_dsd_inuse
, NUM_DSD_CHAIN
,
2810 if (more_dsd_lists
<= ha
->gbl_dsd_avail
)
2811 goto sufficient_dsds
;
2813 more_dsd_lists
-= ha
->gbl_dsd_avail
;
2815 for (i
= 0; i
< more_dsd_lists
; i
++) {
2816 dsd_ptr
= kzalloc(sizeof(struct dsd_dma
), GFP_ATOMIC
);
2818 ql_log(ql_log_fatal
, vha
, 0x300e,
2819 "Failed to allocate memory for dsd_dma "
2820 "for cmd=%p.\n", cmd
);
2824 dsd_ptr
->dsd_addr
= dma_pool_alloc(ha
->dl_dma_pool
,
2825 GFP_ATOMIC
, &dsd_ptr
->dsd_list_dma
);
2826 if (!dsd_ptr
->dsd_addr
) {
2828 ql_log(ql_log_fatal
, vha
, 0x300f,
2829 "Failed to allocate memory for dsd_addr "
2830 "for cmd=%p.\n", cmd
);
2833 list_add_tail(&dsd_ptr
->list
, &ha
->gbl_dsd_list
);
2834 ha
->gbl_dsd_avail
++;
2840 if (req
->cnt
< (req_cnt
+ 2)) {
2841 cnt
= (uint16_t)RD_REG_DWORD_RELAXED(
2842 ®
->req_q_out
[0]);
2843 if (req
->ring_index
< cnt
)
2844 req
->cnt
= cnt
- req
->ring_index
;
2846 req
->cnt
= req
->length
-
2847 (req
->ring_index
- cnt
);
2848 if (req
->cnt
< (req_cnt
+ 2))
2852 ctx
= sp
->u
.scmd
.ctx
=
2853 mempool_alloc(ha
->ctx_mempool
, GFP_ATOMIC
);
2855 ql_log(ql_log_fatal
, vha
, 0x3010,
2856 "Failed to allocate ctx for cmd=%p.\n", cmd
);
2860 memset(ctx
, 0, sizeof(struct ct6_dsd
));
2861 ctx
->fcp_cmnd
= dma_pool_alloc(ha
->fcp_cmnd_dma_pool
,
2862 GFP_ATOMIC
, &ctx
->fcp_cmnd_dma
);
2863 if (!ctx
->fcp_cmnd
) {
2864 ql_log(ql_log_fatal
, vha
, 0x3011,
2865 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd
);
2869 /* Initialize the DSD list and dma handle */
2870 INIT_LIST_HEAD(&ctx
->dsd_list
);
2871 ctx
->dsd_use_cnt
= 0;
2873 if (cmd
->cmd_len
> 16) {
2874 additional_cdb_len
= cmd
->cmd_len
- 16;
2875 if ((cmd
->cmd_len
% 4) != 0) {
2876 /* SCSI command bigger than 16 bytes must be
2879 ql_log(ql_log_warn
, vha
, 0x3012,
2880 "scsi cmd len %d not multiple of 4 "
2881 "for cmd=%p.\n", cmd
->cmd_len
, cmd
);
2882 goto queuing_error_fcp_cmnd
;
2884 ctx
->fcp_cmnd_len
= 12 + cmd
->cmd_len
+ 4;
2886 additional_cdb_len
= 0;
2887 ctx
->fcp_cmnd_len
= 12 + 16 + 4;
2890 cmd_pkt
= (struct cmd_type_6
*)req
->ring_ptr
;
2891 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
2893 /* Zero out remaining portion of packet. */
2894 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2895 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
2896 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
2897 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
2899 /* Set NPORT-ID and LUN number*/
2900 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2901 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2902 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2903 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2904 cmd_pkt
->vp_index
= sp
->fcport
->vha
->vp_idx
;
2906 /* Build IOCB segments */
2907 if (qla24xx_build_scsi_type_6_iocbs(sp
, cmd_pkt
, tot_dsds
))
2908 goto queuing_error_fcp_cmnd
;
2910 int_to_scsilun(cmd
->device
->lun
, &cmd_pkt
->lun
);
2911 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
, sizeof(cmd_pkt
->lun
));
2913 /* build FCP_CMND IU */
2914 memset(ctx
->fcp_cmnd
, 0, sizeof(struct fcp_cmnd
));
2915 int_to_scsilun(cmd
->device
->lun
, &ctx
->fcp_cmnd
->lun
);
2916 ctx
->fcp_cmnd
->additional_cdb_len
= additional_cdb_len
;
2918 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
)
2919 ctx
->fcp_cmnd
->additional_cdb_len
|= 1;
2920 else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
)
2921 ctx
->fcp_cmnd
->additional_cdb_len
|= 2;
2923 /* Populate the FCP_PRIO. */
2924 if (ha
->flags
.fcp_prio_enabled
)
2925 ctx
->fcp_cmnd
->task_attribute
|=
2926 sp
->fcport
->fcp_prio
<< 3;
2928 memcpy(ctx
->fcp_cmnd
->cdb
, cmd
->cmnd
, cmd
->cmd_len
);
2930 fcp_dl
= (uint32_t *)(ctx
->fcp_cmnd
->cdb
+ 16 +
2931 additional_cdb_len
);
2932 *fcp_dl
= htonl((uint32_t)scsi_bufflen(cmd
));
2934 cmd_pkt
->fcp_cmnd_dseg_len
= cpu_to_le16(ctx
->fcp_cmnd_len
);
2935 cmd_pkt
->fcp_cmnd_dseg_address
[0] =
2936 cpu_to_le32(LSD(ctx
->fcp_cmnd_dma
));
2937 cmd_pkt
->fcp_cmnd_dseg_address
[1] =
2938 cpu_to_le32(MSD(ctx
->fcp_cmnd_dma
));
2940 sp
->flags
|= SRB_FCP_CMND_DMA_VALID
;
2941 cmd_pkt
->byte_count
= cpu_to_le32((uint32_t)scsi_bufflen(cmd
));
2942 /* Set total data segment count. */
2943 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
2944 /* Specify response queue number where
2945 * completion should happen
2947 cmd_pkt
->entry_status
= (uint8_t) rsp
->id
;
2949 struct cmd_type_7
*cmd_pkt
;
2950 req_cnt
= qla24xx_calc_iocbs(vha
, tot_dsds
);
2951 if (req
->cnt
< (req_cnt
+ 2)) {
2952 cnt
= (uint16_t)RD_REG_DWORD_RELAXED(
2953 ®
->req_q_out
[0]);
2954 if (req
->ring_index
< cnt
)
2955 req
->cnt
= cnt
- req
->ring_index
;
2957 req
->cnt
= req
->length
-
2958 (req
->ring_index
- cnt
);
2960 if (req
->cnt
< (req_cnt
+ 2))
2963 cmd_pkt
= (struct cmd_type_7
*)req
->ring_ptr
;
2964 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
2966 /* Zero out remaining portion of packet. */
2967 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2968 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
2969 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
2970 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
2972 /* Set NPORT-ID and LUN number*/
2973 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2974 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2975 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2976 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2977 cmd_pkt
->vp_index
= sp
->fcport
->vha
->vp_idx
;
2979 int_to_scsilun(cmd
->device
->lun
, &cmd_pkt
->lun
);
2980 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
,
2981 sizeof(cmd_pkt
->lun
));
2983 /* Populate the FCP_PRIO. */
2984 if (ha
->flags
.fcp_prio_enabled
)
2985 cmd_pkt
->task
|= sp
->fcport
->fcp_prio
<< 3;
2987 /* Load SCSI command packet. */
2988 memcpy(cmd_pkt
->fcp_cdb
, cmd
->cmnd
, cmd
->cmd_len
);
2989 host_to_fcp_swap(cmd_pkt
->fcp_cdb
, sizeof(cmd_pkt
->fcp_cdb
));
2991 cmd_pkt
->byte_count
= cpu_to_le32((uint32_t)scsi_bufflen(cmd
));
2993 /* Build IOCB segments */
2994 qla24xx_build_scsi_iocbs(sp
, cmd_pkt
, tot_dsds
, req
);
2996 /* Set total data segment count. */
2997 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
2998 /* Specify response queue number where
2999 * completion should happen.
3001 cmd_pkt
->entry_status
= (uint8_t) rsp
->id
;
3004 /* Build command packet. */
3005 req
->current_outstanding_cmd
= handle
;
3006 req
->outstanding_cmds
[handle
] = sp
;
3007 sp
->handle
= handle
;
3008 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
3009 req
->cnt
-= req_cnt
;
3012 /* Adjust ring index. */
3014 if (req
->ring_index
== req
->length
) {
3015 req
->ring_index
= 0;
3016 req
->ring_ptr
= req
->ring
;
3020 sp
->flags
|= SRB_DMA_VALID
;
3022 /* Set chip new ring index. */
3023 /* write, read and verify logic */
3024 dbval
= dbval
| (req
->id
<< 8) | (req
->ring_index
<< 16);
3026 qla82xx_wr_32(ha
, (uintptr_t __force
)ha
->nxdb_wr_ptr
, dbval
);
3028 WRT_REG_DWORD(ha
->nxdb_wr_ptr
, dbval
);
3030 while (RD_REG_DWORD(ha
->nxdb_rd_ptr
) != dbval
) {
3031 WRT_REG_DWORD(ha
->nxdb_wr_ptr
, dbval
);
3036 /* Manage unprocessed RIO/ZIO commands in response queue. */
3037 if (vha
->flags
.process_response_queue
&&
3038 rsp
->ring_ptr
->signature
!= RESPONSE_PROCESSED
)
3039 qla24xx_process_response_queue(vha
, rsp
);
3041 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
3044 queuing_error_fcp_cmnd
:
3045 dma_pool_free(ha
->fcp_cmnd_dma_pool
, ctx
->fcp_cmnd
, ctx
->fcp_cmnd_dma
);
3048 scsi_dma_unmap(cmd
);
3050 if (sp
->u
.scmd
.ctx
) {
3051 mempool_free(sp
->u
.scmd
.ctx
, ha
->ctx_mempool
);
3052 sp
->u
.scmd
.ctx
= NULL
;
3054 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
3056 return QLA_FUNCTION_FAILED
;
3060 qla24xx_abort_iocb(srb_t
*sp
, struct abort_entry_24xx
*abt_iocb
)
3062 struct srb_iocb
*aio
= &sp
->u
.iocb_cmd
;
3063 scsi_qla_host_t
*vha
= sp
->fcport
->vha
;
3064 struct req_que
*req
= vha
->req
;
3066 memset(abt_iocb
, 0, sizeof(struct abort_entry_24xx
));
3067 abt_iocb
->entry_type
= ABORT_IOCB_TYPE
;
3068 abt_iocb
->entry_count
= 1;
3069 abt_iocb
->handle
= cpu_to_le32(MAKE_HANDLE(req
->id
, sp
->handle
));
3070 abt_iocb
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
3071 abt_iocb
->handle_to_abort
=
3072 cpu_to_le32(MAKE_HANDLE(req
->id
, aio
->u
.abt
.cmd_hndl
));
3073 abt_iocb
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
3074 abt_iocb
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
3075 abt_iocb
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
3076 abt_iocb
->vp_index
= vha
->vp_idx
;
3077 abt_iocb
->req_que_no
= cpu_to_le16(req
->id
);
3078 /* Send the command to the firmware */
3083 qla2x00_start_sp(srb_t
*sp
)
3086 struct qla_hw_data
*ha
= sp
->fcport
->vha
->hw
;
3088 unsigned long flags
;
3090 rval
= QLA_FUNCTION_FAILED
;
3091 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
3092 pkt
= qla2x00_alloc_iocbs(sp
->fcport
->vha
, sp
);
3094 ql_log(ql_log_warn
, sp
->fcport
->vha
, 0x700c,
3095 "qla2x00_alloc_iocbs failed.\n");
3102 IS_FWI2_CAPABLE(ha
) ?
3103 qla24xx_login_iocb(sp
, pkt
) :
3104 qla2x00_login_iocb(sp
, pkt
);
3106 case SRB_LOGOUT_CMD
:
3107 IS_FWI2_CAPABLE(ha
) ?
3108 qla24xx_logout_iocb(sp
, pkt
) :
3109 qla2x00_logout_iocb(sp
, pkt
);
3111 case SRB_ELS_CMD_RPT
:
3112 case SRB_ELS_CMD_HST
:
3113 qla24xx_els_iocb(sp
, pkt
);
3116 IS_FWI2_CAPABLE(ha
) ?
3117 qla24xx_ct_iocb(sp
, pkt
) :
3118 qla2x00_ct_iocb(sp
, pkt
);
3121 IS_FWI2_CAPABLE(ha
) ?
3122 qla24xx_adisc_iocb(sp
, pkt
) :
3123 qla2x00_adisc_iocb(sp
, pkt
);
3127 qlafx00_tm_iocb(sp
, pkt
) :
3128 qla24xx_tm_iocb(sp
, pkt
);
3130 case SRB_FXIOCB_DCMD
:
3131 case SRB_FXIOCB_BCMD
:
3132 qlafx00_fxdisc_iocb(sp
, pkt
);
3136 qlafx00_abort_iocb(sp
, pkt
) :
3137 qla24xx_abort_iocb(sp
, pkt
);
3140 qla24xx_els_logo_iocb(sp
, pkt
);
3147 qla2x00_start_iocbs(sp
->fcport
->vha
, ha
->req_q_map
[0]);
3149 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
3154 qla25xx_build_bidir_iocb(srb_t
*sp
, struct scsi_qla_host
*vha
,
3155 struct cmd_bidir
*cmd_pkt
, uint32_t tot_dsds
)
3157 uint16_t avail_dsds
;
3159 uint32_t req_data_len
= 0;
3160 uint32_t rsp_data_len
= 0;
3161 struct scatterlist
*sg
;
3163 int entry_count
= 1;
3164 struct bsg_job
*bsg_job
= sp
->u
.bsg_job
;
3166 /*Update entry type to indicate bidir command */
3167 *((uint32_t *)(&cmd_pkt
->entry_type
)) =
3168 cpu_to_le32(COMMAND_BIDIRECTIONAL
);
3170 /* Set the transfer direction, in this set both flags
3171 * Also set the BD_WRAP_BACK flag, firmware will take care
3172 * assigning DID=SID for outgoing pkts.
3174 cmd_pkt
->wr_dseg_count
= cpu_to_le16(bsg_job
->request_payload
.sg_cnt
);
3175 cmd_pkt
->rd_dseg_count
= cpu_to_le16(bsg_job
->reply_payload
.sg_cnt
);
3176 cmd_pkt
->control_flags
= cpu_to_le16(BD_WRITE_DATA
| BD_READ_DATA
|
3179 req_data_len
= rsp_data_len
= bsg_job
->request_payload
.payload_len
;
3180 cmd_pkt
->wr_byte_count
= cpu_to_le32(req_data_len
);
3181 cmd_pkt
->rd_byte_count
= cpu_to_le32(rsp_data_len
);
3182 cmd_pkt
->timeout
= cpu_to_le16(qla2x00_get_async_timeout(vha
) + 2);
3184 vha
->bidi_stats
.transfer_bytes
+= req_data_len
;
3185 vha
->bidi_stats
.io_count
++;
3187 vha
->qla_stats
.output_bytes
+= req_data_len
;
3188 vha
->qla_stats
.output_requests
++;
3190 /* Only one dsd is available for bidirectional IOCB, remaining dsds
3191 * are bundled in continuation iocb
3194 cur_dsd
= (uint32_t *)&cmd_pkt
->fcp_data_dseg_address
;
3198 for_each_sg(bsg_job
->request_payload
.sg_list
, sg
,
3199 bsg_job
->request_payload
.sg_cnt
, index
) {
3201 cont_a64_entry_t
*cont_pkt
;
3203 /* Allocate additional continuation packets */
3204 if (avail_dsds
== 0) {
3205 /* Continuation type 1 IOCB can accomodate
3208 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
, vha
->req
);
3209 cur_dsd
= (uint32_t *) cont_pkt
->dseg_0_address
;
3213 sle_dma
= sg_dma_address(sg
);
3214 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
3215 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
3216 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
3219 /* For read request DSD will always goes to continuation IOCB
3220 * and follow the write DSD. If there is room on the current IOCB
3221 * then it is added to that IOCB else new continuation IOCB is
3224 for_each_sg(bsg_job
->reply_payload
.sg_list
, sg
,
3225 bsg_job
->reply_payload
.sg_cnt
, index
) {
3227 cont_a64_entry_t
*cont_pkt
;
3229 /* Allocate additional continuation packets */
3230 if (avail_dsds
== 0) {
3231 /* Continuation type 1 IOCB can accomodate
3234 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
, vha
->req
);
3235 cur_dsd
= (uint32_t *) cont_pkt
->dseg_0_address
;
3239 sle_dma
= sg_dma_address(sg
);
3240 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
3241 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
3242 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
3245 /* This value should be same as number of IOCB required for this cmd */
3246 cmd_pkt
->entry_count
= entry_count
;
3250 qla2x00_start_bidir(srb_t
*sp
, struct scsi_qla_host
*vha
, uint32_t tot_dsds
)
3253 struct qla_hw_data
*ha
= vha
->hw
;
3254 unsigned long flags
;
3260 struct cmd_bidir
*cmd_pkt
= NULL
;
3261 struct rsp_que
*rsp
;
3262 struct req_que
*req
;
3263 int rval
= EXT_STATUS_OK
;
3267 rsp
= ha
->rsp_q_map
[0];
3270 /* Send marker if required */
3271 if (vha
->marker_needed
!= 0) {
3272 if (qla2x00_marker(vha
, req
,
3273 rsp
, 0, 0, MK_SYNC_ALL
) != QLA_SUCCESS
)
3274 return EXT_STATUS_MAILBOX
;
3275 vha
->marker_needed
= 0;
3278 /* Acquire ring specific lock */
3279 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
3281 /* Check for room in outstanding command list. */
3282 handle
= req
->current_outstanding_cmd
;
3283 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
3285 if (handle
== req
->num_outstanding_cmds
)
3287 if (!req
->outstanding_cmds
[handle
])
3291 if (index
== req
->num_outstanding_cmds
) {
3292 rval
= EXT_STATUS_BUSY
;
3296 /* Calculate number of IOCB required */
3297 req_cnt
= qla24xx_calc_iocbs(vha
, tot_dsds
);
3299 /* Check for room on request queue. */
3300 if (req
->cnt
< req_cnt
+ 2) {
3301 cnt
= IS_SHADOW_REG_CAPABLE(ha
) ? *req
->out_ptr
:
3302 RD_REG_DWORD_RELAXED(req
->req_q_out
);
3303 if (req
->ring_index
< cnt
)
3304 req
->cnt
= cnt
- req
->ring_index
;
3306 req
->cnt
= req
->length
-
3307 (req
->ring_index
- cnt
);
3309 if (req
->cnt
< req_cnt
+ 2) {
3310 rval
= EXT_STATUS_BUSY
;
3314 cmd_pkt
= (struct cmd_bidir
*)req
->ring_ptr
;
3315 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
3317 /* Zero out remaining portion of packet. */
3318 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3319 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
3320 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
3322 /* Set NPORT-ID (of vha)*/
3323 cmd_pkt
->nport_handle
= cpu_to_le16(vha
->self_login_loop_id
);
3324 cmd_pkt
->port_id
[0] = vha
->d_id
.b
.al_pa
;
3325 cmd_pkt
->port_id
[1] = vha
->d_id
.b
.area
;
3326 cmd_pkt
->port_id
[2] = vha
->d_id
.b
.domain
;
3328 qla25xx_build_bidir_iocb(sp
, vha
, cmd_pkt
, tot_dsds
);
3329 cmd_pkt
->entry_status
= (uint8_t) rsp
->id
;
3330 /* Build command packet. */
3331 req
->current_outstanding_cmd
= handle
;
3332 req
->outstanding_cmds
[handle
] = sp
;
3333 sp
->handle
= handle
;
3334 req
->cnt
-= req_cnt
;
3336 /* Send the command to the firmware */
3338 qla2x00_start_iocbs(vha
, req
);
3340 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);