// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
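
/*
 * Illustrative worked example (editor's note, not from the original source):
 * for dsds = 10, the Command Type 2 IOCB holds the first 3 descriptors and
 * one Continuation Type 0 IOCB holds the remaining 7 exactly, so
 * qla2x00_calc_iocbs_32() returns 2.
 */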

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
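
/*
 * Illustrative worked example (editor's note): for dsds = 12, the Command
 * Type 3 IOCB holds 2 descriptors and two Continuation Type 1 IOCBs hold
 * 5 each, so qla2x00_calc_iocbs_64() returns 3.
 */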

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
	    CONTINUE_A64_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}

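/*
 * Illustrative note (editor's): qla24xx_configure_prot_mode() above maps the
 * SCSI layer's protection operation onto the firmware's DIF opcodes (insert,
 * remove, or pass-through) and returns the number of protection
 * scatter/gather entries, so a zero return means no DIF segments need to be
 * built for this command.
 */
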
/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd32 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
	cur_dsd = cmd_pkt->dsd32;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd32(&cur_dsd, sg);
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
	cur_dsd = cmd_pkt->dsd64;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}

/*
 * Find the first handle that is not in use, starting from
 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
 * associated with @req.
 */
uint32_t qla2xxx_get_next_handle(struct req_que *req)
{
	uint32_t index, handle = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			return handle;
	}

	return 0;
}
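
/*
 * Illustrative note (editor's): the search above wraps from the last slot
 * back to 1, never to 0, so a return of 0 unambiguously means "no free
 * handle" and slot 0 of outstanding_cmds[] stays unused.
 */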

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}
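
	/*
	 * Illustrative note (editor's): "cnt" above is the firmware's
	 * consumer index for the request ring, so the free-entry count is
	 * the distance from the driver's producer index (req->ring_index)
	 * to it, computed with wrap-around; two slots of slack are always
	 * held in reserve, hence the (req_cnt + 2) comparison.
	 */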

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->isp24.req_q_in);
		} else {
			wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct req_que *req = qpair->req;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = make_handle(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}
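
/*
 * Illustrative note (editor's): __qla2x00_marker() expects the caller to
 * already hold the queue-pair lock; the qla2x00_marker() wrapper below
 * acquires qpair->qp_lock_ptr around the call for contexts that do not.
 */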

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
	struct dsd64 *cur_dsd = NULL, *next_dsd;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = sp->u.scmd.ct6_ctx;

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cmd_pkt->fcp_dsd.address);
			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
		} else {
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd++;
		}
		cur_dsd = next_dsd;
		while (avail_dsds) {
			append_dsd64(&cur_dsd, cur_seg);
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
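
/*
 * Illustrative note (editor's): this is simply a ceiling division,
 * equivalent to DIV_ROUND_UP(dsds, QLA_DSDS_PER_IOCB).
 */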

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->dsd;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}

struct fw_dif_context {
	__le32 ref_tag;
	__le16 app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
		    pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
		    0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
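
/*
 * Illustrative note (editor's): qla24xx_get_one_block_sg() is a resumable
 * iterator. Each call yields at most one protection-interval-sized slice
 * of the scatterlist in sgx->dma_addr/sgx->dma_len, sets *partial when a
 * scatterlist element ends mid-block, and returns 0 once sgx->tot_bytes
 * has been consumed.
 */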

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
    struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	struct dsd64 *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &sp->u.scmd.crc_ctx->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		put_unaligned_le64(sle_dma, &cur_dsd->address);
		cur_dsd->length = cpu_to_le32(sle_dma_len);
		cur_dsd++;
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
    struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	struct dsd64 *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &sp->u.scmd.crc_ctx->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}
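
/*
 * Illustrative note (editor's): the walkers above emit a chain of DSD
 * lists. Each dma_pool allocation holds up to QLA_DSDS_PER_IOCB 12-byte
 * descriptors plus one extra slot, which either links to the next list
 * or is zeroed out as the null terminator.
 */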

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
    struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
	struct scatterlist *sg, *sgl;
	struct crc_context *difctx = NULL;
	struct scsi_qla_host *vha;
	uint dsd_list_len;
	uint avail_dsds = 0;
	uint used_dsds = tot_dsds;
	bool dif_local_dma_alloc = false;
	bool direction_to_device = false;
	int i;

	if (sp) {
		struct scsi_cmnd *cmd = GET_CMD_SP(sp);

		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
		difctx = sp->u.scmd.crc_ctx;
		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
		    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
		    __func__, cmd, difctx, sp);
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
		difctx = tc->ctx;
		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
	    "%s: enter (write=%u)\n", __func__, direction_to_device);

	/* if initiator doing write or target doing read */
	if (direction_to_device) {
		for_each_sg(sgl, sg, tot_dsds, i) {
			u64 sle_phys = sg_phys(sg);

			/* If SGE addr + len flips bits in upper 32-bits */
			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
				    "%s: page boundary crossing (phys=%llx len=%x)\n",
				    __func__, sle_phys, sg->length);

				if (difctx) {
					ha->dif_bundle_crossed_pages++;
					dif_local_dma_alloc = true;
				} else {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe022,
					    "%s: difctx pointer is NULL\n",
					    __func__);
				}
				break;
			}
		}
		ha->dif_bundle_writes++;
	} else {
		ha->dif_bundle_reads++;
	}

	if (ql2xdifbundlinginternalbuffers)
		dif_local_dma_alloc = direction_to_device;

	if (dif_local_dma_alloc) {
		u32 track_difbundl_buf = 0;
		u32 ldma_sg_len = 0;
		u8 ldma_needed = 1;

		difctx->no_dif_bundl = 0;
		difctx->dif_bundl_len = 0;

		/* Track DSD buffers */
		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
		/* Track local DMA buffers */
		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

		for_each_sg(sgl, sg, tot_dsds, i) {
			u32 sglen = sg_dma_len(sg);

			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
			    __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
			    difctx->dif_bundl_len, ldma_needed);

			while (sglen) {
				u32 xfrlen = 0;

				if (ldma_needed) {
					/*
					 * Allocate list item to store
					 * the DMA buffers
					 */
					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
					    GFP_ATOMIC);
					if (!dsd_ptr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc dsd_ptr\n",
						    __func__);
						return 1;
					}
					ha->dif_bundle_kallocs++;

					/* allocate dma buffer */
					dsd_ptr->dsd_addr = dma_pool_alloc
						(ha->dif_bundl_pool, GFP_ATOMIC,
						 &dsd_ptr->dsd_list_dma);
					if (!dsd_ptr->dsd_addr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc ->dsd_ptr\n",
						    __func__);
						/*
						 * need to cleanup only this
						 * dsd_ptr rest will be done
						 * by sp_free_dma()
						 */
						kfree(dsd_ptr);
						ha->dif_bundle_kallocs--;
						return 1;
					}
					ha->dif_bundle_dma_allocs++;
					ldma_needed = 0;
					difctx->no_dif_bundl++;
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dma_hndl_list);
				}

				/* xfrlen is min of dma pool size and sglen */
				xfrlen = (sglen >
				    (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
				    sglen;

				/* replace with local allocated dma buffer */
				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
				    difctx->dif_bundl_len);
				difctx->dif_bundl_len += xfrlen;
				sglen -= xfrlen;
				ldma_sg_len += xfrlen;
				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
				    sg_is_last(sg)) {
					ldma_needed = 1;
					ldma_sg_len = 0;
				}
			}
		}

		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
		    difctx->dif_bundl_len, difctx->no_dif_bundl,
		    track_difbundl_buf);

		if (sp)
			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
		else
			tc->prot_flags = DIF_BUNDL_DMA_VALID;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			u32 sglen = (difctx->dif_bundl_len >
			    DIF_BUNDLING_DMA_POOL_SIZE) ?
			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

			BUG_ON(track_difbundl_buf == 0);

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
				    0xe024,
				    "%s: adding continuation iocb's\n",
				    __func__);
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc dsd_ptr\n",
					    __func__);
					return 1;
				}
				ha->dif_bundle_kallocs++;

				difctx->no_ldif_dsd++;
				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc ->dsd_addr\n",
					    __func__);
					/*
					 * need to cleanup only this dsd_ptr
					 * rest will be done by sp_free_dma()
					 */
					kfree(dsd_ptr);
					ha->dif_bundle_kallocs--;
					return 1;
				}
				ha->dif_bundle_dma_allocs++;

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
				    &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			put_unaligned_le64(dif_dsd->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(sglen);
			cur_dsd++;
			avail_dsds--;
			difctx->dif_bundl_len -= sglen;
			track_difbundl_buf--;
		}

		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
		    difctx->no_ldif_dsd, difctx->no_dif_bundl);
	} else {
		for_each_sg(sgl, sg, tot_dsds, i) {
			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe027,
					    "%s: failed alloc dsd_dma...\n",
					    __func__);
					return 1;
				}

				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					/* need to cleanup only this dsd_ptr */
					/* rest will be done by sp_free_dma() */
					kfree(dsd_ptr);
					return 1;
				}

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
				    &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			append_dsd64(&cur_dsd, sg);
			avail_dsds--;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	struct dsd64 *cur_dsd;
	__be32 *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.crc_ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
	cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
	    &cmd_pkt->fcp_cmnd_dseg_address);
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
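
/*
 * Illustrative note (editor's): fcp_dl above is the FCP_DL field stored
 * just past the CDB in the FCP_CMND. It carries total_bytes, the wire-side
 * transfer length, which includes 8 bytes of DIF per block whenever
 * protection data actually travels with the data (strip-on-read,
 * insert-on-write, and pass-through modes).
 */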

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}
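
	/*
	 * Illustrative note (editor's): on shadow-register capable adapters
	 * the firmware publishes its request-queue consumer index into host
	 * memory, so the headroom check above reads *req->out_ptr instead
	 * of doing an MMIO register read.
	 */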

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
1879 } else
1880 req->ring_ptr++;
1881
1882 /* Set chip new ring index. */
04474d3a 1883 wrt_reg_dword(req->req_q_in, req->ring_index);
bad75002
AE
1884
1885 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1886
1887 return QLA_SUCCESS;
1888
1889queuing_error:
1890 if (status & QDSS_GOT_Q_SPACE) {
1891 req->outstanding_cmds[handle] = NULL;
1892 req->cnt += req_cnt;
1893 }
1894 /* Cleanup will be performed by the caller (queuecommand) */
1895
1896 spin_unlock_irqrestore(&ha->hardware_lock, flags);
bad75002
AE
1897 return QLA_FUNCTION_FAILED;
1898}
1899
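/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * queue-space checks above derive the number of free request-ring slots
 * from the driver's producer index and the consumer index read back from
 * the firmware (shadow register or req_q_out):
 */
static inline uint16_t example_ring_free_slots(uint16_t in_index,
    uint16_t out_index, uint16_t ring_length)
{
	if (in_index < out_index)
		return out_index - in_index;
	return ring_length - (in_index - out_index);
}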
d7459527
MH
1900/**
1901 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1902 * @sp: command to send to the ISP
1903 *
1904 * Returns non-zero if a failure occurred, else zero.
1905 */
1906static int
1907qla2xxx_start_scsi_mq(srb_t *sp)
68ca949c 1908{
d7459527
MH
1909 int nseg;
1910 unsigned long flags;
1911 uint32_t *clr_ptr;
d7459527
MH
1912 uint32_t handle;
1913 struct cmd_type_7 *cmd_pkt;
1914 uint16_t cnt;
1915 uint16_t req_cnt;
1916 uint16_t tot_dsds;
1917 struct req_que *req = NULL;
9ba56b95 1918 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
d7459527
MH
1919 struct scsi_qla_host *vha = sp->fcport->vha;
1920 struct qla_hw_data *ha = vha->hw;
1921 struct qla_qpair *qpair = sp->qpair;
1922
578079fa
JT
1923 /* Acquire qpair specific lock */
1924 spin_lock_irqsave(&qpair->qp_lock, flags);
1925
d7459527 1926 /* Setup qpair pointers */
d7459527
MH
1927 req = qpair->req;
1928
1929 /* So we know we haven't pci_map'ed anything yet */
1930 tot_dsds = 0;
1931
1932 /* Send marker if required */
1933 if (vha->marker_needed != 0) {
9eb9c6dc 1934 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
578079fa
JT
1935 QLA_SUCCESS) {
1936 spin_unlock_irqrestore(&qpair->qp_lock, flags);
d7459527 1937 return QLA_FUNCTION_FAILED;
578079fa 1938 }
d7459527
MH
1939 vha->marker_needed = 0;
1940 }
1941
bcc85657
BVA
1942 handle = qla2xxx_get_next_handle(req);
1943 if (handle == 0)
d7459527
MH
1944 goto queuing_error;
1945
1946 /* Map the sg table so we have an accurate count of sg entries needed */
1947 if (scsi_sg_count(cmd)) {
1948 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1949 scsi_sg_count(cmd), cmd->sc_data_direction);
1950 if (unlikely(!nseg))
1951 goto queuing_error;
1952 } else
1953 nseg = 0;
1954
1955 tot_dsds = nseg;
1956 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1957 if (req->cnt < (req_cnt + 2)) {
1958 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
04474d3a 1959 rd_reg_dword_relaxed(req->req_q_out);
d7459527
MH
1960 if (req->ring_index < cnt)
1961 req->cnt = cnt - req->ring_index;
1962 else
1963 req->cnt = req->length -
1964 (req->ring_index - cnt);
1965 if (req->cnt < (req_cnt + 2))
1966 goto queuing_error;
1967 }
1968
1969 /* Build command packet. */
1970 req->current_outstanding_cmd = handle;
1971 req->outstanding_cmds[handle] = sp;
1972 sp->handle = handle;
1973 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1974 req->cnt -= req_cnt;
1975
1976 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
c25eb70a 1977 cmd_pkt->handle = make_handle(req->id, handle);
d7459527
MH
1978
1979 /* Zero out remaining portion of packet. */
1980 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1981 clr_ptr = (uint32_t *)cmd_pkt + 2;
1982 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1983 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1984
1985 /* Set NPORT-ID and LUN number*/
1986 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1987 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1988 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1989 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1990 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1991
1992 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1993 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1994
1995 cmd_pkt->task = TSK_SIMPLE;
1996
1997 /* Load SCSI command packet. */
1998 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1999 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2000
2001 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2002
2003 /* Build IOCB segments */
2004 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2005
2006 /* Set total data segment count. */
2007 cmd_pkt->entry_count = (uint8_t)req_cnt;
2008 wmb();
2009 /* Adjust ring index. */
2010 req->ring_index++;
2011 if (req->ring_index == req->length) {
2012 req->ring_index = 0;
2013 req->ring_ptr = req->ring;
2014 } else
2015 req->ring_ptr++;
2016
2017 sp->flags |= SRB_DMA_VALID;
2018
2019 /* Set chip new ring index. */
04474d3a 2020 wrt_reg_dword(req->req_q_in, req->ring_index);
d7459527 2021
d7459527
MH
2022 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2023 return QLA_SUCCESS;
2024
2025queuing_error:
2026 if (tot_dsds)
2027 scsi_dma_unmap(cmd);
2028
2029 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2030
2031 return QLA_FUNCTION_FAILED;
2032}
2033
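/*
 * Illustrative sketch: make_handle(), defined elsewhere in the driver, is
 * assumed to pack the request-queue id and the per-queue command index
 * into one 32-bit IOCB handle, so the completion path can locate both the
 * queue and the outstanding_cmds[] slot.  A hypothetical equivalent:
 */
static inline uint32_t example_make_handle(uint16_t que_id, uint16_t index)
{
	return ((uint32_t)que_id << 16) | index;
}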
2034
2035/**
2036 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2037 * @sp: command to send to the ISP
2038 *
2039 * Returns non-zero if a failure occurred, else zero.
2040 */
2041int
2042qla2xxx_dif_start_scsi_mq(srb_t *sp)
2043{
2044 int nseg;
2045 unsigned long flags;
2046 uint32_t *clr_ptr;
d7459527
MH
2047 uint32_t handle;
2048 uint16_t cnt;
2049 uint16_t req_cnt = 0;
2050 uint16_t tot_dsds;
2051 uint16_t tot_prot_dsds;
2052 uint16_t fw_prot_opts = 0;
2053 struct req_que *req = NULL;
2054 struct rsp_que *rsp = NULL;
2055 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2056 struct scsi_qla_host *vha = sp->fcport->vha;
2057 struct qla_hw_data *ha = vha->hw;
2058 struct cmd_type_crc_2 *cmd_pkt;
2059 uint32_t status = 0;
2060 struct qla_qpair *qpair = sp->qpair;
2061
2062#define QDSS_GOT_Q_SPACE BIT_0
2063
2064 /* Check for host side state */
2065 if (!qpair->online) {
2066 cmd->result = DID_NO_CONNECT << 16;
2067 return QLA_INTERFACE_ERROR;
2068 }
2069
2070 if (!qpair->difdix_supported &&
2071 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2072 cmd->result = DID_NO_CONNECT << 16;
2073 return QLA_INTERFACE_ERROR;
2074 }
2075
 2076 /* Only process protected I/O or CDBs longer than 16 bytes in this routine */
2077 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2078 if (cmd->cmd_len <= 16)
2079 return qla2xxx_start_scsi_mq(sp);
2080 }
2081
578079fa
JT
2082 spin_lock_irqsave(&qpair->qp_lock, flags);
2083
d7459527
MH
2084 /* Setup qpair pointers */
2085 rsp = qpair->rsp;
2086 req = qpair->req;
2087
2088 /* So we know we haven't pci_map'ed anything yet */
2089 tot_dsds = 0;
2090
2091 /* Send marker if required */
2092 if (vha->marker_needed != 0) {
9eb9c6dc 2093 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
578079fa
JT
2094 QLA_SUCCESS) {
2095 spin_unlock_irqrestore(&qpair->qp_lock, flags);
d7459527 2096 return QLA_FUNCTION_FAILED;
578079fa 2097 }
d7459527
MH
2098 vha->marker_needed = 0;
2099 }
2100
bcc85657
BVA
2101 handle = qla2xxx_get_next_handle(req);
2102 if (handle == 0)
d7459527
MH
2103 goto queuing_error;
2104
2105 /* Compute number of required data segments */
2106 /* Map the sg table so we have an accurate count of sg entries needed */
2107 if (scsi_sg_count(cmd)) {
2108 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2109 scsi_sg_count(cmd), cmd->sc_data_direction);
2110 if (unlikely(!nseg))
2111 goto queuing_error;
2112 else
2113 sp->flags |= SRB_DMA_VALID;
2114
2115 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2116 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2117 struct qla2_sgx sgx;
2118 uint32_t partial;
2119
2120 memset(&sgx, 0, sizeof(struct qla2_sgx));
2121 sgx.tot_bytes = scsi_bufflen(cmd);
2122 sgx.cur_sg = scsi_sglist(cmd);
2123 sgx.sp = sp;
2124
2125 nseg = 0;
2126 while (qla24xx_get_one_block_sg(
2127 cmd->device->sector_size, &sgx, &partial))
2128 nseg++;
2129 }
2130 } else
2131 nseg = 0;
2132
2133 /* number of required data segments */
2134 tot_dsds = nseg;
2135
2136 /* Compute number of required protection segments */
2137 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2138 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2139 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2140 if (unlikely(!nseg))
2141 goto queuing_error;
2142 else
2143 sp->flags |= SRB_CRC_PROT_DMA_VALID;
2144
2145 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2146 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2147 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2148 }
2149 } else {
2150 nseg = 0;
2151 }
2152
2153 req_cnt = 1;
2154 /* Total Data and protection sg segment(s) */
2155 tot_prot_dsds = nseg;
2156 tot_dsds += nseg;
2157 if (req->cnt < (req_cnt + 2)) {
2158 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
04474d3a 2159 rd_reg_dword_relaxed(req->req_q_out);
d7459527
MH
2160 if (req->ring_index < cnt)
2161 req->cnt = cnt - req->ring_index;
2162 else
2163 req->cnt = req->length -
2164 (req->ring_index - cnt);
2165 if (req->cnt < (req_cnt + 2))
2166 goto queuing_error;
2167 }
2168
2169 status |= QDSS_GOT_Q_SPACE;
2170
2171 /* Build header part of command packet (excluding the OPCODE). */
2172 req->current_outstanding_cmd = handle;
2173 req->outstanding_cmds[handle] = sp;
2174 sp->handle = handle;
2175 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2176 req->cnt -= req_cnt;
2177
2178 /* Fill-in common area */
2179 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
c25eb70a 2180 cmd_pkt->handle = make_handle(req->id, handle);
d7459527
MH
2181
2182 clr_ptr = (uint32_t *)cmd_pkt + 2;
2183 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2184
2185 /* Set NPORT-ID and LUN number*/
2186 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2187 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2188 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2189 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
68ca949c 2190
d7459527
MH
2191 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2192 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2193
2194 /* Total Data and protection segment(s) */
2195 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2196
2197 /* Build IOCB segments and adjust for data protection segments */
2198 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2199 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2200 QLA_SUCCESS)
2201 goto queuing_error;
2202
2203 cmd_pkt->entry_count = (uint8_t)req_cnt;
2204 cmd_pkt->timeout = cpu_to_le16(0);
2205 wmb();
2206
2207 /* Adjust ring index. */
2208 req->ring_index++;
2209 if (req->ring_index == req->length) {
2210 req->ring_index = 0;
2211 req->ring_ptr = req->ring;
2212 } else
2213 req->ring_ptr++;
2214
2215 /* Set chip new ring index. */
04474d3a 2216 wrt_reg_dword(req->req_q_in, req->ring_index);
d7459527
MH
2217
2218 /* Manage unprocessed RIO/ZIO commands in response queue. */
2219 if (vha->flags.process_response_queue &&
2220 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2221 qla24xx_process_response_queue(vha, rsp);
2222
2223 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2224
2225 return QLA_SUCCESS;
2226
2227queuing_error:
2228 if (status & QDSS_GOT_Q_SPACE) {
2229 req->outstanding_cmds[handle] = NULL;
2230 req->cnt += req_cnt;
2231 }
2232 /* Cleanup will be performed by the caller (queuecommand) */
2233
2234 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2235 return QLA_FUNCTION_FAILED;
68ca949c 2236}
ac280b67
AV
2237
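/*
 * Illustrative sketch (hypothetical helper, not part of the driver): for
 * DIF operations where the HBA inserts or strips protection data
 * (SCSI_PROT_READ_INSERT / SCSI_PROT_WRITE_STRIP), the routines above
 * carve the transfer into one segment per logical block, so the segment
 * count reduces to:
 */
static inline uint32_t example_dif_seg_count(uint32_t bufflen,
    uint32_t sector_size)
{
	/* e.g. a 64 KiB transfer of 512-byte blocks needs 128 segments */
	return bufflen / sector_size;
}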
2238/* Generic Control-SRB manipulation functions. */
b6a029e1
AE
2239
2240/* hardware_lock assumed to be held. */
b6a029e1 2241
d94d10e7 2242void *
82de802a 2243__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
ac280b67 2244{
82de802a 2245 scsi_qla_host_t *vha = qpair->vha;
ac280b67 2246 struct qla_hw_data *ha = vha->hw;
82de802a 2247 struct req_que *req = qpair->req;
118e2ef9 2248 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
bcc85657 2249 uint32_t handle;
ac280b67
AV
2250 request_t *pkt;
2251 uint16_t cnt, req_cnt;
2252
2253 pkt = NULL;
2254 req_cnt = 1;
d94d10e7
GM
2255 handle = 0;
2256
5e53be8e
QT
2257 if (sp && (sp->type != SRB_SCSI_CMD)) {
2258 /* Adjust entry-counts as needed. */
9ba56b95 2259 req_cnt = sp->iocbs;
5e53be8e 2260 }
5780790e 2261
ac280b67 2262 /* Check for room on request queue. */
94007037 2263 if (req->cnt < req_cnt + 2) {
1586e07a
QT
2264 if (qpair->use_shadow_reg)
2265 cnt = *req->out_ptr;
ecc89f25
JC
2266 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2267 IS_QLA28XX(ha))
04474d3a 2268 cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
7ec0effd 2269 else if (IS_P3P_TYPE(ha))
04474d3a 2270 cnt = rd_reg_dword(reg->isp82.req_q_out);
ac280b67 2271 else if (IS_FWI2_CAPABLE(ha))
04474d3a 2272 cnt = rd_reg_dword(&reg->isp24.req_q_out);
8ae6d9c7 2273 else if (IS_QLAFX00(ha))
04474d3a 2274 cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
ac280b67
AV
2275 else
2276 cnt = qla2x00_debounce_register(
2277 ISP_REQ_Q_OUT(ha, &reg->isp));
2278
2279 if (req->ring_index < cnt)
2280 req->cnt = cnt - req->ring_index;
2281 else
2282 req->cnt = req->length -
2283 (req->ring_index - cnt);
2284 }
94007037 2285 if (req->cnt < req_cnt + 2)
ac280b67
AV
2286 goto queuing_error;
2287
5e53be8e 2288 if (sp) {
bcc85657
BVA
2289 handle = qla2xxx_get_next_handle(req);
2290 if (handle == 0) {
5e53be8e
QT
2291 ql_log(ql_log_warn, vha, 0x700b,
2292 "No room on outstanding cmd array.\n");
2293 goto queuing_error;
2294 }
2295
2296 /* Prep command array. */
2297 req->current_outstanding_cmd = handle;
2298 req->outstanding_cmds[handle] = sp;
2299 sp->handle = handle;
2300 }
2301
ac280b67 2302 /* Prep packet */
ac280b67 2303 req->cnt -= req_cnt;
ac280b67
AV
2304 pkt = req->ring_ptr;
2305 memset(pkt, 0, REQUEST_ENTRY_SIZE);
8ae6d9c7 2306 if (IS_QLAFX00(ha)) {
f8f12bda
BVA
2307 wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
2308 wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
8ae6d9c7
GM
2309 } else {
2310 pkt->entry_count = req_cnt;
2311 pkt->handle = handle;
2312 }
ac280b67 2313
5e53be8e
QT
2314 return pkt;
2315
ac280b67 2316queuing_error:
60a9eadb 2317 qpair->tgt_counters.num_alloc_iocb_failed++;
ac280b67
AV
2318 return pkt;
2319}
2320
82de802a
QT
2321void *
2322qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2323{
2324 scsi_qla_host_t *vha = qpair->vha;
2325
2326 if (qla2x00_reset_active(vha))
2327 return NULL;
2328
2329 return __qla2x00_alloc_iocbs(qpair, sp);
2330}
2331
2332void *
2333qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2334{
2335 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2336}
2337
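/*
 * Illustrative usage sketch (hypothetical caller, not part of the driver):
 * IOCB producers allocate a ring slot and then fill it in while holding
 * the appropriate ring lock, e.g.:
 *
 *	pkt = qla2x00_alloc_iocbs(vha, sp);
 *	if (!pkt)
 *		return QLA_FUNCTION_FAILED;
 *	qla24xx_login_iocb(sp, (struct logio_entry_24xx *)pkt);
 *	wmb();
 */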
a5d42f4c
DG
2338static void
2339qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2340{
2341 struct srb_iocb *lio = &sp->u.iocb_cmd;
2342
2343 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2344 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
03aaa89f 2345 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
7ffa5b93 2346 logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
03aaa89f 2347 if (sp->vha->flags.nvme_first_burst)
7ffa5b93
BVA
2348 logio->io_parameter[0] =
2349 cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
03aaa89f 2350 }
a5d42f4c
DG
2351
2352 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2353 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2354 logio->port_id[1] = sp->fcport->d_id.b.area;
2355 logio->port_id[2] = sp->fcport->d_id.b.domain;
2356 logio->vp_index = sp->vha->vp_idx;
2357}
2358
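/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * IOCB builders above store the 24-bit FC N_Port ID least-significant
 * byte first (al_pa, area, domain), the reverse of the wire order:
 */
static inline void example_pack_port_id(uint8_t dst[3], uint8_t domain,
    uint8_t area, uint8_t al_pa)
{
	dst[0] = al_pa;		/* bits  7:0  of the 24-bit address */
	dst[1] = area;		/* bits 15:8 */
	dst[2] = domain;	/* bits 23:16 */
}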
ac280b67
AV
2359static void
2360qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2361{
9ba56b95 2362 struct srb_iocb *lio = &sp->u.iocb_cmd;
ac280b67
AV
2363
2364 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
419ae5fe
HM
2365 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2366
48acad09
QT
2367 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2368 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2369 } else {
2370 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2371 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2372 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2373 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2374 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2375 }
ac280b67
AV
2376 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2377 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2378 logio->port_id[1] = sp->fcport->d_id.b.area;
2379 logio->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 2380 logio->vp_index = sp->vha->vp_idx;
ac280b67
AV
2381}
2382
2383static void
2384qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2385{
25ff6af1 2386 struct qla_hw_data *ha = sp->vha->hw;
9ba56b95 2387 struct srb_iocb *lio = &sp->u.iocb_cmd;
ac280b67
AV
2388 uint16_t opts;
2389
b963752f 2390 mbx->entry_type = MBX_IOCB_TYPE;
ac280b67
AV
2391 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2392 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
4916392b
MI
2393 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2394 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
ac280b67
AV
2395 if (HAS_EXTENDED_IDS(ha)) {
2396 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2397 mbx->mb10 = cpu_to_le16(opts);
2398 } else {
2399 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2400 }
2401 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2402 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2403 sp->fcport->d_id.b.al_pa);
25ff6af1 2404 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
ac280b67
AV
2405}
2406
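/*
 * Illustrative sketch (hypothetical helper, not part of the driver): on
 * ISPs without extended IDs, qla2x00_login_iocb() above shares mailbox 1
 * between the loop id (upper byte) and the option bits (lower byte):
 */
static inline uint16_t example_pack_mb1(uint8_t loop_id, uint8_t opts)
{
	return ((uint16_t)loop_id << 8) | opts;
}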
2407static void
2408qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2409{
86196a8f 2410 u16 control_flags = LCF_COMMAND_LOGO;
ac280b67 2411 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
86196a8f
QT
2412
2413 if (sp->fcport->explicit_logout) {
2414 control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2415 } else {
2416 control_flags |= LCF_IMPL_LOGO;
2417
2418 if (!sp->fcport->keep_nport_handle)
2419 control_flags |= LCF_FREE_NPORT;
2420 }
2421
2422 logio->control_flags = cpu_to_le16(control_flags);
ac280b67
AV
2423 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2424 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2425 logio->port_id[1] = sp->fcport->d_id.b.area;
2426 logio->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 2427 logio->vp_index = sp->vha->vp_idx;
ac280b67
AV
2428}
2429
2430static void
2431qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2432{
25ff6af1 2433 struct qla_hw_data *ha = sp->vha->hw;
ac280b67 2434
b963752f 2435 mbx->entry_type = MBX_IOCB_TYPE;
ac280b67
AV
2436 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2437 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2438 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
58e2753c 2439 cpu_to_le16(sp->fcport->loop_id) :
ac280b67
AV
2440 cpu_to_le16(sp->fcport->loop_id << 8);
2441 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2442 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2443 sp->fcport->d_id.b.al_pa);
25ff6af1 2444 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
ac280b67
AV
2445 /* Implicit: mbx->mbx10 = 0. */
2446}
2447
5ff1d584
AV
2448static void
2449qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2450{
2451 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2452 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2453 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
25ff6af1 2454 logio->vp_index = sp->vha->vp_idx;
5ff1d584
AV
2455}
2456
2457static void
2458qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2459{
25ff6af1 2460 struct qla_hw_data *ha = sp->vha->hw;
5ff1d584
AV
2461
2462 mbx->entry_type = MBX_IOCB_TYPE;
2463 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2464 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2465 if (HAS_EXTENDED_IDS(ha)) {
2466 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2467 mbx->mb10 = cpu_to_le16(BIT_0);
2468 } else {
2469 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2470 }
2471 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2472 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2473 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2474 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
25ff6af1 2475 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
5ff1d584
AV
2476}
2477
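/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * ADISC mailbox IOCB above spreads the 64-bit async_pd_dma address over
 * four 16-bit mailboxes via the LSW/MSW/MSD accessors:
 */
static inline void example_split_dma_addr(uint64_t addr, uint16_t mb[4])
{
	mb[0] = addr & 0xffff;		/* LSW(addr)      -> mb3 */
	mb[1] = (addr >> 16) & 0xffff;	/* MSW(addr)      -> mb2 */
	mb[2] = (addr >> 32) & 0xffff;	/* LSW(MSD(addr)) -> mb7 */
	mb[3] = (addr >> 48) & 0xffff;	/* MSW(MSD(addr)) -> mb6 */
}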
3822263e
MI
2478static void
2479qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2480{
2481 uint32_t flags;
9cb78c16 2482 uint64_t lun;
3822263e
MI
2483 struct fc_port *fcport = sp->fcport;
2484 scsi_qla_host_t *vha = fcport->vha;
2485 struct qla_hw_data *ha = vha->hw;
9ba56b95 2486 struct srb_iocb *iocb = &sp->u.iocb_cmd;
3822263e
MI
2487 struct req_que *req = vha->req;
2488
2489 flags = iocb->u.tmf.flags;
2490 lun = iocb->u.tmf.lun;
2491
2492 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2493 tsk->entry_count = 1;
c25eb70a 2494 tsk->handle = make_handle(req->id, tsk->handle);
3822263e
MI
2495 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2496 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2497 tsk->control_flags = cpu_to_le32(flags);
2498 tsk->port_id[0] = fcport->d_id.b.al_pa;
2499 tsk->port_id[1] = fcport->d_id.b.area;
2500 tsk->port_id[2] = fcport->d_id.b.domain;
c6d39e23 2501 tsk->vp_index = fcport->vha->vp_idx;
3822263e
MI
2502
2503 if (flags == TCF_LUN_RESET) {
2504 int_to_scsilun(lun, &tsk->lun);
2505 host_to_fcp_swap((uint8_t *)&tsk->lun,
2506 sizeof(tsk->lun));
2507 }
2508}
2509
12975426
BVA
2510void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2511{
2512 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2513 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2514 sp->free = qla2x00_sp_free;
12975426
BVA
2515 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2516 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
3a4b6cc7 2517 sp->start_timer = 1;
12975426
BVA
2518}
2519
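/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * qla2x00_init_timer() above arms the SRB timer 'tmo' seconds in the
 * future by converting seconds to kernel ticks:
 */
static inline unsigned long example_timer_expiry(unsigned long tmo_secs)
{
	return jiffies + tmo_secs * HZ;	/* HZ ticks per second */
}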
6c18a43e 2520static void qla2x00_els_dcmd_sp_free(srb_t *sp)
6eb54715 2521{
6eb54715
HM
2522 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2523
2524 kfree(sp->fcport);
2525
2526 if (elsio->u.els_logo.els_logo_pyld)
25ff6af1 2527 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
6eb54715
HM
2528 elsio->u.els_logo.els_logo_pyld,
2529 elsio->u.els_logo.els_logo_pyld_dma);
2530
2531 del_timer(&elsio->timer);
25ff6af1 2532 qla2x00_rel_sp(sp);
6eb54715
HM
2533}
2534
2535static void
2536qla2x00_els_dcmd_iocb_timeout(void *data)
2537{
25ff6af1 2538 srb_t *sp = data;
6eb54715 2539 fc_port_t *fcport = sp->fcport;
25ff6af1 2540 struct scsi_qla_host *vha = sp->vha;
25ff6af1 2541 struct srb_iocb *lio = &sp->u.iocb_cmd;
f57a0107
QT
2542 unsigned long flags = 0;
2543 int res, h;
6eb54715
HM
2544
2545 ql_dbg(ql_dbg_io, vha, 0x3069,
2546 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2547 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2548 fcport->d_id.b.al_pa);
2549
f57a0107
QT
2550 /* Abort the exchange */
2551 res = qla24xx_async_abort_cmd(sp, false);
2552 if (res) {
2553 ql_dbg(ql_dbg_io, vha, 0x3070,
2554 "mbx abort_command failed.\n");
2555 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2556 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2557 if (sp->qpair->req->outstanding_cmds[h] == sp) {
2558 sp->qpair->req->outstanding_cmds[h] = NULL;
2559 break;
2560 }
2561 }
2562 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2563 complete(&lio->u.els_logo.comp);
2564 } else {
2565 ql_dbg(ql_dbg_io, vha, 0x3071,
2566 "mbx abort_command success.\n");
2567 }
6eb54715
HM
2568}
2569
6c18a43e 2570static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
6eb54715 2571{
6eb54715
HM
2572 fc_port_t *fcport = sp->fcport;
2573 struct srb_iocb *lio = &sp->u.iocb_cmd;
25ff6af1 2574 struct scsi_qla_host *vha = sp->vha;
6eb54715
HM
2575
2576 ql_dbg(ql_dbg_io, vha, 0x3072,
2577 "%s hdl=%x, portid=%02x%02x%02x done\n",
2578 sp->name, sp->handle, fcport->d_id.b.domain,
2579 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2580
2581 complete(&lio->u.els_logo.comp);
2582}
2583
2584int
2585qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2586 port_id_t remote_did)
2587{
2588 srb_t *sp;
2589 fc_port_t *fcport = NULL;
2590 struct srb_iocb *elsio = NULL;
2591 struct qla_hw_data *ha = vha->hw;
2592 struct els_logo_payload logo_pyld;
2593 int rval = QLA_SUCCESS;
2594
2595 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2596 if (!fcport) {
2597 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2598 return -ENOMEM;
2599 }
2600
2601 /* Alloc SRB structure */
2602 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2603 if (!sp) {
2604 kfree(fcport);
2605 ql_log(ql_log_info, vha, 0x70e6,
2606 "SRB allocation failed\n");
2607 return -ENOMEM;
2608 }
2609
2610 elsio = &sp->u.iocb_cmd;
2611 fcport->loop_id = 0xFFFF;
2612 fcport->d_id.b.domain = remote_did.b.domain;
2613 fcport->d_id.b.area = remote_did.b.area;
2614 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2615
2616 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2617 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2618
2619 sp->type = SRB_ELS_DCMD;
2620 sp->name = "ELS_DCMD";
2621 sp->fcport = fcport;
6eb54715 2622 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
e74e7d95 2623 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
8777e431 2624 init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
6eb54715
HM
2625 sp->done = qla2x00_els_dcmd_sp_done;
2626 sp->free = qla2x00_els_dcmd_sp_free;
2627
2628 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2629 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2630 GFP_KERNEL);
2631
2632 if (!elsio->u.els_logo.els_logo_pyld) {
25ff6af1 2633 sp->free(sp);
6eb54715
HM
2634 return QLA_FUNCTION_FAILED;
2635 }
2636
2637 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2638
2639 elsio->u.els_logo.els_cmd = els_opcode;
2640 logo_pyld.opcode = els_opcode;
2641 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2642 logo_pyld.s_id[1] = vha->d_id.b.area;
2643 logo_pyld.s_id[2] = vha->d_id.b.domain;
2644 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2645 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2646
2647 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2648 sizeof(struct els_logo_payload));
9545767d
RB
2649 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2650 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2651 elsio->u.els_logo.els_logo_pyld,
2652 sizeof(*elsio->u.els_logo.els_logo_pyld));
6eb54715
HM
2653
2654 rval = qla2x00_start_sp(sp);
2655 if (rval != QLA_SUCCESS) {
25ff6af1 2656 sp->free(sp);
6eb54715
HM
2657 return QLA_FUNCTION_FAILED;
2658 }
2659
2660 ql_dbg(ql_dbg_io, vha, 0x3074,
2661 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2662 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2663 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2664
2665 wait_for_completion(&elsio->u.els_logo.comp);
2666
25ff6af1 2667 sp->free(sp);
6eb54715
HM
2668 return rval;
2669}
2670
2671static void
2672qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2673{
25ff6af1 2674 scsi_qla_host_t *vha = sp->vha;
6eb54715
HM
2675 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2676
2677 els_iocb->entry_type = ELS_IOCB_TYPE;
2678 els_iocb->entry_count = 1;
2679 els_iocb->sys_define = 0;
2680 els_iocb->entry_status = 0;
2681 els_iocb->handle = sp->handle;
2682 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
7ffa5b93 2683 els_iocb->tx_dsd_count = cpu_to_le16(1);
6eb54715
HM
2684 els_iocb->vp_index = vha->vp_idx;
2685 els_iocb->sof_type = EST_SOFI3;
2686 els_iocb->rx_dsd_count = 0;
2687 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2688
09e382bc
JC
2689 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
2690 els_iocb->d_id[1] = sp->fcport->d_id.b.area;
2691 els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
0aabb6b6
QT
 2692 /* For the S_ID the byte order is different from the D_ID */
2693 els_iocb->s_id[1] = vha->d_id.b.al_pa;
2694 els_iocb->s_id[2] = vha->d_id.b.area;
2695 els_iocb->s_id[0] = vha->d_id.b.domain;
6eb54715 2696
edd05de1 2697 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
0f8243e6 2698 els_iocb->control_flags = 0;
8777e431 2699 els_iocb->tx_byte_count = els_iocb->tx_len =
9933c050 2700 cpu_to_le32(sizeof(struct els_plogi_payload));
d4556a49
BVA
2701 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2702 &els_iocb->tx_address);
7ffa5b93 2703 els_iocb->rx_dsd_count = cpu_to_le16(1);
8777e431 2704 els_iocb->rx_byte_count = els_iocb->rx_len =
9933c050 2705 cpu_to_le32(sizeof(struct els_plogi_payload));
d4556a49
BVA
2706 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2707 &els_iocb->rx_address);
8777e431 2708
edd05de1
DG
2709 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2710 "PLOGI ELS IOCB:\n");
2711 ql_dump_buffer(ql_log_info, vha, 0x0109,
0334cdea
RB
2712 (uint8_t *)els_iocb,
2713 sizeof(*els_iocb));
edd05de1 2714 } else {
7ffa5b93 2715 els_iocb->control_flags = cpu_to_le16(1 << 13);
9933c050
BVA
2716 els_iocb->tx_byte_count =
2717 cpu_to_le32(sizeof(struct els_logo_payload));
d4556a49
BVA
2718 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2719 &els_iocb->tx_address);
edd05de1 2720 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
6eb54715 2721
edd05de1 2722 els_iocb->rx_byte_count = 0;
d4556a49 2723 els_iocb->rx_address = 0;
edd05de1 2724 els_iocb->rx_len = 0;
9545767d
RB
2725 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2726 "LOGO ELS IOCB:");
2727 ql_dump_buffer(ql_log_info, vha, 0x010b,
2728 els_iocb,
2729 sizeof(*els_iocb));
edd05de1 2730 }
6eb54715 2731
25ff6af1 2732 sp->vha->qla_stats.control_requests++;
6eb54715
HM
2733}
2734
edd05de1
DG
2735static void
2736qla2x00_els_dcmd2_iocb_timeout(void *data)
2737{
2738 srb_t *sp = data;
2739 fc_port_t *fcport = sp->fcport;
2740 struct scsi_qla_host *vha = sp->vha;
edd05de1 2741 unsigned long flags = 0;
f57a0107 2742 int res, h;
edd05de1
DG
2743
2744 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2745 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2746 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2747
2748 /* Abort the exchange */
f57a0107 2749 res = qla24xx_async_abort_cmd(sp, false);
edd05de1
DG
2750 ql_dbg(ql_dbg_io, vha, 0x3070,
2751 "mbx abort_command %s\n",
2752 (res == QLA_SUCCESS) ? "successful" : "failed");
f57a0107
QT
2753 if (res) {
2754 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2755 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2756 if (sp->qpair->req->outstanding_cmds[h] == sp) {
2757 sp->qpair->req->outstanding_cmds[h] = NULL;
2758 break;
2759 }
2760 }
2761 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2762 sp->done(sp, QLA_FUNCTION_TIMEOUT);
2763 }
edd05de1
DG
2764}
2765
c6e58160
BVA
2766void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2767{
2768 if (els_plogi->els_plogi_pyld)
2769 dma_free_coherent(&vha->hw->pdev->dev,
2770 els_plogi->tx_size,
2771 els_plogi->els_plogi_pyld,
2772 els_plogi->els_plogi_pyld_dma);
2773
2774 if (els_plogi->els_resp_pyld)
2775 dma_free_coherent(&vha->hw->pdev->dev,
2776 els_plogi->rx_size,
2777 els_plogi->els_resp_pyld,
2778 els_plogi->els_resp_pyld_dma);
2779}
2780
6c18a43e 2781static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
edd05de1 2782{
edd05de1
DG
2783 fc_port_t *fcport = sp->fcport;
2784 struct srb_iocb *lio = &sp->u.iocb_cmd;
2785 struct scsi_qla_host *vha = sp->vha;
8777e431
QT
2786 struct event_arg ea;
2787 struct qla_work_evt *e;
c76ae845
QT
2788 struct fc_port *conflict_fcport;
2789 port_id_t cid; /* conflict Nport id */
7ffa5b93 2790 const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
c76ae845 2791 u16 lid;
8777e431
QT
2792
2793 ql_dbg(ql_dbg_disc, vha, 0x3072,
2794 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2795 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
edd05de1 2796
8777e431
QT
2797 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2798 del_timer(&sp->u.iocb_cmd.timer);
edd05de1 2799
8777e431
QT
2800 if (sp->flags & SRB_WAKEUP_ON_COMP)
2801 complete(&lio->u.els_plogi.comp);
2802 else {
7ffa5b93 2803 switch (le32_to_cpu(fw_status[0])) {
c76ae845
QT
2804 case CS_DATA_UNDERRUN:
2805 case CS_COMPLETE:
8777e431
QT
2806 memset(&ea, 0, sizeof(ea));
2807 ea.fcport = fcport;
65e92009
AE
2808 ea.rc = res;
2809 qla_handle_els_plogi_done(vha, &ea);
c76ae845 2810 break;
983f1276 2811
c76ae845 2812 case CS_IOCB_ERROR:
7ffa5b93 2813 switch (le32_to_cpu(fw_status[1])) {
c76ae845 2814 case LSC_SCODE_PORTID_USED:
7ffa5b93 2815 lid = le32_to_cpu(fw_status[2]) & 0xffff;
c76ae845
QT
2816 qlt_find_sess_invalidate_other(vha,
2817 wwn_to_u64(fcport->port_name),
2818 fcport->d_id, lid, &conflict_fcport);
2819 if (conflict_fcport) {
2820 /*
2821 * Another fcport shares the same
2822 * loop_id & nport id; conflict
2823 * fcport needs to finish cleanup
2824 * before this fcport can proceed
2825 * to login.
2826 */
2827 conflict_fcport->conflict = fcport;
2828 fcport->login_pause = 1;
2829 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2830 "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
2831 __func__, __LINE__,
2832 fcport->port_name,
2833 fcport->d_id.b24, lid);
2834 } else {
2835 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2836 "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2837 __func__, __LINE__,
2838 fcport->port_name,
2839 fcport->d_id.b24, lid);
2840 qla2x00_clear_loop_id(fcport);
2841 set_bit(lid, vha->hw->loop_id_map);
2842 fcport->loop_id = lid;
2843 fcport->keep_nport_handle = 0;
2844 qlt_schedule_sess_for_deletion(fcport);
2845 }
2846 break;
2847
2848 case LSC_SCODE_NPORT_USED:
7ffa5b93
BVA
2849 cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
2850 & 0xff;
2851 cid.b.area = (le32_to_cpu(fw_status[2]) >> 8)
2852 & 0xff;
2853 cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff;
c76ae845
QT
2854 cid.b.rsvd_1 = 0;
2855
2856 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2857 "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2858 __func__, __LINE__, fcport->port_name,
2859 fcport->loop_id, cid.b24);
2860 set_bit(fcport->loop_id,
2861 vha->hw->loop_id_map);
2862 fcport->loop_id = FC_NO_LOOP_ID;
2863 qla24xx_post_gnl_work(vha, fcport);
2864 break;
2865
2866 case LSC_SCODE_NOXCB:
2867 vha->hw->exch_starvation++;
2868 if (vha->hw->exch_starvation > 5) {
2869 ql_log(ql_log_warn, vha, 0xd046,
2870 "Exchange starvation. Resetting RISC\n");
2871 vha->hw->exch_starvation = 0;
2872 set_bit(ISP_ABORT_NEEDED,
2873 &vha->dpc_flags);
2874 qla2xxx_wake_dpc(vha);
2875 }
df561f66 2876 fallthrough;
c76ae845
QT
2877 default:
2878 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2879 "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2880 __func__, sp->fcport->port_name,
2881 fw_status[0], fw_status[1], fw_status[2]);
2882
2883 fcport->flags &= ~FCF_ASYNC_SENT;
27258a57
SS
2884 qla2x00_set_fcport_disc_state(fcport,
2885 DSC_LOGIN_FAILED);
c76ae845
QT
2886 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2887 break;
2888 }
2889 break;
2890
2891 default:
2892 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2893 "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
2894 __func__, sp->fcport->port_name,
2895 fw_status[0], fw_status[1], fw_status[2]);
2896
2897 sp->fcport->flags &= ~FCF_ASYNC_SENT;
27258a57 2898 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
c76ae845
QT
2899 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2900 break;
8777e431 2901 }
edd05de1 2902
8777e431
QT
2903 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2904 if (!e) {
2905 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2906
c6e58160 2907 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
8777e431 2908 sp->free(sp);
e9f7be0c 2909 return;
8777e431
QT
2910 }
2911 e->u.iosb.sp = sp;
2912 qla2x00_post_work(vha, e);
2913 }
edd05de1
DG
2914}
2915
2916int
2917qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
8777e431 2918 fc_port_t *fcport, bool wait)
edd05de1
DG
2919{
2920 srb_t *sp;
2921 struct srb_iocb *elsio = NULL;
2922 struct qla_hw_data *ha = vha->hw;
2923 int rval = QLA_SUCCESS;
2924 void *ptr, *resp_ptr;
edd05de1
DG
2925
2926 /* Alloc SRB structure */
2927 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2928 if (!sp) {
2929 ql_log(ql_log_info, vha, 0x70e6,
2930 "SRB allocation failed\n");
17e64648 2931 fcport->flags &= ~FCF_ASYNC_ACTIVE;
edd05de1
DG
2932 return -ENOMEM;
2933 }
2934
983f1276 2935 fcport->flags |= FCF_ASYNC_SENT;
27258a57 2936 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
edd05de1 2937 elsio = &sp->u.iocb_cmd;
edd05de1
DG
2938 ql_dbg(ql_dbg_io, vha, 0x3073,
2939 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2940
2941 sp->type = SRB_ELS_DCMD;
2942 sp->name = "ELS_DCMD";
2943 sp->fcport = fcport;
e74e7d95 2944
edd05de1 2945 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
8777e431
QT
2946 if (wait)
2947 sp->flags = SRB_WAKEUP_ON_COMP;
2948
2949 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
e74e7d95 2950
edd05de1 2951 sp->done = qla2x00_els_dcmd2_sp_done;
8777e431 2952 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
edd05de1
DG
2953
2954 ptr = elsio->u.els_plogi.els_plogi_pyld =
419ae5fe 2955 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
edd05de1 2956 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
edd05de1
DG
2957
2958 if (!elsio->u.els_plogi.els_plogi_pyld) {
2959 rval = QLA_FUNCTION_FAILED;
2960 goto out;
2961 }
2962
2963 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
419ae5fe 2964 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
edd05de1
DG
2965 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2966
2967 if (!elsio->u.els_plogi.els_resp_pyld) {
2968 rval = QLA_FUNCTION_FAILED;
2969 goto out;
2970 }
2971
2972 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2973
2974 memset(ptr, 0, sizeof(struct els_plogi_payload));
2975 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
8777e431
QT
2976 memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2977 &ha->plogi_els_payld.data,
2978 sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2979
edd05de1
DG
2980 elsio->u.els_plogi.els_cmd = els_opcode;
2981 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
edd05de1 2982
8777e431
QT
2983 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2984 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
0334cdea
RB
2985 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
2986 sizeof(*elsio->u.els_plogi.els_plogi_pyld));
edd05de1 2987
419ae5fe 2988 init_completion(&elsio->u.els_plogi.comp);
edd05de1
DG
2989 rval = qla2x00_start_sp(sp);
2990 if (rval != QLA_SUCCESS) {
2991 rval = QLA_FUNCTION_FAILED;
8777e431
QT
2992 } else {
2993 ql_dbg(ql_dbg_disc, vha, 0x3074,
2994 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2995 sp->name, sp->handle, fcport->loop_id,
2996 fcport->d_id.b24, vha->d_id.b24);
edd05de1
DG
2997 }
2998
8777e431
QT
2999 if (wait) {
3000 wait_for_completion(&elsio->u.els_plogi.comp);
edd05de1 3001
8777e431
QT
3002 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
3003 rval = QLA_FUNCTION_FAILED;
3004 } else {
3005 goto done;
3006 }
edd05de1
DG
3007
3008out:
17e64648 3009 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
c6e58160 3010 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
edd05de1 3011 sp->free(sp);
8777e431 3012done:
edd05de1
DG
3013 return rval;
3014}
3015
9a069e19
GM
3016static void
3017qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
3018{
75cc8cfc 3019 struct bsg_job *bsg_job = sp->u.bsg_job;
01e0e15c 3020 struct fc_bsg_request *bsg_request = bsg_job->request;
9a069e19
GM
3021
3022 els_iocb->entry_type = ELS_IOCB_TYPE;
3023 els_iocb->entry_count = 1;
3024 els_iocb->sys_define = 0;
3025 els_iocb->entry_status = 0;
3026 els_iocb->handle = sp->handle;
7ffa5b93 3027 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
ad950360 3028 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
25ff6af1 3029 els_iocb->vp_index = sp->vha->vp_idx;
9a069e19 3030 els_iocb->sof_type = EST_SOFI3;
ad950360 3031 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
9a069e19 3032
4916392b 3033 els_iocb->opcode =
9ba56b95 3034 sp->type == SRB_ELS_CMD_RPT ?
01e0e15c
JT
3035 bsg_request->rqst_data.r_els.els_code :
3036 bsg_request->rqst_data.h_els.command_code;
09e382bc
JC
3037 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
3038 els_iocb->d_id[1] = sp->fcport->d_id.b.area;
3039 els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
9a069e19
GM
3040 els_iocb->control_flags = 0;
3041 els_iocb->rx_byte_count =
3042 cpu_to_le32(bsg_job->reply_payload.payload_len);
3043 els_iocb->tx_byte_count =
3044 cpu_to_le32(bsg_job->request_payload.payload_len);
3045
d4556a49
BVA
3046 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3047 &els_iocb->tx_address);
9a069e19
GM
3048 els_iocb->tx_len = cpu_to_le32(sg_dma_len
3049 (bsg_job->request_payload.sg_list));
3050
d4556a49
BVA
3051 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3052 &els_iocb->rx_address);
9a069e19
GM
3053 els_iocb->rx_len = cpu_to_le32(sg_dma_len
3054 (bsg_job->reply_payload.sg_list));
fabbb8df 3055
25ff6af1 3056 sp->vha->qla_stats.control_requests++;
9a069e19
GM
3057}
3058
9bc4f4fb
HZ
3059static void
3060qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3061{
3062 uint16_t avail_dsds;
15b7a68c 3063 struct dsd64 *cur_dsd;
9bc4f4fb
HZ
3064 struct scatterlist *sg;
3065 int index;
3066 uint16_t tot_dsds;
25ff6af1 3067 scsi_qla_host_t *vha = sp->vha;
9bc4f4fb 3068 struct qla_hw_data *ha = vha->hw;
75cc8cfc 3069 struct bsg_job *bsg_job = sp->u.bsg_job;
9bc4f4fb
HZ
3070 int entry_count = 1;
3071
3072 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3073 ct_iocb->entry_type = CT_IOCB_TYPE;
3074 ct_iocb->entry_status = 0;
3075 ct_iocb->handle1 = sp->handle;
3076 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
ad950360
BVA
3077 ct_iocb->status = cpu_to_le16(0);
3078 ct_iocb->control_flags = cpu_to_le16(0);
9bc4f4fb
HZ
3079 ct_iocb->timeout = 0;
3080 ct_iocb->cmd_dsd_count =
ad950360 3081 cpu_to_le16(bsg_job->request_payload.sg_cnt);
9bc4f4fb 3082 ct_iocb->total_dsd_count =
ad950360 3083 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
9bc4f4fb
HZ
3084 ct_iocb->req_bytecount =
3085 cpu_to_le32(bsg_job->request_payload.payload_len);
3086 ct_iocb->rsp_bytecount =
3087 cpu_to_le32(bsg_job->reply_payload.payload_len);
3088
d4556a49
BVA
3089 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3090 &ct_iocb->req_dsd.address);
15b7a68c 3091 ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
9bc4f4fb 3092
d4556a49
BVA
3093 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3094 &ct_iocb->rsp_dsd.address);
15b7a68c 3095 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
9bc4f4fb
HZ
3096
3097 avail_dsds = 1;
15b7a68c 3098 cur_dsd = &ct_iocb->rsp_dsd;
9bc4f4fb
HZ
3099 index = 0;
3100 tot_dsds = bsg_job->reply_payload.sg_cnt;
3101
3102 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
9bc4f4fb
HZ
3103 cont_a64_entry_t *cont_pkt;
3104
3105 /* Allocate additional continuation packets? */
3106 if (avail_dsds == 0) {
3107 /*
3108 * Five DSDs are available in the Cont.
3109 * Type 1 IOCB.
3110 */
0d2aa38e
GM
3111 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3112 vha->hw->req_q_map[0]);
15b7a68c 3113 cur_dsd = cont_pkt->dsd;
9bc4f4fb 3114 avail_dsds = 5;
9bc4f4fb
HZ
3115 entry_count++;
3116 }
3117
15b7a68c 3118 append_dsd64(&cur_dsd, sg);
9bc4f4fb
HZ
3119 avail_dsds--;
3120 }
3121 ct_iocb->entry_count = entry_count;
fabbb8df 3122
25ff6af1 3123 sp->vha->qla_stats.control_requests++;
9bc4f4fb
HZ
3124}
3125
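/*
 * Illustrative sketch (hypothetical helper, not part of the driver): in
 * qla2x00_ct_iocb() above the base IOCB carries one response DSD and each
 * Continuation Type 1 IOCB adds five more, so the entry count grows as:
 */
static inline uint16_t example_ct_entry_count(uint16_t rsp_dsds)
{
	if (rsp_dsds <= 1)
		return 1;
	/* one base entry plus ceil((rsp_dsds - 1) / 5) continuations */
	return 1 + (rsp_dsds - 1 + 4) / 5;
}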
9a069e19
GM
3126static void
3127qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3128{
3129 uint16_t avail_dsds;
15b7a68c 3130 struct dsd64 *cur_dsd;
9a069e19
GM
3131 struct scatterlist *sg;
3132 int index;
ce0779c7 3133 uint16_t cmd_dsds, rsp_dsds;
25ff6af1 3134 scsi_qla_host_t *vha = sp->vha;
0d2aa38e 3135 struct qla_hw_data *ha = vha->hw;
75cc8cfc 3136 struct bsg_job *bsg_job = sp->u.bsg_job;
9a069e19 3137 int entry_count = 1;
ce0779c7 3138 cont_a64_entry_t *cont_pkt = NULL;
9a069e19
GM
3139
3140 ct_iocb->entry_type = CT_IOCB_TYPE;
3141 ct_iocb->entry_status = 0;
3142 ct_iocb->sys_define = 0;
3143 ct_iocb->handle = sp->handle;
3144
3145 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
25ff6af1 3146 ct_iocb->vp_index = sp->vha->vp_idx;
ad950360 3147 ct_iocb->comp_status = cpu_to_le16(0);
9a069e19 3148
ce0779c7
GM
3149 cmd_dsds = bsg_job->request_payload.sg_cnt;
3150 rsp_dsds = bsg_job->reply_payload.sg_cnt;
3151
3152 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
9a069e19 3153 ct_iocb->timeout = 0;
ce0779c7 3154 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
9a069e19
GM
3155 ct_iocb->cmd_byte_count =
3156 cpu_to_le32(bsg_job->request_payload.payload_len);
9a069e19 3157
ce0779c7 3158 avail_dsds = 2;
15b7a68c 3159 cur_dsd = ct_iocb->dsd;
9a069e19 3160 index = 0;
9a069e19 3161
ce0779c7 3162 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
ce0779c7
GM
3163 /* Allocate additional continuation packets? */
3164 if (avail_dsds == 0) {
3165 /*
3166 * Five DSDs are available in the Cont.
3167 * Type 1 IOCB.
3168 */
3169 cont_pkt = qla2x00_prep_cont_type1_iocb(
3170 vha, ha->req_q_map[0]);
15b7a68c 3171 cur_dsd = cont_pkt->dsd;
ce0779c7
GM
3172 avail_dsds = 5;
3173 entry_count++;
3174 }
3175
15b7a68c 3176 append_dsd64(&cur_dsd, sg);
ce0779c7
GM
3177 avail_dsds--;
3178 }
3179
3180 index = 0;
3181
3182 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
9a069e19
GM
3183 /* Allocate additional continuation packets? */
3184 if (avail_dsds == 0) {
3185 /*
3186 * Five DSDs are available in the Cont.
3187 * Type 1 IOCB.
3188 */
0d2aa38e
GM
3189 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3190 ha->req_q_map[0]);
15b7a68c 3191 cur_dsd = cont_pkt->dsd;
9a069e19 3192 avail_dsds = 5;
9a069e19
GM
3193 entry_count++;
3194 }
3195
15b7a68c 3196 append_dsd64(&cur_dsd, sg);
9a069e19
GM
3197 avail_dsds--;
3198 }
3199 ct_iocb->entry_count = entry_count;
3200}
3201
5162cf0c
GM
3202/*
3203 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3204 * @sp: command to send to the ISP
3205 *
3206 * Returns non-zero if a failure occurred, else zero.
3207 */
3208int
3209qla82xx_start_scsi(srb_t *sp)
3210{
52c82823 3211 int nseg;
5162cf0c
GM
3212 unsigned long flags;
3213 struct scsi_cmnd *cmd;
3214 uint32_t *clr_ptr;
5162cf0c
GM
3215 uint32_t handle;
3216 uint16_t cnt;
3217 uint16_t req_cnt;
3218 uint16_t tot_dsds;
3219 struct device_reg_82xx __iomem *reg;
3220 uint32_t dbval;
7ffa5b93 3221 __be32 *fcp_dl;
5162cf0c
GM
3222 uint8_t additional_cdb_len;
3223 struct ct6_dsd *ctx;
25ff6af1 3224 struct scsi_qla_host *vha = sp->vha;
5162cf0c
GM
3225 struct qla_hw_data *ha = vha->hw;
3226 struct req_que *req = NULL;
3227 struct rsp_que *rsp = NULL;
5162cf0c
GM
3228
3229 /* Setup device pointers. */
5162cf0c 3230 reg = &ha->iobase->isp82;
9ba56b95 3231 cmd = GET_CMD_SP(sp);
5162cf0c
GM
3232 req = vha->req;
3233 rsp = ha->rsp_q_map[0];
3234
3235 /* So we know we haven't pci_map'ed anything yet */
3236 tot_dsds = 0;
3237
3238 dbval = 0x04 | (ha->portnum << 5);
3239
3240 /* Send marker if required */
3241 if (vha->marker_needed != 0) {
9eb9c6dc
QT
3242 if (qla2x00_marker(vha, ha->base_qpair,
3243 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
5162cf0c
GM
3244 ql_log(ql_log_warn, vha, 0x300c,
3245 "qla2x00_marker failed for cmd=%p.\n", cmd);
3246 return QLA_FUNCTION_FAILED;
3247 }
3248 vha->marker_needed = 0;
3249 }
3250
3251 /* Acquire ring specific lock */
3252 spin_lock_irqsave(&ha->hardware_lock, flags);
3253
bcc85657
BVA
3254 handle = qla2xxx_get_next_handle(req);
3255 if (handle == 0)
5162cf0c
GM
3256 goto queuing_error;
3257
3258 /* Map the sg table so we have an accurate count of sg entries needed */
3259 if (scsi_sg_count(cmd)) {
3260 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3261 scsi_sg_count(cmd), cmd->sc_data_direction);
3262 if (unlikely(!nseg))
3263 goto queuing_error;
3264 } else
3265 nseg = 0;
3266
3267 tot_dsds = nseg;
3268
3269 if (tot_dsds > ql2xshiftctondsd) {
3270 struct cmd_type_6 *cmd_pkt;
3271 uint16_t more_dsd_lists = 0;
3272 struct dsd_dma *dsd_ptr;
3273 uint16_t i;
3274
3275 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3276 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3277 ql_dbg(ql_dbg_io, vha, 0x300d,
3278 "Num of DSD list %d is than %d for cmd=%p.\n",
3279 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3280 cmd);
3281 goto queuing_error;
3282 }
3283
3284 if (more_dsd_lists <= ha->gbl_dsd_avail)
3285 goto sufficient_dsds;
3286 else
3287 more_dsd_lists -= ha->gbl_dsd_avail;
3288
3289 for (i = 0; i < more_dsd_lists; i++) {
3290 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3291 if (!dsd_ptr) {
3292 ql_log(ql_log_fatal, vha, 0x300e,
3293 "Failed to allocate memory for dsd_dma "
3294 "for cmd=%p.\n", cmd);
3295 goto queuing_error;
3296 }
3297
3298 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3299 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3300 if (!dsd_ptr->dsd_addr) {
3301 kfree(dsd_ptr);
3302 ql_log(ql_log_fatal, vha, 0x300f,
3303 "Failed to allocate memory for dsd_addr "
3304 "for cmd=%p.\n", cmd);
3305 goto queuing_error;
3306 }
3307 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3308 ha->gbl_dsd_avail++;
3309 }
3310
3311sufficient_dsds:
3312 req_cnt = 1;
3313
3314 if (req->cnt < (req_cnt + 2)) {
04474d3a 3315 cnt = (uint16_t)rd_reg_dword_relaxed(
5162cf0c
GM
3316 &reg->req_q_out[0]);
3317 if (req->ring_index < cnt)
3318 req->cnt = cnt - req->ring_index;
3319 else
3320 req->cnt = req->length -
3321 (req->ring_index - cnt);
a6eb3c9f
CL
3322 if (req->cnt < (req_cnt + 2))
3323 goto queuing_error;
5162cf0c
GM
3324 }
3325
5ec9f904 3326 ctx = sp->u.scmd.ct6_ctx =
9ba56b95
GM
3327 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3328 if (!ctx) {
5162cf0c
GM
3329 ql_log(ql_log_fatal, vha, 0x3010,
3330 "Failed to allocate ctx for cmd=%p.\n", cmd);
3331 goto queuing_error;
3332 }
9ba56b95 3333
5162cf0c 3334 memset(ctx, 0, sizeof(struct ct6_dsd));
501017f6 3335 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
5162cf0c
GM
3336 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3337 if (!ctx->fcp_cmnd) {
3338 ql_log(ql_log_fatal, vha, 0x3011,
3339 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
841f97bf 3340 goto queuing_error;
5162cf0c
GM
3341 }
3342
3343 /* Initialize the DSD list and dma handle */
3344 INIT_LIST_HEAD(&ctx->dsd_list);
3345 ctx->dsd_use_cnt = 0;
3346
3347 if (cmd->cmd_len > 16) {
3348 additional_cdb_len = cmd->cmd_len - 16;
3349 if ((cmd->cmd_len % 4) != 0) {
 3350 /* SCSI commands bigger than 16 bytes must be a
 3351 * multiple of 4
3352 */
3353 ql_log(ql_log_warn, vha, 0x3012,
3354 "scsi cmd len %d not multiple of 4 "
3355 "for cmd=%p.\n", cmd->cmd_len, cmd);
3356 goto queuing_error_fcp_cmnd;
3357 }
3358 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3359 } else {
3360 additional_cdb_len = 0;
3361 ctx->fcp_cmnd_len = 12 + 16 + 4;
3362 }
3363
3364 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
c25eb70a 3365 cmd_pkt->handle = make_handle(req->id, handle);
5162cf0c
GM
3366
3367 /* Zero out remaining portion of packet. */
3368 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3369 clr_ptr = (uint32_t *)cmd_pkt + 2;
3370 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3371 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3372
3373 /* Set NPORT-ID and LUN number*/
3374 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3375 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3376 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3377 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 3378 cmd_pkt->vp_index = sp->vha->vp_idx;
5162cf0c
GM
3379
3380 /* Build IOCB segments */
3381 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3382 goto queuing_error_fcp_cmnd;
3383
9ba56b95 3384 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
5162cf0c
GM
3385 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3386
3387 /* build FCP_CMND IU */
9ba56b95 3388 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
5162cf0c
GM
3389 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3390
3391 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3392 ctx->fcp_cmnd->additional_cdb_len |= 1;
3393 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3394 ctx->fcp_cmnd->additional_cdb_len |= 2;
3395
a00f6296
SK
3396 /* Populate the FCP_PRIO. */
3397 if (ha->flags.fcp_prio_enabled)
3398 ctx->fcp_cmnd->task_attribute |=
3399 sp->fcport->fcp_prio << 3;
3400
5162cf0c
GM
3401 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3402
7ffa5b93 3403 fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
5162cf0c
GM
3404 additional_cdb_len);
3405 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3406
3407 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
d4556a49
BVA
3408 put_unaligned_le64(ctx->fcp_cmnd_dma,
3409 &cmd_pkt->fcp_cmnd_dseg_address);
5162cf0c
GM
3410
3411 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3412 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3413 /* Set total data segment count. */
3414 cmd_pkt->entry_count = (uint8_t)req_cnt;
3415 /* Specify response queue number where
3416 * completion should happen
3417 */
3418 cmd_pkt->entry_status = (uint8_t) rsp->id;
3419 } else {
3420 struct cmd_type_7 *cmd_pkt;
bd432bb5 3421
5162cf0c
GM
3422 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3423 if (req->cnt < (req_cnt + 2)) {
04474d3a 3424 cnt = (uint16_t)rd_reg_dword_relaxed(
5162cf0c
GM
3425 &reg->req_q_out[0]);
3426 if (req->ring_index < cnt)
3427 req->cnt = cnt - req->ring_index;
3428 else
3429 req->cnt = req->length -
3430 (req->ring_index - cnt);
3431 }
3432 if (req->cnt < (req_cnt + 2))
3433 goto queuing_error;
3434
3435 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
c25eb70a 3436 cmd_pkt->handle = make_handle(req->id, handle);
5162cf0c
GM
3437
3438 /* Zero out remaining portion of packet. */
3439 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3440 clr_ptr = (uint32_t *)cmd_pkt + 2;
3441 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3442 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3443
3444 /* Set NPORT-ID and LUN number. */
3445 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3446 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3447 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3448 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 3449 cmd_pkt->vp_index = sp->vha->vp_idx;
5162cf0c 3450
9ba56b95 3451 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
5162cf0c 3452 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
9ba56b95 3453 sizeof(cmd_pkt->lun));
5162cf0c 3454
a00f6296
SK
3455 /* Populate the FCP_PRIO. */
3456 if (ha->flags.fcp_prio_enabled)
3457 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3458
5162cf0c
GM
3459 /* Load SCSI command packet. */
3460 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3461 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3462
3463 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3464
3465 /* Build IOCB segments */
d7459527 3466 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
5162cf0c
GM
3467
3468 /* Set total data segment count. */
3469 cmd_pkt->entry_count = (uint8_t)req_cnt;
3470 /* Specify response queue number where
3471 * completion should happen.
3472 */
3473 cmd_pkt->entry_status = (uint8_t) rsp->id;
3474
3475 }
3476 /* Record the command and update ring bookkeeping. */
3477 req->current_outstanding_cmd = handle;
3478 req->outstanding_cmds[handle] = sp;
3479 sp->handle = handle;
9ba56b95 3480 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
5162cf0c
GM
3481 req->cnt -= req_cnt;
3482 wmb();
3483
3484 /* Adjust ring index. */
3485 req->ring_index++;
3486 if (req->ring_index == req->length) {
3487 req->ring_index = 0;
3488 req->ring_ptr = req->ring;
3489 } else
3490 req->ring_ptr++;
3491
3492 sp->flags |= SRB_DMA_VALID;
3493
3494 /* Set chip new ring index. */
3495 /* write, read and verify logic */
3496 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3497 if (ql2xdbwr)
8dfa4b5a 3498 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
5162cf0c 3499 else {
04474d3a 3500 wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
5162cf0c 3501 wmb();
04474d3a
BVA
3502 while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
3503 wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
5162cf0c
GM
3504 wmb();
3505 }
3506 }
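/*
 * The write/read/verify pattern above is how the ISP82xx doorbell is
 * guaranteed to land: post the value, then poll it back until the
 * chip echoes it. A minimal sketch of the idiom (db_wr/db_rd are
 * placeholder names for the mapped doorbell registers):
 *
 *     writel(dbval, db_wr);
 *     wmb();
 *     while (readl(db_rd) != dbval) {  /+ not latched yet +/
 *             writel(dbval, db_wr);
 *             wmb();
 *     }
 */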
3507
3508 /* Manage unprocessed RIO/ZIO commands in response queue. */
3509 if (vha->flags.process_response_queue &&
3510 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3511 qla24xx_process_response_queue(vha, rsp);
3512
3513 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3514 return QLA_SUCCESS;
3515
3516queuing_error_fcp_cmnd:
3517 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3518queuing_error:
3519 if (tot_dsds)
3520 scsi_dma_unmap(cmd);
3521
5ec9f904
BVA
3522 if (sp->u.scmd.crc_ctx) {
3523 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3524 sp->u.scmd.crc_ctx = NULL;
5162cf0c
GM
3525 }
3526 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3527
3528 return QLA_FUNCTION_FAILED;
3529}
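/*
 * Submission sequence used by the function above, summarized as a
 * sketch: (1) claim a handle and record sp in
 * req->outstanding_cmds[handle]; (2) charge req_cnt slots against
 * req->cnt; (3) wmb() and advance ring_index, wrapping to the ring
 * start when it reaches req->length; (4) ring the doorbell,
 * re-posting until the chip echoes the value back via nxdb_rd_ptr.
 */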
3530
6d78e557 3531static void
4440e46d
AB
3532qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3533{
3534 struct srb_iocb *aio = &sp->u.iocb_cmd;
25ff6af1 3535 scsi_qla_host_t *vha = sp->vha;
49cecca7 3536 struct req_que *req = sp->qpair->req;
4440e46d
AB
3537
3538 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3539 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3540 abt_iocb->entry_count = 1;
7ffa5b93 3541 abt_iocb->handle = make_handle(req->id, sp->handle);
49cecca7
QT
3542 if (sp->fcport) {
3543 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3544 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3545 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3546 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3547 }
4440e46d 3548 abt_iocb->handle_to_abort =
7ffa5b93
BVA
3549 make_handle(le16_to_cpu(aio->u.abt.req_que_no),
3550 aio->u.abt.cmd_hndl);
4440e46d 3551 abt_iocb->vp_index = vha->vp_idx;
7ffa5b93 3552 abt_iocb->req_que_no = aio->u.abt.req_que_no;
4440e46d
AB
3553 /* Send the command to the firmware */
3554 wmb();
3555}
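/*
 * make_handle(), used above for both the IOCB's own handle and the
 * handle of the command being aborted, packs a queue id and per-queue
 * index into one 32-bit firmware handle, roughly (q_id << 16) | index
 * (a sketch of the encoding; see its definition in the driver
 * headers). The abort IOCB therefore names both the request queue
 * and the exact command to abort.
 */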
3556
726b8548
QT
3557static void
3558qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3559{
3560 int i, sz;
3561
3562 mbx->entry_type = MBX_IOCB_TYPE;
3563 mbx->handle = sp->handle;
3564 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3565
3566 for (i = 0; i < sz; i++)
7ffa5b93 3567 mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
726b8548
QT
3568}
3569
3570static void
3571qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3572{
3573 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3574 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3575 ct_pkt->handle = sp->handle;
3576}
3577
3578static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3579 struct nack_to_isp *nack)
3580{
3581 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3582
3583 nack->entry_type = NOTIFY_ACK_TYPE;
3584 nack->entry_count = 1;
3585 nack->ox_id = ntfy->ox_id;
3586
3587 nack->u.isp24.handle = sp->handle;
3588 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3589 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3590 nack->u.isp24.flags = ntfy->u.isp24.flags &
7ffa5b93 3591 cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
726b8548
QT
3592 }
3593 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3594 nack->u.isp24.status = ntfy->u.isp24.status;
3595 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3596 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3597 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3598 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3599 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3600 nack->u.isp24.srr_flags = 0;
3601 nack->u.isp24.srr_reject_code = 0;
3602 nack->u.isp24.srr_reject_code_expl = 0;
3603 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3604}
3605
e84067d7
DG
3606/*
3607 * Build NVME LS request
3608 */
ac988c49 3609static void
e84067d7
DG
3610qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3611{
3612 struct srb_iocb *nvme;
e84067d7
DG
3613
3614 nvme = &sp->u.iocb_cmd;
3615 cmd_pkt->entry_type = PT_LS4_REQUEST;
3616 cmd_pkt->entry_count = 1;
7ffa5b93 3617 cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
e84067d7
DG
3618
3619 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3620 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3621 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3622
7ffa5b93
BVA
3623 cmd_pkt->tx_dseg_count = cpu_to_le16(1);
3624 cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
3625 cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
15b7a68c 3626 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
e84067d7 3627
7ffa5b93
BVA
3628 cmd_pkt->rx_dseg_count = cpu_to_le16(1);
3629 cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
3630 cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
15b7a68c 3631 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
e84067d7
DG
3632}
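/*
 * The PT_LS4 request built above carries exactly two data segments:
 * dsd[0] describes the NVMe-FC LS command buffer (cmd_dma/cmd_len)
 * for transmit, and dsd[1] the response buffer (rsp_dma/rsp_len) for
 * receive, each DMA address stored little-endian via
 * put_unaligned_le64().
 */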
3633
2853192e
QT
3634static void
3635qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3636{
3637 int map, pos;
3638
3639 vce->entry_type = VP_CTRL_IOCB_TYPE;
3640 vce->handle = sp->handle;
3641 vce->entry_count = 1;
3642 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3643 vce->vp_count = cpu_to_le16(1);
3644
3645 /*
3646 * The VP index map in firmware starts at 1, so decrement the
3647 * index; this is safe because index 0 is never used.
3648 */
3649 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3650 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3651 vce->vp_idx_map[map] |= 1 << pos;
3652}
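/*
 * Worked example for the bitmap math above: vp_index = 10 gives
 * map = (10 - 1) / 8 = 1 and pos = (10 - 1) & 7 = 1, i.e. bit 1 of
 * vce->vp_idx_map[1]. Because index 0 never occurs, the decrement
 * cannot underflow.
 */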
3653
11aea16a
QT
3654static void
3655qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3656{
3657 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3658 logio->control_flags =
3659 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3660
3661 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3662 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3663 logio->port_id[1] = sp->fcport->d_id.b.area;
3664 logio->port_id[2] = sp->fcport->d_id.b.domain;
3665 logio->vp_index = sp->fcport->vha->vp_idx;
3666}
3667
ac280b67
AV
3668int
3669qla2x00_start_sp(srb_t *sp)
3670{
80676d05 3671 int rval = QLA_SUCCESS;
25ff6af1 3672 scsi_qla_host_t *vha = sp->vha;
726b8548 3673 struct qla_hw_data *ha = vha->hw;
6a629468 3674 struct qla_qpair *qp = sp->qpair;
ac280b67 3675 void *pkt;
ac280b67
AV
3676 unsigned long flags;
3677
6a629468
QT
3678 spin_lock_irqsave(qp->qp_lock_ptr, flags);
3679 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
7c3df132 3680 if (!pkt) {
80676d05 3681 rval = EAGAIN;
726b8548 3682 ql_log(ql_log_warn, vha, 0x700c,
7c3df132 3683 "qla2x00_alloc_iocbs failed.\n");
ac280b67 3684 goto done;
7c3df132 3685 }
ac280b67 3686
9ba56b95 3687 switch (sp->type) {
ac280b67
AV
3688 case SRB_LOGIN_CMD:
3689 IS_FWI2_CAPABLE(ha) ?
5ff1d584 3690 qla24xx_login_iocb(sp, pkt) :
ac280b67
AV
3691 qla2x00_login_iocb(sp, pkt);
3692 break;
a5d42f4c
DG
3693 case SRB_PRLI_CMD:
3694 qla24xx_prli_iocb(sp, pkt);
3695 break;
ac280b67
AV
3696 case SRB_LOGOUT_CMD:
3697 IS_FWI2_CAPABLE(ha) ?
5ff1d584 3698 qla24xx_logout_iocb(sp, pkt) :
ac280b67
AV
3699 qla2x00_logout_iocb(sp, pkt);
3700 break;
9a069e19
GM
3701 case SRB_ELS_CMD_RPT:
3702 case SRB_ELS_CMD_HST:
3703 qla24xx_els_iocb(sp, pkt);
3704 break;
3705 case SRB_CT_CMD:
9bc4f4fb 3706 IS_FWI2_CAPABLE(ha) ?
5780790e
AV
3707 qla24xx_ct_iocb(sp, pkt) :
3708 qla2x00_ct_iocb(sp, pkt);
9a069e19 3709 break;
5ff1d584
AV
3710 case SRB_ADISC_CMD:
3711 IS_FWI2_CAPABLE(ha) ?
3712 qla24xx_adisc_iocb(sp, pkt) :
3713 qla2x00_adisc_iocb(sp, pkt);
3714 break;
3822263e 3715 case SRB_TM_CMD:
8ae6d9c7
GM
3716 IS_QLAFX00(ha) ?
3717 qlafx00_tm_iocb(sp, pkt) :
3718 qla24xx_tm_iocb(sp, pkt);
3719 break;
3720 case SRB_FXIOCB_DCMD:
3721 case SRB_FXIOCB_BCMD:
3722 qlafx00_fxdisc_iocb(sp, pkt);
3723 break;
e84067d7
DG
3724 case SRB_NVME_LS:
3725 qla_nvme_ls(sp, pkt);
3726 break;
8ae6d9c7 3727 case SRB_ABT_CMD:
4440e46d
AB
3728 IS_QLAFX00(ha) ?
3729 qlafx00_abort_iocb(sp, pkt) :
3730 qla24xx_abort_iocb(sp, pkt);
3822263e 3731 break;
6eb54715
HM
3732 case SRB_ELS_DCMD:
3733 qla24xx_els_logo_iocb(sp, pkt);
3734 break;
726b8548
QT
3735 case SRB_CT_PTHRU_CMD:
3736 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3737 break;
3738 case SRB_MB_IOCB:
3739 qla2x00_mb_iocb(sp, pkt);
3740 break;
3741 case SRB_NACK_PLOGI:
3742 case SRB_NACK_PRLI:
3743 case SRB_NACK_LOGO:
3744 qla2x00_send_notify_ack_iocb(sp, pkt);
3745 break;
2853192e
QT
3746 case SRB_CTRL_VP:
3747 qla25xx_ctrlvp_iocb(sp, pkt);
3748 break;
11aea16a
QT
3749 case SRB_PRLO_CMD:
3750 qla24xx_prlo_iocb(sp, pkt);
3751 break;
ac280b67
AV
3752 default:
3753 break;
3754 }
3755
3a4b6cc7
QT
3756 if (sp->start_timer)
3757 add_timer(&sp->u.iocb_cmd.timer);
3758
ac280b67 3759 wmb();
6a629468 3760 qla2x00_start_iocbs(vha, qp->req);
ac280b67 3761done:
6a629468 3762 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
ac280b67
AV
3763 return rval;
3764}
a9b6f722
SK
3765
3766static void
3767qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3768 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3769{
3770 uint16_t avail_dsds;
15b7a68c 3771 struct dsd64 *cur_dsd;
a9b6f722
SK
3772 uint32_t req_data_len = 0;
3773 uint32_t rsp_data_len = 0;
3774 struct scatterlist *sg;
3775 int index;
3776 int entry_count = 1;
75cc8cfc 3777 struct bsg_job *bsg_job = sp->u.bsg_job;
a9b6f722
SK
3778
3779 /* Update the entry type to indicate a bidirectional command. */
2c26348c 3780 put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
a9b6f722
SK
3781
3782 /* Set the transfer direction; for a bidirectional command both
3783 * flags are set. Also set the BD_WRAP_BACK flag: the firmware
3784 * takes care of assigning DID=SID for outgoing packets.
3785 */
3786 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3787 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
ad950360 3788 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
a9b6f722
SK
3789 BD_WRAP_BACK);
3790
3791 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3792 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3793 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3794 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3795
3796 vha->bidi_stats.transfer_bytes += req_data_len;
3797 vha->bidi_stats.io_count++;
3798
fabbb8df
JC
3799 vha->qla_stats.output_bytes += req_data_len;
3800 vha->qla_stats.output_requests++;
3801
a9b6f722
SK
3802 /* Only one DSD is available in the bidirectional IOCB itself;
3803 * the remaining DSDs are bundled into continuation IOCBs.
3804 */
3805 avail_dsds = 1;
15b7a68c 3806 cur_dsd = &cmd_pkt->fcp_dsd;
a9b6f722
SK
3807
3808 index = 0;
3809
3810 for_each_sg(bsg_job->request_payload.sg_list, sg,
3811 bsg_job->request_payload.sg_cnt, index) {
a9b6f722
SK
3812 cont_a64_entry_t *cont_pkt;
3813
3814 /* Allocate additional continuation packets */
3815 if (avail_dsds == 0) {
3816 /* A Continuation Type 1 IOCB can accommodate
3817 * 5 DSDs.
3818 */
3819 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
15b7a68c 3820 cur_dsd = cont_pkt->dsd;
a9b6f722
SK
3821 avail_dsds = 5;
3822 entry_count++;
3823 }
15b7a68c 3824 append_dsd64(&cur_dsd, sg);
a9b6f722
SK
3825 avail_dsds--;
3826 }
3827 /* The read-request DSDs always go to a continuation IOCB,
3828 * following the write DSDs. If there is room in the current
3829 * IOCB they are added there; otherwise a new continuation
3830 * IOCB is allocated.
3831 */
3832 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3833 bsg_job->reply_payload.sg_cnt, index) {
a9b6f722
SK
3834 cont_a64_entry_t *cont_pkt;
3835
3836 /* Allocate additional continuation packets */
3837 if (avail_dsds == 0) {
3838 /* A Continuation Type 1 IOCB can accommodate
3839 * 5 DSDs.
3840 */
3841 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
15b7a68c 3842 cur_dsd = cont_pkt->dsd;
a9b6f722
SK
3843 avail_dsds = 5;
3844 entry_count++;
3845 }
15b7a68c 3846 append_dsd64(&cur_dsd, sg);
a9b6f722
SK
3847 avail_dsds--;
3848 }
3849 /* This value must equal the number of IOCBs required for this command. */
3850 cmd_pkt->entry_count = entry_count;
3851}
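/*
 * DSD accounting example for the two loops above (a sketch): with one
 * inline DSD in the base IOCB and five per continuation IOCB, a job
 * with 4 write and 3 read segments uses the inline slot plus two
 * continuation packets (5 slots, then 1 more), so entry_count = 3.
 */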
3852
3853int
3854qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3855{
3856
3857 struct qla_hw_data *ha = vha->hw;
3858 unsigned long flags;
3859 uint32_t handle;
a9b6f722
SK
3860 uint16_t req_cnt;
3861 uint16_t cnt;
3862 uint32_t *clr_ptr;
3863 struct cmd_bidir *cmd_pkt = NULL;
3864 struct rsp_que *rsp;
3865 struct req_que *req;
3866 int rval = EXT_STATUS_OK;
a9b6f722
SK
3867
3868 rval = QLA_SUCCESS;
3869
3870 rsp = ha->rsp_q_map[0];
3871 req = vha->req;
3872
3873 /* Send marker if required */
3874 if (vha->marker_needed != 0) {
9eb9c6dc
QT
3875 if (qla2x00_marker(vha, ha->base_qpair,
3876 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
a9b6f722
SK
3877 return EXT_STATUS_MAILBOX;
3878 vha->marker_needed = 0;
3879 }
3880
3881 /* Acquire ring specific lock */
3882 spin_lock_irqsave(&ha->hardware_lock, flags);
3883
bcc85657
BVA
3884 handle = qla2xxx_get_next_handle(req);
3885 if (handle == 0) {
a9b6f722
SK
3886 rval = EXT_STATUS_BUSY;
3887 goto queuing_error;
3888 }
3889
3890 /* Calculate number of IOCB required */
3891 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3892
3893 /* Check for room on request queue. */
3894 if (req->cnt < req_cnt + 2) {
7c6300e3 3895 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
04474d3a 3896 rd_reg_dword_relaxed(req->req_q_out);
a9b6f722
SK
3897 if (req->ring_index < cnt)
3898 req->cnt = cnt - req->ring_index;
3899 else
3900 req->cnt = req->length -
3901 (req->ring_index - cnt);
3902 }
3903 if (req->cnt < req_cnt + 2) {
3904 rval = EXT_STATUS_BUSY;
3905 goto queuing_error;
3906 }
3907
3908 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
c25eb70a 3909 cmd_pkt->handle = make_handle(req->id, handle);
a9b6f722
SK
3910
3911 /* Zero out remaining portion of packet. */
3912 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3913 clr_ptr = (uint32_t *)cmd_pkt + 2;
3914 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3915
3916 /* Set NPORT-ID (of the vha). */
3917 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3918 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3919 cmd_pkt->port_id[1] = vha->d_id.b.area;
3920 cmd_pkt->port_id[2] = vha->d_id.b.domain;
3921
3922 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3923 cmd_pkt->entry_status = (uint8_t) rsp->id;
3924 /* Record the command and update ring bookkeeping. */
3925 req->current_outstanding_cmd = handle;
3926 req->outstanding_cmds[handle] = sp;
3927 sp->handle = handle;
3928 req->cnt -= req_cnt;
3929
3930 /* Send the command to the firmware */
3931 wmb();
3932 qla2x00_start_iocbs(vha, req);
3933queuing_error:
3934 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3935 return rval;
3936}