2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
10 #include <linux/blkdev.h>
11 #include <scsi/scsi_tcq.h>
12 #include <linux/delay.h>
15 #include "qedi_iscsi.h"
17 #include "qedi_fw_iscsi.h"
18 #include "qedi_fw_scsi.h"
20 static int qedi_send_iscsi_tmf(struct qedi_conn
*qedi_conn
,
21 struct iscsi_task
*mtask
);
23 void qedi_iscsi_unmap_sg_list(struct qedi_cmd
*cmd
)
25 struct scsi_cmnd
*sc
= cmd
->scsi_cmd
;
27 if (cmd
->io_tbl
.sge_valid
&& sc
) {
28 cmd
->io_tbl
.sge_valid
= 0;
33 static void qedi_process_logout_resp(struct qedi_ctx
*qedi
,
35 struct iscsi_task
*task
,
36 struct qedi_conn
*qedi_conn
)
38 struct iscsi_conn
*conn
= qedi_conn
->cls_conn
->dd_data
;
39 struct iscsi_logout_rsp
*resp_hdr
;
40 struct iscsi_session
*session
= conn
->session
;
41 struct iscsi_logout_response_hdr
*cqe_logout_response
;
44 cmd
= (struct qedi_cmd
*)task
->dd_data
;
45 cqe_logout_response
= &cqe
->cqe_common
.iscsi_hdr
.logout_response
;
46 spin_lock(&session
->back_lock
);
47 resp_hdr
= (struct iscsi_logout_rsp
*)&qedi_conn
->gen_pdu
.resp_hdr
;
48 memset(resp_hdr
, 0, sizeof(struct iscsi_hdr
));
49 resp_hdr
->opcode
= cqe_logout_response
->opcode
;
50 resp_hdr
->flags
= cqe_logout_response
->flags
;
51 resp_hdr
->hlength
= 0;
53 resp_hdr
->itt
= build_itt(cqe
->cqe_solicited
.itid
, conn
->session
->age
);
54 resp_hdr
->statsn
= cpu_to_be32(cqe_logout_response
->stat_sn
);
55 resp_hdr
->exp_cmdsn
= cpu_to_be32(cqe_logout_response
->exp_cmd_sn
);
56 resp_hdr
->max_cmdsn
= cpu_to_be32(cqe_logout_response
->max_cmd_sn
);
58 resp_hdr
->t2wait
= cpu_to_be32(cqe_logout_response
->time_2_wait
);
59 resp_hdr
->t2retain
= cpu_to_be32(cqe_logout_response
->time_2_retain
);
61 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_TID
,
62 "Freeing tid=0x%x for cid=0x%x\n",
63 cmd
->task_id
, qedi_conn
->iscsi_conn_id
);
65 if (likely(cmd
->io_cmd_in_list
)) {
66 cmd
->io_cmd_in_list
= false;
67 list_del_init(&cmd
->io_cmd
);
68 qedi_conn
->active_cmd_count
--;
70 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_INFO
,
71 "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
72 cmd
->task_id
, qedi_conn
->iscsi_conn_id
,
76 cmd
->state
= RESPONSE_RECEIVED
;
77 qedi_clear_task_idx(qedi
, cmd
->task_id
);
78 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)resp_hdr
, NULL
, 0);
80 spin_unlock(&session
->back_lock
);
83 static void qedi_process_text_resp(struct qedi_ctx
*qedi
,
85 struct iscsi_task
*task
,
86 struct qedi_conn
*qedi_conn
)
88 struct iscsi_conn
*conn
= qedi_conn
->cls_conn
->dd_data
;
89 struct iscsi_session
*session
= conn
->session
;
90 struct iscsi_task_context
*task_ctx
;
91 struct iscsi_text_rsp
*resp_hdr_ptr
;
92 struct iscsi_text_response_hdr
*cqe_text_response
;
97 cmd
= (struct qedi_cmd
*)task
->dd_data
;
98 task_ctx
= qedi_get_task_mem(&qedi
->tasks
, cmd
->task_id
);
100 cqe_text_response
= &cqe
->cqe_common
.iscsi_hdr
.text_response
;
101 spin_lock(&session
->back_lock
);
102 resp_hdr_ptr
= (struct iscsi_text_rsp
*)&qedi_conn
->gen_pdu
.resp_hdr
;
103 memset(resp_hdr_ptr
, 0, sizeof(struct iscsi_hdr
));
104 resp_hdr_ptr
->opcode
= cqe_text_response
->opcode
;
105 resp_hdr_ptr
->flags
= cqe_text_response
->flags
;
106 resp_hdr_ptr
->hlength
= 0;
108 hton24(resp_hdr_ptr
->dlength
,
109 (cqe_text_response
->hdr_second_dword
&
110 ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK
));
111 tmp
= (u32
*)resp_hdr_ptr
->dlength
;
113 resp_hdr_ptr
->itt
= build_itt(cqe
->cqe_solicited
.itid
,
115 resp_hdr_ptr
->ttt
= cqe_text_response
->ttt
;
116 resp_hdr_ptr
->statsn
= cpu_to_be32(cqe_text_response
->stat_sn
);
117 resp_hdr_ptr
->exp_cmdsn
= cpu_to_be32(cqe_text_response
->exp_cmd_sn
);
118 resp_hdr_ptr
->max_cmdsn
= cpu_to_be32(cqe_text_response
->max_cmd_sn
);
120 pld_len
= cqe_text_response
->hdr_second_dword
&
121 ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK
;
122 qedi_conn
->gen_pdu
.resp_wr_ptr
= qedi_conn
->gen_pdu
.resp_buf
+ pld_len
;
124 memset(task_ctx
, '\0', sizeof(*task_ctx
));
126 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_TID
,
127 "Freeing tid=0x%x for cid=0x%x\n",
128 cmd
->task_id
, qedi_conn
->iscsi_conn_id
);
130 if (likely(cmd
->io_cmd_in_list
)) {
131 cmd
->io_cmd_in_list
= false;
132 list_del_init(&cmd
->io_cmd
);
133 qedi_conn
->active_cmd_count
--;
135 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_INFO
,
136 "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
137 cmd
->task_id
, qedi_conn
->iscsi_conn_id
,
141 cmd
->state
= RESPONSE_RECEIVED
;
142 qedi_clear_task_idx(qedi
, cmd
->task_id
);
144 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)resp_hdr_ptr
,
145 qedi_conn
->gen_pdu
.resp_buf
,
146 (qedi_conn
->gen_pdu
.resp_wr_ptr
-
147 qedi_conn
->gen_pdu
.resp_buf
));
148 spin_unlock(&session
->back_lock
);
151 static void qedi_tmf_resp_work(struct work_struct
*work
)
153 struct qedi_cmd
*qedi_cmd
=
154 container_of(work
, struct qedi_cmd
, tmf_work
);
155 struct qedi_conn
*qedi_conn
= qedi_cmd
->conn
;
156 struct qedi_ctx
*qedi
= qedi_conn
->qedi
;
157 struct iscsi_conn
*conn
= qedi_conn
->cls_conn
->dd_data
;
158 struct iscsi_session
*session
= conn
->session
;
159 struct iscsi_tm_rsp
*resp_hdr_ptr
;
160 struct iscsi_cls_session
*cls_sess
;
163 set_bit(QEDI_CONN_FW_CLEANUP
, &qedi_conn
->flags
);
164 resp_hdr_ptr
= (struct iscsi_tm_rsp
*)qedi_cmd
->tmf_resp_buf
;
165 cls_sess
= iscsi_conn_to_session(qedi_conn
->cls_conn
);
167 iscsi_block_session(session
->cls_session
);
168 rval
= qedi_cleanup_all_io(qedi
, qedi_conn
, qedi_cmd
->task
, true);
170 qedi_clear_task_idx(qedi
, qedi_cmd
->task_id
);
171 iscsi_unblock_session(session
->cls_session
);
175 iscsi_unblock_session(session
->cls_session
);
176 qedi_clear_task_idx(qedi
, qedi_cmd
->task_id
);
178 spin_lock(&session
->back_lock
);
179 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)resp_hdr_ptr
, NULL
, 0);
180 spin_unlock(&session
->back_lock
);
184 clear_bit(QEDI_CONN_FW_CLEANUP
, &qedi_conn
->flags
);
187 static void qedi_process_tmf_resp(struct qedi_ctx
*qedi
,
188 union iscsi_cqe
*cqe
,
189 struct iscsi_task
*task
,
190 struct qedi_conn
*qedi_conn
)
193 struct iscsi_conn
*conn
= qedi_conn
->cls_conn
->dd_data
;
194 struct iscsi_session
*session
= conn
->session
;
195 struct iscsi_tmf_response_hdr
*cqe_tmp_response
;
196 struct iscsi_tm_rsp
*resp_hdr_ptr
;
197 struct iscsi_tm
*tmf_hdr
;
198 struct qedi_cmd
*qedi_cmd
= NULL
;
201 cqe_tmp_response
= &cqe
->cqe_common
.iscsi_hdr
.tmf_response
;
203 qedi_cmd
= task
->dd_data
;
204 qedi_cmd
->tmf_resp_buf
= kzalloc(sizeof(*resp_hdr_ptr
), GFP_KERNEL
);
205 if (!qedi_cmd
->tmf_resp_buf
) {
206 QEDI_ERR(&qedi
->dbg_ctx
,
207 "Failed to allocate resp buf, cid=0x%x\n",
208 qedi_conn
->iscsi_conn_id
);
212 spin_lock(&session
->back_lock
);
213 resp_hdr_ptr
= (struct iscsi_tm_rsp
*)qedi_cmd
->tmf_resp_buf
;
214 memset(resp_hdr_ptr
, 0, sizeof(struct iscsi_tm_rsp
));
216 /* Fill up the header */
217 resp_hdr_ptr
->opcode
= cqe_tmp_response
->opcode
;
218 resp_hdr_ptr
->flags
= cqe_tmp_response
->hdr_flags
;
219 resp_hdr_ptr
->response
= cqe_tmp_response
->hdr_response
;
220 resp_hdr_ptr
->hlength
= 0;
222 hton24(resp_hdr_ptr
->dlength
,
223 (cqe_tmp_response
->hdr_second_dword
&
224 ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK
));
225 tmp
= (u32
*)resp_hdr_ptr
->dlength
;
226 resp_hdr_ptr
->itt
= build_itt(cqe
->cqe_solicited
.itid
,
228 resp_hdr_ptr
->statsn
= cpu_to_be32(cqe_tmp_response
->stat_sn
);
229 resp_hdr_ptr
->exp_cmdsn
= cpu_to_be32(cqe_tmp_response
->exp_cmd_sn
);
230 resp_hdr_ptr
->max_cmdsn
= cpu_to_be32(cqe_tmp_response
->max_cmd_sn
);
232 tmf_hdr
= (struct iscsi_tm
*)qedi_cmd
->task
->hdr
;
234 if (likely(qedi_cmd
->io_cmd_in_list
)) {
235 qedi_cmd
->io_cmd_in_list
= false;
236 list_del_init(&qedi_cmd
->io_cmd
);
237 qedi_conn
->active_cmd_count
--;
240 if (((tmf_hdr
->flags
& ISCSI_FLAG_TM_FUNC_MASK
) ==
241 ISCSI_TM_FUNC_LOGICAL_UNIT_RESET
) ||
242 ((tmf_hdr
->flags
& ISCSI_FLAG_TM_FUNC_MASK
) ==
243 ISCSI_TM_FUNC_TARGET_WARM_RESET
) ||
244 ((tmf_hdr
->flags
& ISCSI_FLAG_TM_FUNC_MASK
) ==
245 ISCSI_TM_FUNC_TARGET_COLD_RESET
)) {
246 INIT_WORK(&qedi_cmd
->tmf_work
, qedi_tmf_resp_work
);
247 queue_work(qedi
->tmf_thread
, &qedi_cmd
->tmf_work
);
251 qedi_clear_task_idx(qedi
, qedi_cmd
->task_id
);
253 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)resp_hdr_ptr
, NULL
, 0);
257 spin_unlock(&session
->back_lock
);
260 static void qedi_process_login_resp(struct qedi_ctx
*qedi
,
261 union iscsi_cqe
*cqe
,
262 struct iscsi_task
*task
,
263 struct qedi_conn
*qedi_conn
)
265 struct iscsi_conn
*conn
= qedi_conn
->cls_conn
->dd_data
;
266 struct iscsi_session
*session
= conn
->session
;
267 struct iscsi_task_context
*task_ctx
;
268 struct iscsi_login_rsp
*resp_hdr_ptr
;
269 struct iscsi_login_response_hdr
*cqe_login_response
;
270 struct qedi_cmd
*cmd
;
274 cmd
= (struct qedi_cmd
*)task
->dd_data
;
276 cqe_login_response
= &cqe
->cqe_common
.iscsi_hdr
.login_response
;
277 task_ctx
= qedi_get_task_mem(&qedi
->tasks
, cmd
->task_id
);
279 spin_lock(&session
->back_lock
);
280 resp_hdr_ptr
= (struct iscsi_login_rsp
*)&qedi_conn
->gen_pdu
.resp_hdr
;
281 memset(resp_hdr_ptr
, 0, sizeof(struct iscsi_login_rsp
));
282 resp_hdr_ptr
->opcode
= cqe_login_response
->opcode
;
283 resp_hdr_ptr
->flags
= cqe_login_response
->flags_attr
;
284 resp_hdr_ptr
->hlength
= 0;
286 hton24(resp_hdr_ptr
->dlength
,
287 (cqe_login_response
->hdr_second_dword
&
288 ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK
));
289 tmp
= (u32
*)resp_hdr_ptr
->dlength
;
290 resp_hdr_ptr
->itt
= build_itt(cqe
->cqe_solicited
.itid
,
292 resp_hdr_ptr
->tsih
= cqe_login_response
->tsih
;
293 resp_hdr_ptr
->statsn
= cpu_to_be32(cqe_login_response
->stat_sn
);
294 resp_hdr_ptr
->exp_cmdsn
= cpu_to_be32(cqe_login_response
->exp_cmd_sn
);
295 resp_hdr_ptr
->max_cmdsn
= cpu_to_be32(cqe_login_response
->max_cmd_sn
);
296 resp_hdr_ptr
->status_class
= cqe_login_response
->status_class
;
297 resp_hdr_ptr
->status_detail
= cqe_login_response
->status_detail
;
298 pld_len
= cqe_login_response
->hdr_second_dword
&
299 ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK
;
300 qedi_conn
->gen_pdu
.resp_wr_ptr
= qedi_conn
->gen_pdu
.resp_buf
+ pld_len
;
302 if (likely(cmd
->io_cmd_in_list
)) {
303 cmd
->io_cmd_in_list
= false;
304 list_del_init(&cmd
->io_cmd
);
305 qedi_conn
->active_cmd_count
--;
308 memset(task_ctx
, '\0', sizeof(*task_ctx
));
310 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)resp_hdr_ptr
,
311 qedi_conn
->gen_pdu
.resp_buf
,
312 (qedi_conn
->gen_pdu
.resp_wr_ptr
-
313 qedi_conn
->gen_pdu
.resp_buf
));
315 spin_unlock(&session
->back_lock
);
316 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_TID
,
317 "Freeing tid=0x%x for cid=0x%x\n",
318 cmd
->task_id
, qedi_conn
->iscsi_conn_id
);
319 cmd
->state
= RESPONSE_RECEIVED
;
320 qedi_clear_task_idx(qedi
, cmd
->task_id
);
323 static void qedi_get_rq_bdq_buf(struct qedi_ctx
*qedi
,
324 struct iscsi_cqe_unsolicited
*cqe
,
329 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_CONN
,
330 "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n",
331 len
, qedi
->bdq_prod_idx
,
332 (qedi
->bdq_prod_idx
% qedi
->rq_num_entries
));
334 /* Obtain buffer address from rqe_opaque */
335 idx
= cqe
->rqe_opaque
.lo
;
336 if (idx
> (QEDI_BDQ_NUM
- 1)) {
337 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_CONN
,
338 "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
343 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_CONN
,
344 "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
345 cqe
->rqe_opaque
.lo
, cqe
->rqe_opaque
.hi
, idx
);
347 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_CONN
,
348 "unsol_cqe_type = %d\n", cqe
->unsol_cqe_type
);
349 switch (cqe
->unsol_cqe_type
) {
350 case ISCSI_CQE_UNSOLICITED_SINGLE
:
351 case ISCSI_CQE_UNSOLICITED_FIRST
:
353 memcpy(ptr
, (void *)qedi
->bdq
[idx
].buf_addr
, len
);
355 case ISCSI_CQE_UNSOLICITED_MIDDLE
:
356 case ISCSI_CQE_UNSOLICITED_LAST
:
363 static void qedi_put_rq_bdq_buf(struct qedi_ctx
*qedi
,
364 struct iscsi_cqe_unsolicited
*cqe
,
371 /* Obtain buffer address from rqe_opaque */
372 idx
= cqe
->rqe_opaque
.lo
;
373 if (idx
> (QEDI_BDQ_NUM
- 1)) {
374 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_CONN
,
375 "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
380 pbl
= (struct scsi_bd
*)qedi
->bdq_pbl
;
381 pbl
+= (qedi
->bdq_prod_idx
% qedi
->rq_num_entries
);
382 pbl
->address
.hi
= cpu_to_le32(QEDI_U64_HI(qedi
->bdq
[idx
].buf_dma
));
383 pbl
->address
.lo
= cpu_to_le32(QEDI_U64_LO(qedi
->bdq
[idx
].buf_dma
));
384 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_CONN
,
385 "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
386 pbl
, pbl
->address
.hi
, pbl
->address
.lo
, idx
);
388 pbl
->opaque
.lo
= cpu_to_le32(QEDI_U64_LO(idx
));
390 /* Increment producer to let f/w know we've handled the frame */
391 qedi
->bdq_prod_idx
+= count
;
393 writew(qedi
->bdq_prod_idx
, qedi
->bdq_primary_prod
);
394 tmp
= readw(qedi
->bdq_primary_prod
);
396 writew(qedi
->bdq_prod_idx
, qedi
->bdq_secondary_prod
);
397 tmp
= readw(qedi
->bdq_secondary_prod
);
400 static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx
*qedi
,
401 struct iscsi_cqe_unsolicited
*cqe
,
402 u32 pdu_len
, u32 num_bdqs
,
405 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_CONN
,
406 "num_bdqs [%d]\n", num_bdqs
);
408 qedi_get_rq_bdq_buf(qedi
, cqe
, bdq_data
, pdu_len
);
409 qedi_put_rq_bdq_buf(qedi
, cqe
, (num_bdqs
+ 1));
412 static int qedi_process_nopin_mesg(struct qedi_ctx
*qedi
,
413 union iscsi_cqe
*cqe
,
414 struct iscsi_task
*task
,
415 struct qedi_conn
*qedi_conn
, u16 que_idx
)
417 struct iscsi_conn
*conn
= qedi_conn
->cls_conn
->dd_data
;
418 struct iscsi_session
*session
= conn
->session
;
419 struct iscsi_nop_in_hdr
*cqe_nop_in
;
420 struct iscsi_nopin
*hdr
;
421 struct qedi_cmd
*cmd
;
422 int tgt_async_nop
= 0;
424 u32 pdu_len
, num_bdqs
;
425 char bdq_data
[QEDI_BDQ_BUF_SIZE
];
428 spin_lock_bh(&session
->back_lock
);
429 cqe_nop_in
= &cqe
->cqe_common
.iscsi_hdr
.nop_in
;
431 pdu_len
= cqe_nop_in
->hdr_second_dword
&
432 ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK
;
433 num_bdqs
= pdu_len
/ QEDI_BDQ_BUF_SIZE
;
435 hdr
= (struct iscsi_nopin
*)&qedi_conn
->gen_pdu
.resp_hdr
;
436 memset(hdr
, 0, sizeof(struct iscsi_hdr
));
437 hdr
->opcode
= cqe_nop_in
->opcode
;
438 hdr
->max_cmdsn
= cpu_to_be32(cqe_nop_in
->max_cmd_sn
);
439 hdr
->exp_cmdsn
= cpu_to_be32(cqe_nop_in
->exp_cmd_sn
);
440 hdr
->statsn
= cpu_to_be32(cqe_nop_in
->stat_sn
);
441 hdr
->ttt
= cpu_to_be32(cqe_nop_in
->ttt
);
443 if (cqe
->cqe_common
.cqe_type
== ISCSI_CQE_TYPE_UNSOLICITED
) {
444 spin_lock_irqsave(&qedi
->hba_lock
, flags
);
445 qedi_unsol_pdu_adjust_bdq(qedi
, &cqe
->cqe_unsolicited
,
446 pdu_len
, num_bdqs
, bdq_data
);
447 hdr
->itt
= RESERVED_ITT
;
449 spin_unlock_irqrestore(&qedi
->hba_lock
, flags
);
453 /* Response to one of our nop-outs */
456 hdr
->flags
= ISCSI_FLAG_CMD_FINAL
;
457 hdr
->itt
= build_itt(cqe
->cqe_solicited
.itid
,
461 memcpy(&hdr
->lun
, lun
, sizeof(struct scsi_lun
));
462 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_TID
,
463 "Freeing tid=0x%x for cid=0x%x\n",
464 cmd
->task_id
, qedi_conn
->iscsi_conn_id
);
465 cmd
->state
= RESPONSE_RECEIVED
;
466 spin_lock(&qedi_conn
->list_lock
);
467 if (likely(cmd
->io_cmd_in_list
)) {
468 cmd
->io_cmd_in_list
= false;
469 list_del_init(&cmd
->io_cmd
);
470 qedi_conn
->active_cmd_count
--;
473 spin_unlock(&qedi_conn
->list_lock
);
474 qedi_clear_task_idx(qedi
, cmd
->task_id
);
478 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)hdr
, bdq_data
, pdu_len
);
480 spin_unlock_bh(&session
->back_lock
);
481 return tgt_async_nop
;
484 static void qedi_process_async_mesg(struct qedi_ctx
*qedi
,
485 union iscsi_cqe
*cqe
,
486 struct iscsi_task
*task
,
487 struct qedi_conn
*qedi_conn
,
490 struct iscsi_conn
*conn
= qedi_conn
->cls_conn
->dd_data
;
491 struct iscsi_session
*session
= conn
->session
;
492 struct iscsi_async_msg_hdr
*cqe_async_msg
;
493 struct iscsi_async
*resp_hdr
;
495 u32 pdu_len
, num_bdqs
;
496 char bdq_data
[QEDI_BDQ_BUF_SIZE
];
499 spin_lock_bh(&session
->back_lock
);
501 cqe_async_msg
= &cqe
->cqe_common
.iscsi_hdr
.async_msg
;
502 pdu_len
= cqe_async_msg
->hdr_second_dword
&
503 ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK
;
504 num_bdqs
= pdu_len
/ QEDI_BDQ_BUF_SIZE
;
506 if (cqe
->cqe_common
.cqe_type
== ISCSI_CQE_TYPE_UNSOLICITED
) {
507 spin_lock_irqsave(&qedi
->hba_lock
, flags
);
508 qedi_unsol_pdu_adjust_bdq(qedi
, &cqe
->cqe_unsolicited
,
509 pdu_len
, num_bdqs
, bdq_data
);
510 spin_unlock_irqrestore(&qedi
->hba_lock
, flags
);
513 resp_hdr
= (struct iscsi_async
*)&qedi_conn
->gen_pdu
.resp_hdr
;
514 memset(resp_hdr
, 0, sizeof(struct iscsi_hdr
));
515 resp_hdr
->opcode
= cqe_async_msg
->opcode
;
516 resp_hdr
->flags
= 0x80;
518 lun
[0] = cpu_to_be32(cqe_async_msg
->lun
.lo
);
519 lun
[1] = cpu_to_be32(cqe_async_msg
->lun
.hi
);
520 memcpy(&resp_hdr
->lun
, lun
, sizeof(struct scsi_lun
));
521 resp_hdr
->exp_cmdsn
= cpu_to_be32(cqe_async_msg
->exp_cmd_sn
);
522 resp_hdr
->max_cmdsn
= cpu_to_be32(cqe_async_msg
->max_cmd_sn
);
523 resp_hdr
->statsn
= cpu_to_be32(cqe_async_msg
->stat_sn
);
525 resp_hdr
->async_event
= cqe_async_msg
->async_event
;
526 resp_hdr
->async_vcode
= cqe_async_msg
->async_vcode
;
528 resp_hdr
->param1
= cpu_to_be16(cqe_async_msg
->param1_rsrv
);
529 resp_hdr
->param2
= cpu_to_be16(cqe_async_msg
->param2_rsrv
);
530 resp_hdr
->param3
= cpu_to_be16(cqe_async_msg
->param3_rsrv
);
532 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)resp_hdr
, bdq_data
,
535 spin_unlock_bh(&session
->back_lock
);
538 static void qedi_process_reject_mesg(struct qedi_ctx
*qedi
,
539 union iscsi_cqe
*cqe
,
540 struct iscsi_task
*task
,
541 struct qedi_conn
*qedi_conn
,
544 struct iscsi_conn
*conn
= qedi_conn
->cls_conn
->dd_data
;
545 struct iscsi_session
*session
= conn
->session
;
546 struct iscsi_reject_hdr
*cqe_reject
;
547 struct iscsi_reject
*hdr
;
548 u32 pld_len
, num_bdqs
;
551 spin_lock_bh(&session
->back_lock
);
552 cqe_reject
= &cqe
->cqe_common
.iscsi_hdr
.reject
;
553 pld_len
= cqe_reject
->hdr_second_dword
&
554 ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK
;
555 num_bdqs
= pld_len
/ QEDI_BDQ_BUF_SIZE
;
557 if (cqe
->cqe_common
.cqe_type
== ISCSI_CQE_TYPE_UNSOLICITED
) {
558 spin_lock_irqsave(&qedi
->hba_lock
, flags
);
559 qedi_unsol_pdu_adjust_bdq(qedi
, &cqe
->cqe_unsolicited
,
560 pld_len
, num_bdqs
, conn
->data
);
561 spin_unlock_irqrestore(&qedi
->hba_lock
, flags
);
563 hdr
= (struct iscsi_reject
*)&qedi_conn
->gen_pdu
.resp_hdr
;
564 memset(hdr
, 0, sizeof(struct iscsi_hdr
));
565 hdr
->opcode
= cqe_reject
->opcode
;
566 hdr
->reason
= cqe_reject
->hdr_reason
;
567 hdr
->flags
= cqe_reject
->hdr_flags
;
568 hton24(hdr
->dlength
, (cqe_reject
->hdr_second_dword
&
569 ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK
));
570 hdr
->max_cmdsn
= cpu_to_be32(cqe_reject
->max_cmd_sn
);
571 hdr
->exp_cmdsn
= cpu_to_be32(cqe_reject
->exp_cmd_sn
);
572 hdr
->statsn
= cpu_to_be32(cqe_reject
->stat_sn
);
573 hdr
->ffffffff
= cpu_to_be32(0xffffffff);
575 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)hdr
,
576 conn
->data
, pld_len
);
577 spin_unlock_bh(&session
->back_lock
);
580 static void qedi_scsi_completion(struct qedi_ctx
*qedi
,
581 union iscsi_cqe
*cqe
,
582 struct iscsi_task
*task
,
583 struct iscsi_conn
*conn
)
585 struct scsi_cmnd
*sc_cmd
;
586 struct qedi_cmd
*cmd
= task
->dd_data
;
587 struct iscsi_session
*session
= conn
->session
;
588 struct iscsi_scsi_rsp
*hdr
;
589 struct iscsi_data_in_hdr
*cqe_data_in
;
591 struct qedi_conn
*qedi_conn
;
593 bool mark_cmd_node_deleted
= false;
596 iscsi_cid
= cqe
->cqe_common
.conn_id
;
597 qedi_conn
= qedi
->cid_que
.conn_cid_tbl
[iscsi_cid
];
599 cqe_data_in
= &cqe
->cqe_common
.iscsi_hdr
.data_in
;
601 cqe
->cqe_common
.error_bitmap
.error_bits
.cqe_error_status_bits
;
603 spin_lock_bh(&session
->back_lock
);
604 /* get the scsi command */
605 sc_cmd
= cmd
->scsi_cmd
;
608 QEDI_WARN(&qedi
->dbg_ctx
, "sc_cmd is NULL!\n");
612 if (!sc_cmd
->SCp
.ptr
) {
613 QEDI_WARN(&qedi
->dbg_ctx
,
614 "SCp.ptr is NULL, returned in another context.\n");
618 if (!sc_cmd
->request
) {
619 QEDI_WARN(&qedi
->dbg_ctx
,
620 "sc_cmd->request is NULL, sc_cmd=%p.\n",
625 if (!sc_cmd
->request
->special
) {
626 QEDI_WARN(&qedi
->dbg_ctx
,
627 "request->special is NULL so request not valid, sc_cmd=%p.\n",
632 if (!sc_cmd
->request
->q
) {
633 QEDI_WARN(&qedi
->dbg_ctx
,
634 "request->q is NULL so request is not valid, sc_cmd=%p.\n",
639 qedi_iscsi_unmap_sg_list(cmd
);
641 hdr
= (struct iscsi_scsi_rsp
*)task
->hdr
;
642 hdr
->opcode
= cqe_data_in
->opcode
;
643 hdr
->max_cmdsn
= cpu_to_be32(cqe_data_in
->max_cmd_sn
);
644 hdr
->exp_cmdsn
= cpu_to_be32(cqe_data_in
->exp_cmd_sn
);
645 hdr
->itt
= build_itt(cqe
->cqe_solicited
.itid
, conn
->session
->age
);
646 hdr
->response
= cqe_data_in
->reserved1
;
647 hdr
->cmd_status
= cqe_data_in
->status_rsvd
;
648 hdr
->flags
= cqe_data_in
->flags
;
649 hdr
->residual_count
= cpu_to_be32(cqe_data_in
->residual_count
);
651 if (hdr
->cmd_status
== SAM_STAT_CHECK_CONDITION
) {
652 datalen
= cqe_data_in
->reserved2
&
653 ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK
;
654 memcpy((char *)conn
->data
, (char *)cmd
->sense_buffer
, datalen
);
657 /* If f/w reports data underrun err then set residual to IO transfer
658 * length, set Underrun flag and clear Overrun flag explicitly
660 if (unlikely(cqe_err_bits
&&
661 GET_FIELD(cqe_err_bits
, CQE_ERROR_BITMAP_UNDER_RUN_ERR
))) {
662 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_INFO
,
663 "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n",
664 hdr
->itt
, cqe_data_in
->flags
, cmd
->task_id
,
665 qedi_conn
->iscsi_conn_id
, hdr
->residual_count
,
666 scsi_bufflen(sc_cmd
));
667 hdr
->residual_count
= cpu_to_be32(scsi_bufflen(sc_cmd
));
668 hdr
->flags
|= ISCSI_FLAG_CMD_UNDERFLOW
;
669 hdr
->flags
&= (~ISCSI_FLAG_CMD_OVERFLOW
);
672 spin_lock(&qedi_conn
->list_lock
);
673 if (likely(cmd
->io_cmd_in_list
)) {
674 cmd
->io_cmd_in_list
= false;
675 list_del_init(&cmd
->io_cmd
);
676 qedi_conn
->active_cmd_count
--;
677 mark_cmd_node_deleted
= true;
679 spin_unlock(&qedi_conn
->list_lock
);
681 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_TID
,
682 "Freeing tid=0x%x for cid=0x%x\n",
683 cmd
->task_id
, qedi_conn
->iscsi_conn_id
);
684 cmd
->state
= RESPONSE_RECEIVED
;
686 qedi_trace_io(qedi
, task
, cmd
->task_id
, QEDI_IO_TRACE_RSP
);
688 qedi_clear_task_idx(qedi
, cmd
->task_id
);
689 __iscsi_complete_pdu(conn
, (struct iscsi_hdr
*)hdr
,
690 conn
->data
, datalen
);
692 spin_unlock_bh(&session
->back_lock
);
695 static void qedi_mtask_completion(struct qedi_ctx
*qedi
,
696 union iscsi_cqe
*cqe
,
697 struct iscsi_task
*task
,
698 struct qedi_conn
*conn
, uint16_t que_idx
)
700 struct iscsi_conn
*iscsi_conn
;
703 hdr_opcode
= cqe
->cqe_common
.iscsi_hdr
.common
.hdr_first_byte
;
704 iscsi_conn
= conn
->cls_conn
->dd_data
;
706 switch (hdr_opcode
) {
707 case ISCSI_OPCODE_SCSI_RESPONSE
:
708 case ISCSI_OPCODE_DATA_IN
:
709 qedi_scsi_completion(qedi
, cqe
, task
, iscsi_conn
);
711 case ISCSI_OPCODE_LOGIN_RESPONSE
:
712 qedi_process_login_resp(qedi
, cqe
, task
, conn
);
714 case ISCSI_OPCODE_TMF_RESPONSE
:
715 qedi_process_tmf_resp(qedi
, cqe
, task
, conn
);
717 case ISCSI_OPCODE_TEXT_RESPONSE
:
718 qedi_process_text_resp(qedi
, cqe
, task
, conn
);
720 case ISCSI_OPCODE_LOGOUT_RESPONSE
:
721 qedi_process_logout_resp(qedi
, cqe
, task
, conn
);
723 case ISCSI_OPCODE_NOP_IN
:
724 qedi_process_nopin_mesg(qedi
, cqe
, task
, conn
, que_idx
);
727 QEDI_ERR(&qedi
->dbg_ctx
, "unknown opcode\n");
731 static void qedi_process_nopin_local_cmpl(struct qedi_ctx
*qedi
,
732 struct iscsi_cqe_solicited
*cqe
,
733 struct iscsi_task
*task
,
734 struct qedi_conn
*qedi_conn
)
736 struct iscsi_conn
*conn
= qedi_conn
->cls_conn
->dd_data
;
737 struct iscsi_session
*session
= conn
->session
;
738 struct qedi_cmd
*cmd
= task
->dd_data
;
740 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_UNSOL
,
741 "itid=0x%x, cmd task id=0x%x\n",
742 cqe
->itid
, cmd
->task_id
);
744 cmd
->state
= RESPONSE_RECEIVED
;
745 qedi_clear_task_idx(qedi
, cmd
->task_id
);
747 spin_lock_bh(&session
->back_lock
);
748 __iscsi_put_task(task
);
749 spin_unlock_bh(&session
->back_lock
);
752 static void qedi_process_cmd_cleanup_resp(struct qedi_ctx
*qedi
,
753 struct iscsi_cqe_solicited
*cqe
,
754 struct iscsi_task
*task
,
755 struct iscsi_conn
*conn
)
757 struct qedi_work_map
*work
, *work_tmp
;
758 u32 proto_itt
= cqe
->itid
;
762 struct qedi_cmd
*qedi_cmd
= NULL
;
765 struct qedi_conn
*qedi_conn
;
766 struct qedi_cmd
*cmd_new
, *dbg_cmd
;
767 struct iscsi_task
*mtask
;
768 struct iscsi_tm
*tmf_hdr
= NULL
;
770 iscsi_cid
= cqe
->conn_id
;
771 qedi_conn
= qedi
->cid_que
.conn_cid_tbl
[iscsi_cid
];
773 /* Based on this itt get the corresponding qedi_cmd */
774 spin_lock_bh(&qedi_conn
->tmf_work_lock
);
775 list_for_each_entry_safe(work
, work_tmp
, &qedi_conn
->tmf_work_list
,
777 if (work
->rtid
== proto_itt
) {
778 /* We found the command */
779 qedi_cmd
= work
->qedi_cmd
;
780 if (!qedi_cmd
->list_tmf_work
) {
781 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_SCSI_TM
,
782 "TMF work not found, cqe->tid=0x%x, cid=0x%x\n",
783 proto_itt
, qedi_conn
->iscsi_conn_id
);
787 mtask
= qedi_cmd
->task
;
788 tmf_hdr
= (struct iscsi_tm
*)mtask
->hdr
;
791 list_del_init(&work
->list
);
793 qedi_cmd
->list_tmf_work
= NULL
;
796 spin_unlock_bh(&qedi_conn
->tmf_work_lock
);
799 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_SCSI_TM
,
800 "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
801 proto_itt
, tmf_hdr
->flags
, qedi_conn
->iscsi_conn_id
);
803 if ((tmf_hdr
->flags
& ISCSI_FLAG_TM_FUNC_MASK
) ==
804 ISCSI_TM_FUNC_ABORT_TASK
) {
805 spin_lock_bh(&conn
->session
->back_lock
);
807 protoitt
= build_itt(get_itt(tmf_hdr
->rtt
),
809 task
= iscsi_itt_to_task(conn
, protoitt
);
811 spin_unlock_bh(&conn
->session
->back_lock
);
814 QEDI_NOTICE(&qedi
->dbg_ctx
,
815 "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
816 get_itt(tmf_hdr
->rtt
),
817 qedi_conn
->iscsi_conn_id
);
821 dbg_cmd
= task
->dd_data
;
823 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_SCSI_TM
,
824 "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
825 get_itt(tmf_hdr
->rtt
), get_itt(task
->itt
),
826 dbg_cmd
->task_id
, qedi_conn
->iscsi_conn_id
);
828 if (qedi_cmd
->state
== CLEANUP_WAIT_FAILED
)
829 qedi_cmd
->state
= CLEANUP_RECV
;
831 qedi_clear_task_idx(qedi_conn
->qedi
, rtid
);
833 spin_lock(&qedi_conn
->list_lock
);
834 list_del_init(&dbg_cmd
->io_cmd
);
835 qedi_conn
->active_cmd_count
--;
836 spin_unlock(&qedi_conn
->list_lock
);
837 qedi_cmd
->state
= CLEANUP_RECV
;
838 wake_up_interruptible(&qedi_conn
->wait_queue
);
840 } else if (qedi_conn
->cmd_cleanup_req
> 0) {
841 spin_lock_bh(&conn
->session
->back_lock
);
842 qedi_get_proto_itt(qedi
, cqe
->itid
, &ptmp_itt
);
843 protoitt
= build_itt(ptmp_itt
, conn
->session
->age
);
844 task
= iscsi_itt_to_task(conn
, protoitt
);
845 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_SCSI_TM
,
846 "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n",
847 cqe
->itid
, protoitt
, qedi_conn
->cmd_cleanup_cmpl
,
848 qedi_conn
->iscsi_conn_id
);
850 spin_unlock_bh(&conn
->session
->back_lock
);
852 QEDI_NOTICE(&qedi
->dbg_ctx
,
853 "task is null, itid=0x%x, cid=0x%x\n",
854 cqe
->itid
, qedi_conn
->iscsi_conn_id
);
857 qedi_conn
->cmd_cleanup_cmpl
++;
858 wake_up(&qedi_conn
->wait_queue
);
859 cmd_new
= task
->dd_data
;
861 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_TID
,
862 "Freeing tid=0x%x for cid=0x%x\n",
863 cqe
->itid
, qedi_conn
->iscsi_conn_id
);
864 qedi_clear_task_idx(qedi_conn
->qedi
, cqe
->itid
);
867 qedi_get_proto_itt(qedi
, cqe
->itid
, &ptmp_itt
);
868 protoitt
= build_itt(ptmp_itt
, conn
->session
->age
);
869 task
= iscsi_itt_to_task(conn
, protoitt
);
870 QEDI_ERR(&qedi
->dbg_ctx
,
871 "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
872 protoitt
, cqe
->itid
, qedi_conn
->iscsi_conn_id
, task
);
876 void qedi_fp_process_cqes(struct qedi_work
*work
)
878 struct qedi_ctx
*qedi
= work
->qedi
;
879 union iscsi_cqe
*cqe
= &work
->cqe
;
880 struct iscsi_task
*task
= NULL
;
881 struct iscsi_nopout
*nopout_hdr
;
882 struct qedi_conn
*q_conn
;
883 struct iscsi_conn
*conn
;
884 struct qedi_cmd
*qedi_cmd
;
888 u16 que_idx
= work
->que_idx
;
891 comp_type
= cqe
->cqe_common
.cqe_type
;
892 hdr_opcode
= cqe
->cqe_common
.iscsi_hdr
.common
.hdr_first_byte
;
894 cqe
->cqe_common
.error_bitmap
.error_bits
.cqe_error_status_bits
;
896 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_CONN
,
897 "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n",
898 cqe
->cqe_common
.conn_id
, comp_type
, hdr_opcode
);
900 if (comp_type
>= MAX_ISCSI_CQES_TYPE
) {
901 QEDI_WARN(&qedi
->dbg_ctx
, "Invalid CqE type\n");
905 iscsi_cid
= cqe
->cqe_common
.conn_id
;
906 q_conn
= qedi
->cid_que
.conn_cid_tbl
[iscsi_cid
];
908 QEDI_WARN(&qedi
->dbg_ctx
,
909 "Session no longer exists for cid=0x%x!!\n",
914 conn
= q_conn
->cls_conn
->dd_data
;
916 if (unlikely(cqe_err_bits
&&
917 GET_FIELD(cqe_err_bits
,
918 CQE_ERROR_BITMAP_DATA_DIGEST_ERR
))) {
919 iscsi_conn_failure(conn
, ISCSI_ERR_DATA_DGST
);
924 case ISCSI_CQE_TYPE_SOLICITED
:
925 case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE
:
926 qedi_cmd
= container_of(work
, struct qedi_cmd
, cqe_work
);
927 task
= qedi_cmd
->task
;
929 QEDI_WARN(&qedi
->dbg_ctx
, "task is NULL\n");
933 /* Process NOPIN local completion */
934 nopout_hdr
= (struct iscsi_nopout
*)task
->hdr
;
935 if ((nopout_hdr
->itt
== RESERVED_ITT
) &&
936 (cqe
->cqe_solicited
.itid
!= (u16
)RESERVED_ITT
)) {
937 qedi_process_nopin_local_cmpl(qedi
, &cqe
->cqe_solicited
,
940 cqe
->cqe_solicited
.itid
=
941 qedi_get_itt(cqe
->cqe_solicited
);
942 /* Process other solicited responses */
943 qedi_mtask_completion(qedi
, cqe
, task
, q_conn
, que_idx
);
946 case ISCSI_CQE_TYPE_UNSOLICITED
:
947 switch (hdr_opcode
) {
948 case ISCSI_OPCODE_NOP_IN
:
949 qedi_process_nopin_mesg(qedi
, cqe
, task
, q_conn
,
952 case ISCSI_OPCODE_ASYNC_MSG
:
953 qedi_process_async_mesg(qedi
, cqe
, task
, q_conn
,
956 case ISCSI_OPCODE_REJECT
:
957 qedi_process_reject_mesg(qedi
, cqe
, task
, q_conn
,
961 goto exit_fp_process
;
962 case ISCSI_CQE_TYPE_DUMMY
:
963 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_SCSI_TM
, "Dummy CqE\n");
964 goto exit_fp_process
;
965 case ISCSI_CQE_TYPE_TASK_CLEANUP
:
966 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_SCSI_TM
, "CleanUp CqE\n");
967 qedi_process_cmd_cleanup_resp(qedi
, &cqe
->cqe_solicited
, task
,
969 goto exit_fp_process
;
971 QEDI_ERR(&qedi
->dbg_ctx
, "Error cqe.\n");
979 static void qedi_ring_doorbell(struct qedi_conn
*qedi_conn
)
981 struct iscsi_db_data dbell
= { 0 };
985 dbell
.params
|= DB_DEST_XCM
<< ISCSI_DB_DATA_DEST_SHIFT
;
986 dbell
.params
|= DB_AGG_CMD_SET
<< ISCSI_DB_DATA_AGG_CMD_SHIFT
;
988 DQ_XCM_ISCSI_SQ_PROD_CMD
<< ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT
;
990 dbell
.sq_prod
= qedi_conn
->ep
->fw_sq_prod_idx
;
991 writel(*(u32
*)&dbell
, qedi_conn
->ep
->p_doorbell
);
993 /* Make sure fw write idx is coherent, and include both memory barriers
994 * as a failsafe as for some architectures the call is the same but on
995 * others they are two different assembly operations.
999 QEDI_INFO(&qedi_conn
->qedi
->dbg_ctx
, QEDI_LOG_MP_REQ
,
1000 "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
1001 qedi_conn
->ep
->sq_prod_idx
, qedi_conn
->ep
->fw_sq_prod_idx
,
1002 qedi_conn
->iscsi_conn_id
);
1005 static u16
qedi_get_wqe_idx(struct qedi_conn
*qedi_conn
)
1007 struct qedi_endpoint
*ep
;
1011 rval
= ep
->sq_prod_idx
;
1013 /* Increament SQ index */
1015 ep
->fw_sq_prod_idx
++;
1016 if (ep
->sq_prod_idx
== QEDI_SQ_SIZE
)
1017 ep
->sq_prod_idx
= 0;
1022 int qedi_send_iscsi_login(struct qedi_conn
*qedi_conn
,
1023 struct iscsi_task
*task
)
1025 struct iscsi_login_req_hdr login_req_pdu_header
;
1026 struct scsi_sgl_task_params tx_sgl_task_params
;
1027 struct scsi_sgl_task_params rx_sgl_task_params
;
1028 struct iscsi_task_params task_params
;
1029 struct iscsi_task_context
*fw_task_ctx
;
1030 struct qedi_ctx
*qedi
= qedi_conn
->qedi
;
1031 struct iscsi_login_req
*login_hdr
;
1032 struct scsi_sge
*req_sge
= NULL
;
1033 struct scsi_sge
*resp_sge
= NULL
;
1034 struct qedi_cmd
*qedi_cmd
;
1035 struct qedi_endpoint
*ep
;
1040 req_sge
= (struct scsi_sge
*)qedi_conn
->gen_pdu
.req_bd_tbl
;
1041 resp_sge
= (struct scsi_sge
*)qedi_conn
->gen_pdu
.resp_bd_tbl
;
1042 qedi_cmd
= (struct qedi_cmd
*)task
->dd_data
;
1044 login_hdr
= (struct iscsi_login_req
*)task
->hdr
;
1046 tid
= qedi_get_task_idx(qedi
);
1051 (struct iscsi_task_context
*)qedi_get_task_mem(&qedi
->tasks
, tid
);
1052 memset(fw_task_ctx
, 0, sizeof(struct iscsi_task_context
));
1054 qedi_cmd
->task_id
= tid
;
1056 memset(&task_params
, 0, sizeof(task_params
));
1057 memset(&login_req_pdu_header
, 0, sizeof(login_req_pdu_header
));
1058 memset(&tx_sgl_task_params
, 0, sizeof(tx_sgl_task_params
));
1059 memset(&rx_sgl_task_params
, 0, sizeof(rx_sgl_task_params
));
1060 /* Update header info */
1061 login_req_pdu_header
.opcode
= login_hdr
->opcode
;
1062 login_req_pdu_header
.version_min
= login_hdr
->min_version
;
1063 login_req_pdu_header
.version_max
= login_hdr
->max_version
;
1064 login_req_pdu_header
.flags_attr
= login_hdr
->flags
;
1065 login_req_pdu_header
.isid_tabc
= swab32p((u32
*)login_hdr
->isid
);
1066 login_req_pdu_header
.isid_d
= swab16p((u16
*)&login_hdr
->isid
[4]);
1068 login_req_pdu_header
.tsih
= login_hdr
->tsih
;
1069 login_req_pdu_header
.hdr_second_dword
= ntoh24(login_hdr
->dlength
);
1071 qedi_update_itt_map(qedi
, tid
, task
->itt
, qedi_cmd
);
1072 login_req_pdu_header
.itt
= qedi_set_itt(tid
, get_itt(task
->itt
));
1073 login_req_pdu_header
.cid
= qedi_conn
->iscsi_conn_id
;
1074 login_req_pdu_header
.cmd_sn
= be32_to_cpu(login_hdr
->cmdsn
);
1075 login_req_pdu_header
.exp_stat_sn
= be32_to_cpu(login_hdr
->exp_statsn
);
1076 login_req_pdu_header
.exp_stat_sn
= 0;
1078 /* Fill tx AHS and rx buffer */
1079 tx_sgl_task_params
.sgl
=
1080 (struct scsi_sge
*)qedi_conn
->gen_pdu
.req_bd_tbl
;
1081 tx_sgl_task_params
.sgl_phys_addr
.lo
=
1082 (u32
)(qedi_conn
->gen_pdu
.req_dma_addr
);
1083 tx_sgl_task_params
.sgl_phys_addr
.hi
=
1084 (u32
)((u64
)qedi_conn
->gen_pdu
.req_dma_addr
>> 32);
1085 tx_sgl_task_params
.total_buffer_size
= ntoh24(login_hdr
->dlength
);
1086 tx_sgl_task_params
.num_sges
= 1;
1088 rx_sgl_task_params
.sgl
=
1089 (struct scsi_sge
*)qedi_conn
->gen_pdu
.resp_bd_tbl
;
1090 rx_sgl_task_params
.sgl_phys_addr
.lo
=
1091 (u32
)(qedi_conn
->gen_pdu
.resp_dma_addr
);
1092 rx_sgl_task_params
.sgl_phys_addr
.hi
=
1093 (u32
)((u64
)qedi_conn
->gen_pdu
.resp_dma_addr
>> 32);
1094 rx_sgl_task_params
.total_buffer_size
= resp_sge
->sge_len
;
1095 rx_sgl_task_params
.num_sges
= 1;
1097 /* Fill fw input params */
1098 task_params
.context
= fw_task_ctx
;
1099 task_params
.conn_icid
= (u16
)qedi_conn
->iscsi_conn_id
;
1100 task_params
.itid
= tid
;
1101 task_params
.cq_rss_number
= 0;
1102 task_params
.tx_io_size
= ntoh24(login_hdr
->dlength
);
1103 task_params
.rx_io_size
= resp_sge
->sge_len
;
1105 sq_idx
= qedi_get_wqe_idx(qedi_conn
);
1106 task_params
.sqe
= &ep
->sq
[sq_idx
];
1108 memset(task_params
.sqe
, 0, sizeof(struct iscsi_wqe
));
1109 rval
= init_initiator_login_request_task(&task_params
,
1110 &login_req_pdu_header
,
1111 &tx_sgl_task_params
,
1112 &rx_sgl_task_params
);
1116 spin_lock(&qedi_conn
->list_lock
);
1117 list_add_tail(&qedi_cmd
->io_cmd
, &qedi_conn
->active_cmd_list
);
1118 qedi_cmd
->io_cmd_in_list
= true;
1119 qedi_conn
->active_cmd_count
++;
1120 spin_unlock(&qedi_conn
->list_lock
);
1122 qedi_ring_doorbell(qedi_conn
);
1126 int qedi_send_iscsi_logout(struct qedi_conn
*qedi_conn
,
1127 struct iscsi_task
*task
)
1129 struct iscsi_logout_req_hdr logout_pdu_header
;
1130 struct scsi_sgl_task_params tx_sgl_task_params
;
1131 struct scsi_sgl_task_params rx_sgl_task_params
;
1132 struct iscsi_task_params task_params
;
1133 struct iscsi_task_context
*fw_task_ctx
;
1134 struct iscsi_logout
*logout_hdr
= NULL
;
1135 struct qedi_ctx
*qedi
= qedi_conn
->qedi
;
1136 struct qedi_cmd
*qedi_cmd
;
1137 struct qedi_endpoint
*ep
;
1142 qedi_cmd
= (struct qedi_cmd
*)task
->dd_data
;
1143 logout_hdr
= (struct iscsi_logout
*)task
->hdr
;
1146 tid
= qedi_get_task_idx(qedi
);
1151 (struct iscsi_task_context
*)qedi_get_task_mem(&qedi
->tasks
, tid
);
1152 memset(fw_task_ctx
, 0, sizeof(struct iscsi_task_context
));
1154 qedi_cmd
->task_id
= tid
;
1156 memset(&task_params
, 0, sizeof(task_params
));
1157 memset(&logout_pdu_header
, 0, sizeof(logout_pdu_header
));
1158 memset(&tx_sgl_task_params
, 0, sizeof(tx_sgl_task_params
));
1159 memset(&rx_sgl_task_params
, 0, sizeof(rx_sgl_task_params
));
1161 /* Update header info */
1162 logout_pdu_header
.opcode
= logout_hdr
->opcode
;
1163 logout_pdu_header
.reason_code
= 0x80 | logout_hdr
->flags
;
1164 qedi_update_itt_map(qedi
, tid
, task
->itt
, qedi_cmd
);
1165 logout_pdu_header
.itt
= qedi_set_itt(tid
, get_itt(task
->itt
));
1166 logout_pdu_header
.exp_stat_sn
= be32_to_cpu(logout_hdr
->exp_statsn
);
1167 logout_pdu_header
.cmd_sn
= be32_to_cpu(logout_hdr
->cmdsn
);
1168 logout_pdu_header
.cid
= qedi_conn
->iscsi_conn_id
;
1170 /* Fill fw input params */
1171 task_params
.context
= fw_task_ctx
;
1172 task_params
.conn_icid
= (u16
)qedi_conn
->iscsi_conn_id
;
1173 task_params
.itid
= tid
;
1174 task_params
.cq_rss_number
= 0;
1175 task_params
.tx_io_size
= 0;
1176 task_params
.rx_io_size
= 0;
1178 sq_idx
= qedi_get_wqe_idx(qedi_conn
);
1179 task_params
.sqe
= &ep
->sq
[sq_idx
];
1180 memset(task_params
.sqe
, 0, sizeof(struct iscsi_wqe
));
1182 rval
= init_initiator_logout_request_task(&task_params
,
1188 spin_lock(&qedi_conn
->list_lock
);
1189 list_add_tail(&qedi_cmd
->io_cmd
, &qedi_conn
->active_cmd_list
);
1190 qedi_cmd
->io_cmd_in_list
= true;
1191 qedi_conn
->active_cmd_count
++;
1192 spin_unlock(&qedi_conn
->list_lock
);
1194 qedi_ring_doorbell(qedi_conn
);
1198 int qedi_cleanup_all_io(struct qedi_ctx
*qedi
, struct qedi_conn
*qedi_conn
,
1199 struct iscsi_task
*task
, bool in_recovery
)
1202 struct iscsi_task
*ctask
;
1203 struct qedi_cmd
*cmd
, *cmd_tmp
;
1204 struct iscsi_tm
*tmf_hdr
;
1205 unsigned int lun
= 0;
1206 bool lun_reset
= false;
1207 struct iscsi_conn
*conn
= qedi_conn
->cls_conn
->dd_data
;
1208 struct iscsi_session
*session
= conn
->session
;
1210 /* From recovery, task is NULL or from tmf resp valid task */
1212 tmf_hdr
= (struct iscsi_tm
*)task
->hdr
;
1214 if ((tmf_hdr
->flags
& ISCSI_FLAG_TM_FUNC_MASK
) ==
1215 ISCSI_TM_FUNC_LOGICAL_UNIT_RESET
) {
1217 lun
= scsilun_to_int(&tmf_hdr
->lun
);
1221 qedi_conn
->cmd_cleanup_req
= 0;
1222 qedi_conn
->cmd_cleanup_cmpl
= 0;
1224 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_SCSI_TM
,
1225 "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
1226 qedi_conn
->active_cmd_count
, qedi_conn
->iscsi_conn_id
,
1227 in_recovery
, lun_reset
);
1230 spin_lock_bh(&session
->back_lock
);
1232 spin_lock(&qedi_conn
->list_lock
);
1234 list_for_each_entry_safe(cmd
, cmd_tmp
, &qedi_conn
->active_cmd_list
,
1241 if (cmd
->scsi_cmd
&& cmd
->scsi_cmd
->device
) {
1242 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_SCSI_TM
,
1243 "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n",
1244 cmd
->task_id
, get_itt(ctask
->itt
),
1245 cmd
->scsi_cmd
, cmd
->scsi_cmd
->device
,
1246 ctask
->state
, cmd
->state
,
1247 qedi_conn
->iscsi_conn_id
);
1248 if (cmd
->scsi_cmd
->device
->lun
!= lun
)
1252 qedi_conn
->cmd_cleanup_req
++;
1253 qedi_iscsi_cleanup_task(ctask
, true);
1255 list_del_init(&cmd
->io_cmd
);
1256 qedi_conn
->active_cmd_count
--;
1257 QEDI_WARN(&qedi
->dbg_ctx
,
1258 "Deleted active cmd list node io_cmd=%p, cid=0x%x\n",
1259 &cmd
->io_cmd
, qedi_conn
->iscsi_conn_id
);
1262 spin_unlock(&qedi_conn
->list_lock
);
1265 spin_unlock_bh(&session
->back_lock
);
1267 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_SCSI_TM
,
1268 "cmd_cleanup_req=%d, cid=0x%x\n",
1269 qedi_conn
->cmd_cleanup_req
,
1270 qedi_conn
->iscsi_conn_id
);
1272 rval
= wait_event_interruptible_timeout(qedi_conn
->wait_queue
,
1273 ((qedi_conn
->cmd_cleanup_req
==
1274 qedi_conn
->cmd_cleanup_cmpl
) ||
1278 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_SCSI_TM
,
1279 "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
1280 qedi_conn
->cmd_cleanup_req
,
1281 qedi_conn
->cmd_cleanup_cmpl
,
1282 qedi_conn
->iscsi_conn_id
);
1287 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_SCSI_TM
,
1288 "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
1289 qedi_conn
->cmd_cleanup_req
,
1290 qedi_conn
->cmd_cleanup_cmpl
,
1291 qedi_conn
->iscsi_conn_id
);
1293 iscsi_host_for_each_session(qedi
->shost
,
1294 qedi_mark_device_missing
);
1295 qedi_ops
->common
->drain(qedi
->cdev
);
1297 /* Enable IOs for all other sessions except current.*/
1298 if (!wait_event_interruptible_timeout(qedi_conn
->wait_queue
,
1299 (qedi_conn
->cmd_cleanup_req
==
1300 qedi_conn
->cmd_cleanup_cmpl
),
1302 iscsi_host_for_each_session(qedi
->shost
,
1303 qedi_mark_device_available
);
1307 iscsi_host_for_each_session(qedi
->shost
,
1308 qedi_mark_device_available
);
1313 void qedi_clearsq(struct qedi_ctx
*qedi
, struct qedi_conn
*qedi_conn
,
1314 struct iscsi_task
*task
)
1316 struct qedi_endpoint
*qedi_ep
;
1319 qedi_ep
= qedi_conn
->ep
;
1320 qedi_conn
->cmd_cleanup_req
= 0;
1321 qedi_conn
->cmd_cleanup_cmpl
= 0;
1324 QEDI_WARN(&qedi
->dbg_ctx
,
1325 "Cannot proceed, ep already disconnected, cid=0x%x\n",
1326 qedi_conn
->iscsi_conn_id
);
1330 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_INFO
,
1331 "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n",
1332 qedi_conn
->iscsi_conn_id
, qedi_conn
, qedi_ep
);
1334 qedi_ops
->clear_sq(qedi
->cdev
, qedi_ep
->handle
);
1336 rval
= qedi_cleanup_all_io(qedi
, qedi_conn
, task
, true);
1338 QEDI_ERR(&qedi
->dbg_ctx
,
1339 "fatal error, need hard reset, cid=0x%x\n",
1340 qedi_conn
->iscsi_conn_id
);
1345 static int qedi_wait_for_cleanup_request(struct qedi_ctx
*qedi
,
1346 struct qedi_conn
*qedi_conn
,
1347 struct iscsi_task
*task
,
1348 struct qedi_cmd
*qedi_cmd
,
1349 struct qedi_work_map
*list_work
)
1351 struct qedi_cmd
*cmd
= (struct qedi_cmd
*)task
->dd_data
;
1354 wait
= wait_event_interruptible_timeout(qedi_conn
->wait_queue
,
1355 ((qedi_cmd
->state
==
1357 ((qedi_cmd
->type
== TYPEIO
) &&
1359 RESPONSE_RECEIVED
))),
1362 qedi_cmd
->state
= CLEANUP_WAIT_FAILED
;
1364 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_SCSI_TM
,
1365 "Cleanup timedout tid=0x%x, issue connection recovery, cid=0x%x\n",
1366 cmd
->task_id
, qedi_conn
->iscsi_conn_id
);
1373 static void qedi_tmf_work(struct work_struct
*work
)
1375 struct qedi_cmd
*qedi_cmd
=
1376 container_of(work
, struct qedi_cmd
, tmf_work
);
1377 struct qedi_conn
*qedi_conn
= qedi_cmd
->conn
;
1378 struct qedi_ctx
*qedi
= qedi_conn
->qedi
;
1379 struct iscsi_conn
*conn
= qedi_conn
->cls_conn
->dd_data
;
1380 struct iscsi_cls_session
*cls_sess
;
1381 struct qedi_work_map
*list_work
= NULL
;
1382 struct iscsi_task
*mtask
;
1383 struct qedi_cmd
*cmd
;
1384 struct iscsi_task
*ctask
;
1385 struct iscsi_tm
*tmf_hdr
;
1389 mtask
= qedi_cmd
->task
;
1390 tmf_hdr
= (struct iscsi_tm
*)mtask
->hdr
;
1391 cls_sess
= iscsi_conn_to_session(qedi_conn
->cls_conn
);
1392 set_bit(QEDI_CONN_FW_CLEANUP
, &qedi_conn
->flags
);
1394 ctask
= iscsi_itt_to_task(conn
, tmf_hdr
->rtt
);
1395 if (!ctask
|| !ctask
->sc
) {
1396 QEDI_ERR(&qedi
->dbg_ctx
, "Task already completed\n");
1400 cmd
= (struct qedi_cmd
*)ctask
->dd_data
;
1401 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_INFO
,
1402 "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n",
1403 get_itt(tmf_hdr
->rtt
), get_itt(ctask
->itt
), cmd
->task_id
,
1404 qedi_conn
->iscsi_conn_id
);
1406 if (qedi_do_not_recover
) {
1407 QEDI_ERR(&qedi
->dbg_ctx
, "DONT SEND CLEANUP/ABORT %d\n",
1408 qedi_do_not_recover
);
1412 list_work
= kzalloc(sizeof(*list_work
), GFP_ATOMIC
);
1414 QEDI_ERR(&qedi
->dbg_ctx
, "Memory alloction failed\n");
1418 qedi_cmd
->type
= TYPEIO
;
1419 list_work
->qedi_cmd
= qedi_cmd
;
1420 list_work
->rtid
= cmd
->task_id
;
1421 list_work
->state
= QEDI_WORK_SCHEDULED
;
1422 qedi_cmd
->list_tmf_work
= list_work
;
1424 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_SCSI_TM
,
1425 "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n",
1426 list_work
->ptr_tmf_work
, list_work
, qedi_conn
->iscsi_conn_id
,
1429 spin_lock_bh(&qedi_conn
->tmf_work_lock
);
1430 list_add_tail(&list_work
->list
, &qedi_conn
->tmf_work_list
);
1431 spin_unlock_bh(&qedi_conn
->tmf_work_lock
);
1433 qedi_iscsi_cleanup_task(ctask
, false);
1435 rval
= qedi_wait_for_cleanup_request(qedi
, qedi_conn
, ctask
, qedi_cmd
,
1438 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_INFO
,
1439 "FW cleanup got escalated, cid=0x%x\n",
1440 qedi_conn
->iscsi_conn_id
);
1444 tid
= qedi_get_task_idx(qedi
);
1446 QEDI_ERR(&qedi
->dbg_ctx
, "Invalid tid, cid=0x%x\n",
1447 qedi_conn
->iscsi_conn_id
);
1451 qedi_cmd
->task_id
= tid
;
1452 qedi_send_iscsi_tmf(qedi_conn
, qedi_cmd
->task
);
1455 clear_bit(QEDI_CONN_FW_CLEANUP
, &qedi_conn
->flags
);
1459 spin_lock_bh(&qedi_conn
->tmf_work_lock
);
1460 if (!qedi_cmd
->list_tmf_work
) {
1461 list_del_init(&list_work
->list
);
1462 qedi_cmd
->list_tmf_work
= NULL
;
1465 spin_unlock_bh(&qedi_conn
->tmf_work_lock
);
1467 spin_lock(&qedi_conn
->list_lock
);
1468 list_del_init(&cmd
->io_cmd
);
1469 qedi_conn
->active_cmd_count
--;
1470 spin_unlock(&qedi_conn
->list_lock
);
1472 clear_bit(QEDI_CONN_FW_CLEANUP
, &qedi_conn
->flags
);
1475 static int qedi_send_iscsi_tmf(struct qedi_conn
*qedi_conn
,
1476 struct iscsi_task
*mtask
)
1478 struct iscsi_tmf_request_hdr tmf_pdu_header
;
1479 struct iscsi_task_params task_params
;
1480 struct qedi_ctx
*qedi
= qedi_conn
->qedi
;
1481 struct iscsi_task_context
*fw_task_ctx
;
1482 struct iscsi_conn
*conn
= qedi_conn
->cls_conn
->dd_data
;
1483 struct iscsi_task
*ctask
;
1484 struct iscsi_tm
*tmf_hdr
;
1485 struct qedi_cmd
*qedi_cmd
;
1486 struct qedi_cmd
*cmd
;
1487 struct qedi_endpoint
*ep
;
1493 tmf_hdr
= (struct iscsi_tm
*)mtask
->hdr
;
1494 qedi_cmd
= (struct qedi_cmd
*)mtask
->dd_data
;
1499 tid
= qedi_get_task_idx(qedi
);
1504 (struct iscsi_task_context
*)qedi_get_task_mem(&qedi
->tasks
, tid
);
1505 memset(fw_task_ctx
, 0, sizeof(struct iscsi_task_context
));
1507 qedi_cmd
->task_id
= tid
;
1509 memset(&task_params
, 0, sizeof(task_params
));
1510 memset(&tmf_pdu_header
, 0, sizeof(tmf_pdu_header
));
1512 /* Update header info */
1513 qedi_update_itt_map(qedi
, tid
, mtask
->itt
, qedi_cmd
);
1514 tmf_pdu_header
.itt
= qedi_set_itt(tid
, get_itt(mtask
->itt
));
1515 tmf_pdu_header
.cmd_sn
= be32_to_cpu(tmf_hdr
->cmdsn
);
1517 memcpy(scsi_lun
, &tmf_hdr
->lun
, sizeof(struct scsi_lun
));
1518 tmf_pdu_header
.lun
.lo
= be32_to_cpu(scsi_lun
[0]);
1519 tmf_pdu_header
.lun
.hi
= be32_to_cpu(scsi_lun
[1]);
1521 if ((tmf_hdr
->flags
& ISCSI_FLAG_TM_FUNC_MASK
) ==
1522 ISCSI_TM_FUNC_ABORT_TASK
) {
1523 ctask
= iscsi_itt_to_task(conn
, tmf_hdr
->rtt
);
1524 if (!ctask
|| !ctask
->sc
) {
1525 QEDI_ERR(&qedi
->dbg_ctx
,
1526 "Could not get reference task\n");
1529 cmd
= (struct qedi_cmd
*)ctask
->dd_data
;
1530 tmf_pdu_header
.rtt
=
1531 qedi_set_itt(cmd
->task_id
,
1532 get_itt(tmf_hdr
->rtt
));
1534 tmf_pdu_header
.rtt
= ISCSI_RESERVED_TAG
;
1537 tmf_pdu_header
.opcode
= tmf_hdr
->opcode
;
1538 tmf_pdu_header
.function
= tmf_hdr
->flags
;
1539 tmf_pdu_header
.hdr_second_dword
= ntoh24(tmf_hdr
->dlength
);
1540 tmf_pdu_header
.ref_cmd_sn
= be32_to_cpu(tmf_hdr
->refcmdsn
);
1542 /* Fill fw input params */
1543 task_params
.context
= fw_task_ctx
;
1544 task_params
.conn_icid
= (u16
)qedi_conn
->iscsi_conn_id
;
1545 task_params
.itid
= tid
;
1546 task_params
.cq_rss_number
= 0;
1547 task_params
.tx_io_size
= 0;
1548 task_params
.rx_io_size
= 0;
1550 sq_idx
= qedi_get_wqe_idx(qedi_conn
);
1551 task_params
.sqe
= &ep
->sq
[sq_idx
];
1553 memset(task_params
.sqe
, 0, sizeof(struct iscsi_wqe
));
1554 rval
= init_initiator_tmf_request_task(&task_params
,
1559 spin_lock(&qedi_conn
->list_lock
);
1560 list_add_tail(&qedi_cmd
->io_cmd
, &qedi_conn
->active_cmd_list
);
1561 qedi_cmd
->io_cmd_in_list
= true;
1562 qedi_conn
->active_cmd_count
++;
1563 spin_unlock(&qedi_conn
->list_lock
);
1565 qedi_ring_doorbell(qedi_conn
);
1569 int qedi_iscsi_abort_work(struct qedi_conn
*qedi_conn
,
1570 struct iscsi_task
*mtask
)
1572 struct qedi_ctx
*qedi
= qedi_conn
->qedi
;
1573 struct iscsi_tm
*tmf_hdr
;
1574 struct qedi_cmd
*qedi_cmd
= (struct qedi_cmd
*)mtask
->dd_data
;
1577 tmf_hdr
= (struct iscsi_tm
*)mtask
->hdr
;
1578 qedi_cmd
->task
= mtask
;
1580 /* If abort task then schedule the work and return */
1581 if ((tmf_hdr
->flags
& ISCSI_FLAG_TM_FUNC_MASK
) ==
1582 ISCSI_TM_FUNC_ABORT_TASK
) {
1583 qedi_cmd
->state
= CLEANUP_WAIT
;
1584 INIT_WORK(&qedi_cmd
->tmf_work
, qedi_tmf_work
);
1585 queue_work(qedi
->tmf_thread
, &qedi_cmd
->tmf_work
);
1587 } else if (((tmf_hdr
->flags
& ISCSI_FLAG_TM_FUNC_MASK
) ==
1588 ISCSI_TM_FUNC_LOGICAL_UNIT_RESET
) ||
1589 ((tmf_hdr
->flags
& ISCSI_FLAG_TM_FUNC_MASK
) ==
1590 ISCSI_TM_FUNC_TARGET_WARM_RESET
) ||
1591 ((tmf_hdr
->flags
& ISCSI_FLAG_TM_FUNC_MASK
) ==
1592 ISCSI_TM_FUNC_TARGET_COLD_RESET
)) {
1593 tid
= qedi_get_task_idx(qedi
);
1595 QEDI_ERR(&qedi
->dbg_ctx
, "Invalid tid, cid=0x%x\n",
1596 qedi_conn
->iscsi_conn_id
);
1599 qedi_cmd
->task_id
= tid
;
1601 qedi_send_iscsi_tmf(qedi_conn
, qedi_cmd
->task
);
1604 QEDI_ERR(&qedi
->dbg_ctx
, "Invalid tmf, cid=0x%x\n",
1605 qedi_conn
->iscsi_conn_id
);
1612 int qedi_send_iscsi_text(struct qedi_conn
*qedi_conn
,
1613 struct iscsi_task
*task
)
1615 struct iscsi_text_request_hdr text_request_pdu_header
;
1616 struct scsi_sgl_task_params tx_sgl_task_params
;
1617 struct scsi_sgl_task_params rx_sgl_task_params
;
1618 struct iscsi_task_params task_params
;
1619 struct iscsi_task_context
*fw_task_ctx
;
1620 struct qedi_ctx
*qedi
= qedi_conn
->qedi
;
1621 struct iscsi_text
*text_hdr
;
1622 struct scsi_sge
*req_sge
= NULL
;
1623 struct scsi_sge
*resp_sge
= NULL
;
1624 struct qedi_cmd
*qedi_cmd
;
1625 struct qedi_endpoint
*ep
;
1630 req_sge
= (struct scsi_sge
*)qedi_conn
->gen_pdu
.req_bd_tbl
;
1631 resp_sge
= (struct scsi_sge
*)qedi_conn
->gen_pdu
.resp_bd_tbl
;
1632 qedi_cmd
= (struct qedi_cmd
*)task
->dd_data
;
1633 text_hdr
= (struct iscsi_text
*)task
->hdr
;
1636 tid
= qedi_get_task_idx(qedi
);
1641 (struct iscsi_task_context
*)qedi_get_task_mem(&qedi
->tasks
, tid
);
1642 memset(fw_task_ctx
, 0, sizeof(struct iscsi_task_context
));
1644 qedi_cmd
->task_id
= tid
;
1646 memset(&task_params
, 0, sizeof(task_params
));
1647 memset(&text_request_pdu_header
, 0, sizeof(text_request_pdu_header
));
1648 memset(&tx_sgl_task_params
, 0, sizeof(tx_sgl_task_params
));
1649 memset(&rx_sgl_task_params
, 0, sizeof(rx_sgl_task_params
));
1651 /* Update header info */
1652 text_request_pdu_header
.opcode
= text_hdr
->opcode
;
1653 text_request_pdu_header
.flags_attr
= text_hdr
->flags
;
1655 qedi_update_itt_map(qedi
, tid
, task
->itt
, qedi_cmd
);
1656 text_request_pdu_header
.itt
= qedi_set_itt(tid
, get_itt(task
->itt
));
1657 text_request_pdu_header
.ttt
= text_hdr
->ttt
;
1658 text_request_pdu_header
.cmd_sn
= be32_to_cpu(text_hdr
->cmdsn
);
1659 text_request_pdu_header
.exp_stat_sn
= be32_to_cpu(text_hdr
->exp_statsn
);
1660 text_request_pdu_header
.hdr_second_dword
= ntoh24(text_hdr
->dlength
);
1662 /* Fill tx AHS and rx buffer */
1663 tx_sgl_task_params
.sgl
=
1664 (struct scsi_sge
*)qedi_conn
->gen_pdu
.req_bd_tbl
;
1665 tx_sgl_task_params
.sgl_phys_addr
.lo
=
1666 (u32
)(qedi_conn
->gen_pdu
.req_dma_addr
);
1667 tx_sgl_task_params
.sgl_phys_addr
.hi
=
1668 (u32
)((u64
)qedi_conn
->gen_pdu
.req_dma_addr
>> 32);
1669 tx_sgl_task_params
.total_buffer_size
= req_sge
->sge_len
;
1670 tx_sgl_task_params
.num_sges
= 1;
1672 rx_sgl_task_params
.sgl
=
1673 (struct scsi_sge
*)qedi_conn
->gen_pdu
.resp_bd_tbl
;
1674 rx_sgl_task_params
.sgl_phys_addr
.lo
=
1675 (u32
)(qedi_conn
->gen_pdu
.resp_dma_addr
);
1676 rx_sgl_task_params
.sgl_phys_addr
.hi
=
1677 (u32
)((u64
)qedi_conn
->gen_pdu
.resp_dma_addr
>> 32);
1678 rx_sgl_task_params
.total_buffer_size
= resp_sge
->sge_len
;
1679 rx_sgl_task_params
.num_sges
= 1;
1681 /* Fill fw input params */
1682 task_params
.context
= fw_task_ctx
;
1683 task_params
.conn_icid
= (u16
)qedi_conn
->iscsi_conn_id
;
1684 task_params
.itid
= tid
;
1685 task_params
.cq_rss_number
= 0;
1686 task_params
.tx_io_size
= ntoh24(text_hdr
->dlength
);
1687 task_params
.rx_io_size
= resp_sge
->sge_len
;
1689 sq_idx
= qedi_get_wqe_idx(qedi_conn
);
1690 task_params
.sqe
= &ep
->sq
[sq_idx
];
1692 memset(task_params
.sqe
, 0, sizeof(struct iscsi_wqe
));
1693 rval
= init_initiator_text_request_task(&task_params
,
1694 &text_request_pdu_header
,
1695 &tx_sgl_task_params
,
1696 &rx_sgl_task_params
);
1700 spin_lock(&qedi_conn
->list_lock
);
1701 list_add_tail(&qedi_cmd
->io_cmd
, &qedi_conn
->active_cmd_list
);
1702 qedi_cmd
->io_cmd_in_list
= true;
1703 qedi_conn
->active_cmd_count
++;
1704 spin_unlock(&qedi_conn
->list_lock
);
1706 qedi_ring_doorbell(qedi_conn
);
1710 int qedi_send_iscsi_nopout(struct qedi_conn
*qedi_conn
,
1711 struct iscsi_task
*task
,
1712 char *datap
, int data_len
, int unsol
)
1714 struct iscsi_nop_out_hdr nop_out_pdu_header
;
1715 struct scsi_sgl_task_params tx_sgl_task_params
;
1716 struct scsi_sgl_task_params rx_sgl_task_params
;
1717 struct iscsi_task_params task_params
;
1718 struct qedi_ctx
*qedi
= qedi_conn
->qedi
;
1719 struct iscsi_task_context
*fw_task_ctx
;
1720 struct iscsi_nopout
*nopout_hdr
;
1721 struct scsi_sge
*req_sge
= NULL
;
1722 struct scsi_sge
*resp_sge
= NULL
;
1723 struct qedi_cmd
*qedi_cmd
;
1724 struct qedi_endpoint
*ep
;
1730 req_sge
= (struct scsi_sge
*)qedi_conn
->gen_pdu
.req_bd_tbl
;
1731 resp_sge
= (struct scsi_sge
*)qedi_conn
->gen_pdu
.resp_bd_tbl
;
1732 qedi_cmd
= (struct qedi_cmd
*)task
->dd_data
;
1733 nopout_hdr
= (struct iscsi_nopout
*)task
->hdr
;
1736 tid
= qedi_get_task_idx(qedi
);
1741 (struct iscsi_task_context
*)qedi_get_task_mem(&qedi
->tasks
, tid
);
1742 memset(fw_task_ctx
, 0, sizeof(struct iscsi_task_context
));
1744 qedi_cmd
->task_id
= tid
;
1746 memset(&task_params
, 0, sizeof(task_params
));
1747 memset(&nop_out_pdu_header
, 0, sizeof(nop_out_pdu_header
));
1748 memset(&tx_sgl_task_params
, 0, sizeof(tx_sgl_task_params
));
1749 memset(&rx_sgl_task_params
, 0, sizeof(rx_sgl_task_params
));
1751 /* Update header info */
1752 nop_out_pdu_header
.opcode
= nopout_hdr
->opcode
;
1753 SET_FIELD(nop_out_pdu_header
.flags_attr
, ISCSI_NOP_OUT_HDR_CONST1
, 1);
1754 SET_FIELD(nop_out_pdu_header
.flags_attr
, ISCSI_NOP_OUT_HDR_RSRV
, 0);
1756 memcpy(scsi_lun
, &nopout_hdr
->lun
, sizeof(struct scsi_lun
));
1757 nop_out_pdu_header
.lun
.lo
= be32_to_cpu(scsi_lun
[0]);
1758 nop_out_pdu_header
.lun
.hi
= be32_to_cpu(scsi_lun
[1]);
1759 nop_out_pdu_header
.cmd_sn
= be32_to_cpu(nopout_hdr
->cmdsn
);
1760 nop_out_pdu_header
.exp_stat_sn
= be32_to_cpu(nopout_hdr
->exp_statsn
);
1762 qedi_update_itt_map(qedi
, tid
, task
->itt
, qedi_cmd
);
1764 if (nopout_hdr
->ttt
!= ISCSI_TTT_ALL_ONES
) {
1765 nop_out_pdu_header
.itt
= be32_to_cpu(nopout_hdr
->itt
);
1766 nop_out_pdu_header
.ttt
= be32_to_cpu(nopout_hdr
->ttt
);
1768 nop_out_pdu_header
.itt
= qedi_set_itt(tid
, get_itt(task
->itt
));
1769 nop_out_pdu_header
.ttt
= ISCSI_TTT_ALL_ONES
;
1771 spin_lock(&qedi_conn
->list_lock
);
1772 list_add_tail(&qedi_cmd
->io_cmd
, &qedi_conn
->active_cmd_list
);
1773 qedi_cmd
->io_cmd_in_list
= true;
1774 qedi_conn
->active_cmd_count
++;
1775 spin_unlock(&qedi_conn
->list_lock
);
1778 /* Fill tx AHS and rx buffer */
1780 tx_sgl_task_params
.sgl
=
1781 (struct scsi_sge
*)qedi_conn
->gen_pdu
.req_bd_tbl
;
1782 tx_sgl_task_params
.sgl_phys_addr
.lo
=
1783 (u32
)(qedi_conn
->gen_pdu
.req_dma_addr
);
1784 tx_sgl_task_params
.sgl_phys_addr
.hi
=
1785 (u32
)((u64
)qedi_conn
->gen_pdu
.req_dma_addr
>> 32);
1786 tx_sgl_task_params
.total_buffer_size
= data_len
;
1787 tx_sgl_task_params
.num_sges
= 1;
1789 rx_sgl_task_params
.sgl
=
1790 (struct scsi_sge
*)qedi_conn
->gen_pdu
.resp_bd_tbl
;
1791 rx_sgl_task_params
.sgl_phys_addr
.lo
=
1792 (u32
)(qedi_conn
->gen_pdu
.resp_dma_addr
);
1793 rx_sgl_task_params
.sgl_phys_addr
.hi
=
1794 (u32
)((u64
)qedi_conn
->gen_pdu
.resp_dma_addr
>> 32);
1795 rx_sgl_task_params
.total_buffer_size
= resp_sge
->sge_len
;
1796 rx_sgl_task_params
.num_sges
= 1;
1799 /* Fill fw input params */
1800 task_params
.context
= fw_task_ctx
;
1801 task_params
.conn_icid
= (u16
)qedi_conn
->iscsi_conn_id
;
1802 task_params
.itid
= tid
;
1803 task_params
.cq_rss_number
= 0;
1804 task_params
.tx_io_size
= data_len
;
1805 task_params
.rx_io_size
= resp_sge
->sge_len
;
1807 sq_idx
= qedi_get_wqe_idx(qedi_conn
);
1808 task_params
.sqe
= &ep
->sq
[sq_idx
];
1810 memset(task_params
.sqe
, 0, sizeof(struct iscsi_wqe
));
1811 rval
= init_initiator_nop_out_task(&task_params
,
1812 &nop_out_pdu_header
,
1813 &tx_sgl_task_params
,
1814 &rx_sgl_task_params
);
1818 qedi_ring_doorbell(qedi_conn
);
1822 static int qedi_split_bd(struct qedi_cmd
*cmd
, u64 addr
, int sg_len
,
1825 struct scsi_sge
*bd
= cmd
->io_tbl
.sge_tbl
;
1826 int frag_size
, sg_frags
;
1831 if (addr
% QEDI_PAGE_SIZE
)
1833 (QEDI_PAGE_SIZE
- (addr
% QEDI_PAGE_SIZE
));
1835 frag_size
= (sg_len
> QEDI_BD_SPLIT_SZ
) ? 0 :
1836 (sg_len
% QEDI_BD_SPLIT_SZ
);
1839 frag_size
= QEDI_BD_SPLIT_SZ
;
1841 bd
[bd_index
+ sg_frags
].sge_addr
.lo
= (addr
& 0xffffffff);
1842 bd
[bd_index
+ sg_frags
].sge_addr
.hi
= (addr
>> 32);
1843 bd
[bd_index
+ sg_frags
].sge_len
= (u16
)frag_size
;
1844 QEDI_INFO(&cmd
->conn
->qedi
->dbg_ctx
, QEDI_LOG_IO
,
1845 "split sge %d: addr=%llx, len=%x",
1846 (bd_index
+ sg_frags
), addr
, frag_size
);
1848 addr
+= (u64
)frag_size
;
1850 sg_len
-= frag_size
;
1855 static int qedi_map_scsi_sg(struct qedi_ctx
*qedi
, struct qedi_cmd
*cmd
)
1857 struct scsi_cmnd
*sc
= cmd
->scsi_cmd
;
1858 struct scsi_sge
*bd
= cmd
->io_tbl
.sge_tbl
;
1859 struct scatterlist
*sg
;
1868 WARN_ON(scsi_sg_count(sc
) > QEDI_ISCSI_MAX_BDS_PER_CMD
);
1870 sg_count
= dma_map_sg(&qedi
->pdev
->dev
, scsi_sglist(sc
),
1871 scsi_sg_count(sc
), sc
->sc_data_direction
);
1874 * New condition to send single SGE as cached-SGL.
1875 * Single SGE with length less than 64K.
1877 sg
= scsi_sglist(sc
);
1878 if ((sg_count
== 1) && (sg_dma_len(sg
) <= MAX_SGLEN_FOR_CACHESGL
)) {
1879 sg_len
= sg_dma_len(sg
);
1880 addr
= (u64
)sg_dma_address(sg
);
1882 bd
[bd_count
].sge_addr
.lo
= (addr
& 0xffffffff);
1883 bd
[bd_count
].sge_addr
.hi
= (addr
>> 32);
1884 bd
[bd_count
].sge_len
= (u16
)sg_len
;
1886 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_IO
,
1887 "single-cashed-sgl: bd_count:%d addr=%llx, len=%x",
1888 sg_count
, addr
, sg_len
);
1893 scsi_for_each_sg(sc
, sg
, sg_count
, i
) {
1894 sg_len
= sg_dma_len(sg
);
1895 addr
= (u64
)sg_dma_address(sg
);
1896 end_addr
= (addr
+ sg_len
);
1899 * first sg elem in the 'list',
1900 * check if end addr is page-aligned.
1902 if ((i
== 0) && (sg_count
> 1) && (end_addr
% QEDI_PAGE_SIZE
))
1903 cmd
->use_slowpath
= true;
1906 * last sg elem in the 'list',
1907 * check if start addr is page-aligned.
1909 else if ((i
== (sg_count
- 1)) &&
1910 (sg_count
> 1) && (addr
% QEDI_PAGE_SIZE
))
1911 cmd
->use_slowpath
= true;
1914 * middle sg elements in list,
1915 * check if start and end addr is page-aligned
1917 else if ((i
!= 0) && (i
!= (sg_count
- 1)) &&
1918 ((addr
% QEDI_PAGE_SIZE
) ||
1919 (end_addr
% QEDI_PAGE_SIZE
)))
1920 cmd
->use_slowpath
= true;
1922 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_IO
, "sg[%d] size=0x%x",
1925 if (sg_len
> QEDI_BD_SPLIT_SZ
) {
1926 sg_frags
= qedi_split_bd(cmd
, addr
, sg_len
, bd_count
);
1929 bd
[bd_count
].sge_addr
.lo
= addr
& 0xffffffff;
1930 bd
[bd_count
].sge_addr
.hi
= addr
>> 32;
1931 bd
[bd_count
].sge_len
= sg_len
;
1933 byte_count
+= sg_len
;
1934 bd_count
+= sg_frags
;
1937 if (byte_count
!= scsi_bufflen(sc
))
1938 QEDI_ERR(&qedi
->dbg_ctx
,
1939 "byte_count = %d != scsi_bufflen = %d\n", byte_count
,
1942 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_IO
, "byte_count = %d\n",
1945 WARN_ON(byte_count
!= scsi_bufflen(sc
));
1950 static void qedi_iscsi_map_sg_list(struct qedi_cmd
*cmd
)
1953 struct scsi_cmnd
*sc
= cmd
->scsi_cmd
;
1955 if (scsi_sg_count(sc
)) {
1956 bd_count
= qedi_map_scsi_sg(cmd
->conn
->qedi
, cmd
);
1960 struct scsi_sge
*bd
= cmd
->io_tbl
.sge_tbl
;
1962 bd
[0].sge_addr
.lo
= 0;
1963 bd
[0].sge_addr
.hi
= 0;
1967 cmd
->io_tbl
.sge_valid
= bd_count
;
1970 static void qedi_cpy_scsi_cdb(struct scsi_cmnd
*sc
, u32
*dstp
)
1976 lpcnt
= sc
->cmd_len
/ sizeof(dword
);
1977 srcp
= (u8
*)sc
->cmnd
;
1979 memcpy(&dword
, (const void *)srcp
, 4);
1980 *dstp
= cpu_to_be32(dword
);
1984 if (sc
->cmd_len
& 0x3) {
1985 dword
= (u32
)srcp
[0] | ((u32
)srcp
[1] << 8);
1986 *dstp
= cpu_to_be32(dword
);
1990 void qedi_trace_io(struct qedi_ctx
*qedi
, struct iscsi_task
*task
,
1991 u16 tid
, int8_t direction
)
1993 struct qedi_io_log
*io_log
;
1994 struct iscsi_conn
*conn
= task
->conn
;
1995 struct qedi_conn
*qedi_conn
= conn
->dd_data
;
1996 struct scsi_cmnd
*sc_cmd
= task
->sc
;
1997 unsigned long flags
;
2000 spin_lock_irqsave(&qedi
->io_trace_lock
, flags
);
2002 io_log
= &qedi
->io_trace_buf
[qedi
->io_trace_idx
];
2003 io_log
->direction
= direction
;
2004 io_log
->task_id
= tid
;
2005 io_log
->cid
= qedi_conn
->iscsi_conn_id
;
2006 io_log
->lun
= sc_cmd
->device
->lun
;
2007 io_log
->op
= sc_cmd
->cmnd
[0];
2008 op
= sc_cmd
->cmnd
[0];
2009 io_log
->lba
[0] = sc_cmd
->cmnd
[2];
2010 io_log
->lba
[1] = sc_cmd
->cmnd
[3];
2011 io_log
->lba
[2] = sc_cmd
->cmnd
[4];
2012 io_log
->lba
[3] = sc_cmd
->cmnd
[5];
2013 io_log
->bufflen
= scsi_bufflen(sc_cmd
);
2014 io_log
->sg_count
= scsi_sg_count(sc_cmd
);
2015 io_log
->fast_sgs
= qedi
->fast_sgls
;
2016 io_log
->cached_sgs
= qedi
->cached_sgls
;
2017 io_log
->slow_sgs
= qedi
->slow_sgls
;
2018 io_log
->cached_sge
= qedi
->use_cached_sge
;
2019 io_log
->slow_sge
= qedi
->use_slow_sge
;
2020 io_log
->fast_sge
= qedi
->use_fast_sge
;
2021 io_log
->result
= sc_cmd
->result
;
2022 io_log
->jiffies
= jiffies
;
2023 io_log
->blk_req_cpu
= smp_processor_id();
2025 if (direction
== QEDI_IO_TRACE_REQ
) {
2026 /* For requests we only care about the submission CPU */
2027 io_log
->req_cpu
= smp_processor_id() % qedi
->num_queues
;
2028 io_log
->intr_cpu
= 0;
2029 io_log
->blk_rsp_cpu
= 0;
2030 } else if (direction
== QEDI_IO_TRACE_RSP
) {
2031 io_log
->req_cpu
= smp_processor_id() % qedi
->num_queues
;
2032 io_log
->intr_cpu
= qedi
->intr_cpu
;
2033 io_log
->blk_rsp_cpu
= smp_processor_id();
2036 qedi
->io_trace_idx
++;
2037 if (qedi
->io_trace_idx
== QEDI_IO_TRACE_SIZE
)
2038 qedi
->io_trace_idx
= 0;
2040 qedi
->use_cached_sge
= false;
2041 qedi
->use_slow_sge
= false;
2042 qedi
->use_fast_sge
= false;
2044 spin_unlock_irqrestore(&qedi
->io_trace_lock
, flags
);
/*
 * qedi_iscsi_send_ioreq() - build and post one SCSI read/write command to the
 * offload firmware: map the command's scatter-gather list, fill the iSCSI
 * command PDU header and firmware task/SGL/connection/command parameter
 * structures, queue the command on the connection's active list, and ring
 * the doorbell so the hardware picks up the new SQE.
 *
 * NOTE(review): this extraction is lossy — the embedded original line numbers
 * jump (2068->2076, 2082->2087, 2190->2196, ...), so the declarations of the
 * locals used below (tid, cq_idx, sq_idx, rval, scsi_lun[], and the
 * assignment of ep), the error checks, and the function's return statement
 * are missing from this view. Comments below flag each gap.
 */
2047 int qedi_iscsi_send_ioreq(struct iscsi_task
*task
)
/* Resolve libiscsi/driver contexts from the task: conn -> session -> host. */
2049 struct iscsi_conn
*conn
= task
->conn
;
2050 struct iscsi_session
*session
= conn
->session
;
2051 struct Scsi_Host
*shost
= iscsi_session_to_shost(session
->cls_session
);
2052 struct qedi_ctx
*qedi
= iscsi_host_priv(shost
);
2053 struct qedi_conn
*qedi_conn
= conn
->dd_data
;
2054 struct qedi_cmd
*cmd
= task
->dd_data
;
2055 struct scsi_cmnd
*sc
= task
->sc
;
/* Firmware parameter blocks, zeroed further below before being filled. */
2056 struct iscsi_cmd_hdr cmd_pdu_header
;
2057 struct scsi_sgl_task_params tx_sgl_task_params
;
2058 struct scsi_sgl_task_params rx_sgl_task_params
;
/* prx_sgl/ptx_sgl stay NULL unless the matching I/O size is non-zero. */
2059 struct scsi_sgl_task_params
*prx_sgl
= NULL
;
2060 struct scsi_sgl_task_params
*ptx_sgl
= NULL
;
2061 struct iscsi_task_params task_params
;
2062 struct iscsi_conn_params conn_params
;
2063 struct scsi_initiator_cmd_params cmd_params
;
2064 struct iscsi_task_context
*fw_task_ctx
;
2065 struct iscsi_cls_conn
*cls_conn
;
2066 struct iscsi_scsi_req
*hdr
= (struct iscsi_scsi_req
*)task
->hdr
;
/* Sentinel: neither read nor write until the CDB direction is inspected. */
2067 enum iscsi_task_type task_type
= MAX_ISCSI_TASK_TYPE
;
2068 struct qedi_endpoint
*ep
;
/*
 * NOTE(review): original lines 2069-2075 are missing here — presumably the
 * remaining local declarations (tid, cq_idx, sq_idx, rval, scsi_lun[2]) and
 * the assignment of ep; verify against the full source.
 */
2076 cls_conn
= qedi_conn
->cls_conn
;
2077 conn
= cls_conn
->dd_data
;
/* DMA-map the command's scatter-gather list into cmd->io_tbl. */
2079 qedi_iscsi_map_sg_list(cmd
);
/* Pack the 64-bit LUN into scsi_lun[] in wire (SCSI) format. */
2080 int_to_scsilun(sc
->device
->lun
, (struct scsi_lun
*)scsi_lun
);
/* Reserve a firmware task id for this command. */
2082 tid
= qedi_get_task_idx(qedi
);
/*
 * NOTE(review): lines 2083-2086 missing — presumably the tid error check
 * and the "fw_task_ctx =" left-hand side of the statement below.
 */
2087 (struct iscsi_task_context
*)qedi_get_task_mem(&qedi
->tasks
, tid
);
2088 memset(fw_task_ctx
, 0, sizeof(struct iscsi_task_context
));
/* Zero every parameter block before selectively filling it in. */
2092 memset(&task_params
, 0, sizeof(task_params
));
2093 memset(&cmd_pdu_header
, 0, sizeof(cmd_pdu_header
));
2094 memset(&tx_sgl_task_params
, 0, sizeof(tx_sgl_task_params
));
2095 memset(&rx_sgl_task_params
, 0, sizeof(rx_sgl_task_params
));
2096 memset(&conn_params
, 0, sizeof(conn_params
));
2097 memset(&cmd_params
, 0, sizeof(cmd_params
));
/* Spread completions across queues by the submitting CPU. */
2099 cq_idx
= smp_processor_id() % qedi
->num_queues
;
2100 /* Update header info */
/*
 * NOTE(review): line 2102 missing — the value argument of this SET_FIELD
 * (the task attribute derived from the PDU) is not in this extract.
 */
2101 SET_FIELD(cmd_pdu_header
.flags_attr
, ISCSI_CMD_HDR_ATTR
,
/* TEST_UNIT_READY moves no data, so skip setting a READ/WRITE flag for it. */
2103 if (hdr
->cdb
[0] != TEST_UNIT_READY
) {
2104 if (sc
->sc_data_direction
== DMA_TO_DEVICE
) {
2105 SET_FIELD(cmd_pdu_header
.flags_attr
,
2106 ISCSI_CMD_HDR_WRITE
, 1);
2107 task_type
= ISCSI_TASK_TYPE_INITIATOR_WRITE
;
/* NOTE(review): the "} else {" between lines 2107 and 2109 is missing. */
2109 SET_FIELD(cmd_pdu_header
.flags_attr
,
2110 ISCSI_CMD_HDR_READ
, 1);
2111 task_type
= ISCSI_TASK_TYPE_INITIATOR_READ
;
/* Copy the wire-format LUN into the PDU header (two 32-bit halves). */
2115 cmd_pdu_header
.lun
.lo
= be32_to_cpu(scsi_lun
[0]);
2116 cmd_pdu_header
.lun
.hi
= be32_to_cpu(scsi_lun
[1]);
/* Record tid<->itt so completions can be matched back to this command. */
2118 qedi_update_itt_map(qedi
, tid
, task
->itt
, cmd
);
2119 cmd_pdu_header
.itt
= qedi_set_itt(tid
, get_itt(task
->itt
));
2120 cmd_pdu_header
.expected_transfer_length
= cpu_to_be32(hdr
->data_length
);
/* dlength is a 24-bit big-endian field in the iSCSI BHS. */
2121 cmd_pdu_header
.hdr_second_dword
= ntoh24(hdr
->dlength
);
2122 cmd_pdu_header
.cmd_sn
= be32_to_cpu(hdr
->cmdsn
);
2123 cmd_pdu_header
.hdr_first_byte
= hdr
->opcode
;
/* Copy the SCSI CDB into the PDU header as 32-bit words. */
2124 qedi_cpy_scsi_cdb(sc
, (u32
*)cmd_pdu_header
.cdb
);
2126 /* Fill tx AHS and rx buffer */
/* Writes describe the tx SGL; reads (else-if below) describe the rx SGL. */
2127 if (task_type
== ISCSI_TASK_TYPE_INITIATOR_WRITE
) {
2128 tx_sgl_task_params
.sgl
= cmd
->io_tbl
.sge_tbl
;
/* 64-bit SGL DMA address split into lo/hi 32-bit halves for the firmware. */
2129 tx_sgl_task_params
.sgl_phys_addr
.lo
=
2130 (u32
)(cmd
->io_tbl
.sge_tbl_dma
);
2131 tx_sgl_task_params
.sgl_phys_addr
.hi
=
2132 (u32
)((u64
)cmd
->io_tbl
.sge_tbl_dma
>> 32);
2133 tx_sgl_task_params
.total_buffer_size
= scsi_bufflen(sc
);
2134 tx_sgl_task_params
.num_sges
= cmd
->io_tbl
.sge_valid
;
/* Slow-path SG lists flag small mid-list SGEs to the firmware. */
2135 if (cmd
->use_slowpath
)
2136 tx_sgl_task_params
.small_mid_sge
= true;
2137 } else if (task_type
== ISCSI_TASK_TYPE_INITIATOR_READ
) {
2138 rx_sgl_task_params
.sgl
= cmd
->io_tbl
.sge_tbl
;
2139 rx_sgl_task_params
.sgl_phys_addr
.lo
=
2140 (u32
)(cmd
->io_tbl
.sge_tbl_dma
);
2141 rx_sgl_task_params
.sgl_phys_addr
.hi
=
2142 (u32
)((u64
)cmd
->io_tbl
.sge_tbl_dma
>> 32);
2143 rx_sgl_task_params
.total_buffer_size
= scsi_bufflen(sc
);
2144 rx_sgl_task_params
.num_sges
= cmd
->io_tbl
.sge_valid
;
2147 /* Add conn param */
/* Negotiated session/connection limits the firmware must honor. */
2148 conn_params
.first_burst_length
= conn
->session
->first_burst
;
2149 conn_params
.max_send_pdu_length
= conn
->max_xmit_dlength
;
2150 conn_params
.max_burst_length
= conn
->session
->max_burst
;
2151 if (conn
->session
->initial_r2t_en
)
2152 conn_params
.initial_r2t
= true;
2153 if (conn
->session
->imm_data_en
)
2154 conn_params
.immediate_data
= true;
2156 /* Add cmd params */
/* Sense buffer DMA address, again split into 32-bit lo/hi halves. */
2157 cmd_params
.sense_data_buffer_phys_addr
.lo
= (u32
)cmd
->sense_buffer_dma
;
2158 cmd_params
.sense_data_buffer_phys_addr
.hi
=
2159 (u32
)((u64
)cmd
->sense_buffer_dma
>> 32);
2160 /* Fill fw input params */
2161 task_params
.context
= fw_task_ctx
;
2162 task_params
.conn_icid
= (u16
)qedi_conn
->iscsi_conn_id
;
2163 task_params
.itid
= tid
;
2164 task_params
.cq_rss_number
= cq_idx
;
/* Only one direction carries data; the other I/O size stays zero. */
2165 if (task_type
== ISCSI_TASK_TYPE_INITIATOR_WRITE
)
2166 task_params
.tx_io_size
= scsi_bufflen(sc
);
2167 else if (task_type
== ISCSI_TASK_TYPE_INITIATOR_READ
)
2168 task_params
.rx_io_size
= scsi_bufflen(sc
);
/* Claim the next send-queue slot on this connection's endpoint. */
2170 sq_idx
= qedi_get_wqe_idx(qedi_conn
);
2171 task_params
.sqe
= &ep
->sq
[sq_idx
];
2173 QEDI_INFO(&qedi
->dbg_ctx
, QEDI_LOG_IO
,
2174 "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n",
2175 (task_type
== ISCSI_TASK_TYPE_INITIATOR_WRITE
) ?
2176 "Write " : "Read ", (cmd
->io_tbl
.sge_valid
== 1) ?
2177 "Single" : (cmd
->use_slowpath
? "SLOW" : "FAST"),
2178 (u16
)cmd
->io_tbl
.sge_valid
, scsi_bufflen(sc
),
2179 (u32
)(cmd
->io_tbl
.sge_tbl_dma
),
2180 (u32
)((u64
)cmd
->io_tbl
.sge_tbl_dma
>> 32));
/* Clear the SQE; the init helper below fills it in. */
2182 memset(task_params
.sqe
, 0, sizeof(struct iscsi_wqe
));
/* Pass an SGL pointer only for the direction that actually moves data. */
2184 if (task_params
.tx_io_size
!= 0)
2185 ptx_sgl
= &tx_sgl_task_params
;
2186 if (task_params
.rx_io_size
!= 0)
2187 prx_sgl
= &rx_sgl_task_params
;
/*
 * NOTE(review): lines 2191-2195 missing — the remaining arguments of this
 * call (presumably ptx_sgl/prx_sgl/&cmd_params) and the rval error check
 * are not in this extract.
 */
2189 rval
= init_initiator_rw_iscsi_task(&task_params
, &conn_params
,
2190 &cmd_params
, &cmd_pdu_header
,
/* Track the in-flight command on the connection, under list_lock. */
2196 spin_lock(&qedi_conn
->list_lock
);
2197 list_add_tail(&cmd
->io_cmd
, &qedi_conn
->active_cmd_list
);
2198 cmd
->io_cmd_in_list
= true;
2199 qedi_conn
->active_cmd_count
++;
2200 spin_unlock(&qedi_conn
->list_lock
);
/* Notify the hardware that a new SQE is ready. */
2202 qedi_ring_doorbell(qedi_conn
);
/* NOTE(review): lines 2203+ (return statement, closing brace) are missing. */
2206 int qedi_iscsi_cleanup_task(struct iscsi_task
*task
, bool mark_cmd_node_deleted
)
2208 struct iscsi_task_params task_params
;
2209 struct qedi_endpoint
*ep
;
2210 struct iscsi_conn
*conn
= task
->conn
;
2211 struct qedi_conn
*qedi_conn
= conn
->dd_data
;
2212 struct qedi_cmd
*cmd
= task
->dd_data
;
2216 QEDI_INFO(&qedi_conn
->qedi
->dbg_ctx
, QEDI_LOG_SCSI_TM
,
2217 "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n",
2218 cmd
->task_id
, get_itt(task
->itt
), task
->state
,
2219 cmd
->state
, qedi_conn
->iscsi_conn_id
);
2221 memset(&task_params
, 0, sizeof(task_params
));
2224 sq_idx
= qedi_get_wqe_idx(qedi_conn
);
2226 task_params
.sqe
= &ep
->sq
[sq_idx
];
2227 memset(task_params
.sqe
, 0, sizeof(struct iscsi_wqe
));
2228 task_params
.itid
= cmd
->task_id
;
2230 rval
= init_cleanup_task(&task_params
);
2234 qedi_ring_doorbell(qedi_conn
);