/* drivers/scsi/qedi/qedi_fw.c */
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#include <linux/blkdev.h>
11#include <scsi/scsi_tcq.h>
12#include <linux/delay.h>
13
14#include "qedi.h"
15#include "qedi_iscsi.h"
16#include "qedi_gbl.h"
17
18static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
19 struct iscsi_task *mtask);
20
21void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
22{
23 struct scsi_cmnd *sc = cmd->scsi_cmd;
24
25 if (cmd->io_tbl.sge_valid && sc) {
26 cmd->io_tbl.sge_valid = 0;
27 scsi_dma_unmap(sc);
28 }
29}
30
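/*
 * Build a logout response PDU from the firmware CQE, drop the command from
 * the connection's active list, release its task index and complete the PDU
 * to libiscsi under the session back_lock.
 */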
31static void qedi_process_logout_resp(struct qedi_ctx *qedi,
32 union iscsi_cqe *cqe,
33 struct iscsi_task *task,
34 struct qedi_conn *qedi_conn)
35{
36 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
37 struct iscsi_logout_rsp *resp_hdr;
38 struct iscsi_session *session = conn->session;
39 struct iscsi_logout_response_hdr *cqe_logout_response;
40 struct qedi_cmd *cmd;
41
42 cmd = (struct qedi_cmd *)task->dd_data;
43 cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;
44 spin_lock(&session->back_lock);
45 resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr;
46 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
47 resp_hdr->opcode = cqe_logout_response->opcode;
48 resp_hdr->flags = cqe_logout_response->flags;
49 resp_hdr->hlength = 0;
50
51 resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
52 resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn);
53 resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
54 resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
55
56 resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait);
57 resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain);
58
59 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
60 "Freeing tid=0x%x for cid=0x%x\n",
61 cmd->task_id, qedi_conn->iscsi_conn_id);
62
63 if (likely(cmd->io_cmd_in_list)) {
64 cmd->io_cmd_in_list = false;
65 list_del_init(&cmd->io_cmd);
66 qedi_conn->active_cmd_count--;
67 } else {
68 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
69 "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
70 cmd->task_id, qedi_conn->iscsi_conn_id,
71 &cmd->io_cmd);
72 }
73
74 cmd->state = RESPONSE_RECEIVED;
75 qedi_clear_task_idx(qedi, cmd->task_id);
76 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
77
78 spin_unlock(&session->back_lock);
79}
80
81static void qedi_process_text_resp(struct qedi_ctx *qedi,
82 union iscsi_cqe *cqe,
83 struct iscsi_task *task,
84 struct qedi_conn *qedi_conn)
85{
86 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
87 struct iscsi_session *session = conn->session;
88 struct iscsi_task_context *task_ctx;
89 struct iscsi_text_rsp *resp_hdr_ptr;
90 struct iscsi_text_response_hdr *cqe_text_response;
91 struct qedi_cmd *cmd;
92 int pld_len;
93 u32 *tmp;
94
95 cmd = (struct qedi_cmd *)task->dd_data;
96 task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
97
98 cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;
99 spin_lock(&session->back_lock);
100 resp_hdr_ptr = (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr;
101 memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr));
102 resp_hdr_ptr->opcode = cqe_text_response->opcode;
103 resp_hdr_ptr->flags = cqe_text_response->flags;
104 resp_hdr_ptr->hlength = 0;
105
106 hton24(resp_hdr_ptr->dlength,
107 (cqe_text_response->hdr_second_dword &
108 ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));
109 tmp = (u32 *)resp_hdr_ptr->dlength;
110
111 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
112 conn->session->age);
113 resp_hdr_ptr->ttt = cqe_text_response->ttt;
114 resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn);
115 resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn);
116 resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn);
117
118 pld_len = cqe_text_response->hdr_second_dword &
119 ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK;
120 qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
121
122 memset(task_ctx, '\0', sizeof(*task_ctx));
123
124 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
125 "Freeing tid=0x%x for cid=0x%x\n",
126 cmd->task_id, qedi_conn->iscsi_conn_id);
127
128 if (likely(cmd->io_cmd_in_list)) {
129 cmd->io_cmd_in_list = false;
130 list_del_init(&cmd->io_cmd);
131 qedi_conn->active_cmd_count--;
132 } else {
133 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
134 "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
135 cmd->task_id, qedi_conn->iscsi_conn_id,
136 &cmd->io_cmd);
137 }
138
139 cmd->state = RESPONSE_RECEIVED;
140 qedi_clear_task_idx(qedi, cmd->task_id);
141
142 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
143 qedi_conn->gen_pdu.resp_buf,
144 (qedi_conn->gen_pdu.resp_wr_ptr -
145 qedi_conn->gen_pdu.resp_buf));
146 spin_unlock(&session->back_lock);
147}
148
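/*
 * Deferred work for LUN/target reset TMF responses: block the session,
 * clean up all outstanding firmware I/O on the connection, then complete
 * the TMF response PDU saved in tmf_resp_buf.
 */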
149static void qedi_tmf_resp_work(struct work_struct *work)
150{
151 struct qedi_cmd *qedi_cmd =
152 container_of(work, struct qedi_cmd, tmf_work);
153 struct qedi_conn *qedi_conn = qedi_cmd->conn;
154 struct qedi_ctx *qedi = qedi_conn->qedi;
155 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
156 struct iscsi_session *session = conn->session;
157 struct iscsi_tm_rsp *resp_hdr_ptr;
158 struct iscsi_cls_session *cls_sess;
159 int rval = 0;
160
161 set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
162 resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
163 cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
164
165 iscsi_block_session(session->cls_session);
166 rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
167 if (rval) {
168 clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
169 qedi_clear_task_idx(qedi, qedi_cmd->task_id);
170 iscsi_unblock_session(session->cls_session);
171 return;
172 }
173
174 iscsi_unblock_session(session->cls_session);
175 qedi_clear_task_idx(qedi, qedi_cmd->task_id);
176
177 spin_lock(&session->back_lock);
178 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
179 spin_unlock(&session->back_lock);
180 kfree(resp_hdr_ptr);
181 clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
182}
183
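/*
 * Build a TMF response header from the CQE. Abort-task responses are
 * completed inline; LUN and target reset responses are deferred to
 * qedi_tmf_resp_work so outstanding I/O can be cleaned up first.
 */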
184static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
185 union iscsi_cqe *cqe,
186 struct iscsi_task *task,
187 struct qedi_conn *qedi_conn)
188
189{
190 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
191 struct iscsi_session *session = conn->session;
192 struct iscsi_tmf_response_hdr *cqe_tmp_response;
193 struct iscsi_tm_rsp *resp_hdr_ptr;
194 struct iscsi_tm *tmf_hdr;
195 struct qedi_cmd *qedi_cmd = NULL;
196 u32 *tmp;
197
198 cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
199
200 qedi_cmd = task->dd_data;
201 qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
202 if (!qedi_cmd->tmf_resp_buf) {
203 QEDI_ERR(&qedi->dbg_ctx,
204 "Failed to allocate resp buf, cid=0x%x\n",
205 qedi_conn->iscsi_conn_id);
206 return;
207 }
208
209 spin_lock(&session->back_lock);
210 resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
211 memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp));
212
213 /* Fill up the header */
214 resp_hdr_ptr->opcode = cqe_tmp_response->opcode;
215 resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags;
216 resp_hdr_ptr->response = cqe_tmp_response->hdr_response;
217 resp_hdr_ptr->hlength = 0;
218
219 hton24(resp_hdr_ptr->dlength,
220 (cqe_tmp_response->hdr_second_dword &
221 ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
222 tmp = (u32 *)resp_hdr_ptr->dlength;
223 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
224 conn->session->age);
225 resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
226 resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_tmp_response->exp_cmd_sn);
227 resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn);
228
229 tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
230
231 if (likely(qedi_cmd->io_cmd_in_list)) {
232 qedi_cmd->io_cmd_in_list = false;
233 list_del_init(&qedi_cmd->io_cmd);
234 qedi_conn->active_cmd_count--;
235 }
236
237 if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
238 ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
239 ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
240 ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
241 ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
242 ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
243 INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
244 queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
245 goto unblock_sess;
246 }
247
248 qedi_clear_task_idx(qedi, qedi_cmd->task_id);
249
250 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
251 kfree(resp_hdr_ptr);
252
253unblock_sess:
254 spin_unlock(&session->back_lock);
255}
256
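/*
 * Translate a firmware login-response CQE into an iscsi_login_rsp header,
 * point the generic PDU write pointer past the received payload and
 * complete the PDU to libiscsi.
 */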
257static void qedi_process_login_resp(struct qedi_ctx *qedi,
258 union iscsi_cqe *cqe,
259 struct iscsi_task *task,
260 struct qedi_conn *qedi_conn)
261{
262 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
263 struct iscsi_session *session = conn->session;
264 struct iscsi_task_context *task_ctx;
265 struct iscsi_login_rsp *resp_hdr_ptr;
266 struct iscsi_login_response_hdr *cqe_login_response;
267 struct qedi_cmd *cmd;
268 int pld_len;
269 u32 *tmp;
270
271 cmd = (struct qedi_cmd *)task->dd_data;
272
273 cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response;
274 task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
275
276 spin_lock(&session->back_lock);
277 resp_hdr_ptr = (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr;
278 memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp));
279 resp_hdr_ptr->opcode = cqe_login_response->opcode;
280 resp_hdr_ptr->flags = cqe_login_response->flags_attr;
281 resp_hdr_ptr->hlength = 0;
282
283 hton24(resp_hdr_ptr->dlength,
284 (cqe_login_response->hdr_second_dword &
285 ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
286 tmp = (u32 *)resp_hdr_ptr->dlength;
287 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
288 conn->session->age);
289 resp_hdr_ptr->tsih = cqe_login_response->tsih;
290 resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn);
291 resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn);
292 resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn);
293 resp_hdr_ptr->status_class = cqe_login_response->status_class;
294 resp_hdr_ptr->status_detail = cqe_login_response->status_detail;
295 pld_len = cqe_login_response->hdr_second_dword &
296 ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
297 qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
298
299 if (likely(cmd->io_cmd_in_list)) {
300 cmd->io_cmd_in_list = false;
301 list_del_init(&cmd->io_cmd);
302 qedi_conn->active_cmd_count--;
303 }
304
305 memset(task_ctx, '\0', sizeof(*task_ctx));
306
307 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
308 qedi_conn->gen_pdu.resp_buf,
309 (qedi_conn->gen_pdu.resp_wr_ptr -
310 qedi_conn->gen_pdu.resp_buf));
311
312 spin_unlock(&session->back_lock);
313 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
314 "Freeing tid=0x%x for cid=0x%x\n",
315 cmd->task_id, qedi_conn->iscsi_conn_id);
316 cmd->state = RESPONSE_RECEIVED;
317 qedi_clear_task_idx(qedi, cmd->task_id);
318}
319
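/*
 * Copy an unsolicited PDU payload out of the BDQ buffer identified by
 * rqe_opaque into the caller's buffer. Only SINGLE and FIRST unsolicited
 * CQEs carry data to copy here.
 */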
320static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
321 struct iscsi_cqe_unsolicited *cqe,
322 char *ptr, int len)
323{
324 u16 idx = 0;
325
326 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
327 "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n",
328 len, qedi->bdq_prod_idx,
329 (qedi->bdq_prod_idx % qedi->rq_num_entries));
330
331 /* Obtain buffer address from rqe_opaque */
332 idx = cqe->rqe_opaque.lo;
333 if (idx > (QEDI_BDQ_NUM - 1)) {
334 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
335 "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
336 idx);
337 return;
338 }
339
340 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
341 "rqe_opaque.lo [0x%x], rqe_opaque.hi [0x%x], idx [%d]\n",
342 cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
343
344 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
345 "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
346 switch (cqe->unsol_cqe_type) {
347 case ISCSI_CQE_UNSOLICITED_SINGLE:
348 case ISCSI_CQE_UNSOLICITED_FIRST:
349 if (len)
350 memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len);
351 break;
352 case ISCSI_CQE_UNSOLICITED_MIDDLE:
353 case ISCSI_CQE_UNSOLICITED_LAST:
354 break;
355 default:
356 break;
357 }
358}
359
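/*
 * Return BDQ buffer(s) to the firmware: repost the buffer address in the
 * BDQ PBL and advance both the primary and secondary producer indices.
 */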
360static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
361 struct iscsi_cqe_unsolicited *cqe,
362 int count)
363{
364 u16 tmp;
365 u16 idx = 0;
366 struct scsi_bd *pbl;
367
368 /* Obtain buffer address from rqe_opaque */
369 idx = cqe->rqe_opaque.lo;
370 if (idx > (QEDI_BDQ_NUM - 1)) {
371 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
372 "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
373 idx);
374 return;
375 }
376
377 pbl = (struct scsi_bd *)qedi->bdq_pbl;
378 pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries);
379 pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma));
380 pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma));
381 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
382 "pbl [0x%p] pbl->address hi [0x%x] lo [0x%x] idx [%d]\n",
383 pbl, pbl->address.hi, pbl->address.lo, idx);
384 pbl->opaque.hi = 0;
385 pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
386
387 /* Increment producer to let f/w know we've handled the frame */
388 qedi->bdq_prod_idx += count;
389
390 writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
391 tmp = readw(qedi->bdq_primary_prod);
392
393 writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
394 tmp = readw(qedi->bdq_secondary_prod);
395}
396
397static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi,
398 struct iscsi_cqe_unsolicited *cqe,
399 u32 pdu_len, u32 num_bdqs,
400 char *bdq_data)
401{
402 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
403 "num_bdqs [%d]\n", num_bdqs);
404
405 qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len);
406 qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1));
407}
408
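/*
 * Handle a NOP-In CQE. Target-initiated (unsolicited) NOP-Ins are pulled
 * from the BDQ and reported with RESERVED_ITT; solicited ones complete our
 * NOP-Out and free its task index. Returns 1 for a target-initiated NOP.
 */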
409static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
410 union iscsi_cqe *cqe,
411 struct iscsi_task *task,
412 struct qedi_conn *qedi_conn, u16 que_idx)
413{
414 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
415 struct iscsi_session *session = conn->session;
416 struct iscsi_nop_in_hdr *cqe_nop_in;
417 struct iscsi_nopin *hdr;
418 struct qedi_cmd *cmd;
419 int tgt_async_nop = 0;
420 u32 lun[2];
421 u32 pdu_len, num_bdqs;
422 char bdq_data[QEDI_BDQ_BUF_SIZE];
423 unsigned long flags;
424
425 spin_lock_bh(&session->back_lock);
426 cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in;
427
428 pdu_len = cqe_nop_in->hdr_second_dword &
429 ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK;
430 num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
431
432 hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr;
433 memset(hdr, 0, sizeof(struct iscsi_hdr));
434 hdr->opcode = cqe_nop_in->opcode;
435 hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn);
436 hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn);
437 hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn);
438 hdr->ttt = cpu_to_be32(cqe_nop_in->ttt);
439
440 if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
441 spin_lock_irqsave(&qedi->hba_lock, flags);
442 qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
443 pdu_len, num_bdqs, bdq_data);
444 hdr->itt = RESERVED_ITT;
445 tgt_async_nop = 1;
446 spin_unlock_irqrestore(&qedi->hba_lock, flags);
447 goto done;
448 }
449
450 /* Response to one of our nop-outs */
451 if (task) {
452 cmd = task->dd_data;
453 hdr->flags = ISCSI_FLAG_CMD_FINAL;
454 hdr->itt = build_itt(cqe->cqe_solicited.itid,
455 conn->session->age);
456 lun[0] = 0xffffffff;
457 lun[1] = 0xffffffff;
458 memcpy(&hdr->lun, lun, sizeof(struct scsi_lun));
459 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
460 "Freeing tid=0x%x for cid=0x%x\n",
461 cmd->task_id, qedi_conn->iscsi_conn_id);
462 cmd->state = RESPONSE_RECEIVED;
463 spin_lock(&qedi_conn->list_lock);
464 if (likely(cmd->io_cmd_in_list)) {
465 cmd->io_cmd_in_list = false;
466 list_del_init(&cmd->io_cmd);
467 qedi_conn->active_cmd_count--;
468 }
469
470 spin_unlock(&qedi_conn->list_lock);
471 qedi_clear_task_idx(qedi, cmd->task_id);
472 }
473
474done:
475 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len);
476
477 spin_unlock_bh(&session->back_lock);
478 return tgt_async_nop;
479}
480
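/*
 * Rebuild an iSCSI async message PDU from the firmware CQE, fetching any
 * payload from the BDQ, and hand it up to libiscsi.
 */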
481static void qedi_process_async_mesg(struct qedi_ctx *qedi,
482 union iscsi_cqe *cqe,
483 struct iscsi_task *task,
484 struct qedi_conn *qedi_conn,
485 u16 que_idx)
486{
487 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
488 struct iscsi_session *session = conn->session;
489 struct iscsi_async_msg_hdr *cqe_async_msg;
490 struct iscsi_async *resp_hdr;
491 u32 lun[2];
492 u32 pdu_len, num_bdqs;
493 char bdq_data[QEDI_BDQ_BUF_SIZE];
494 unsigned long flags;
495
496 spin_lock_bh(&session->back_lock);
497
498 cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg;
499 pdu_len = cqe_async_msg->hdr_second_dword &
500 ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK;
501 num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
502
503 if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
504 spin_lock_irqsave(&qedi->hba_lock, flags);
505 qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
506 pdu_len, num_bdqs, bdq_data);
507 spin_unlock_irqrestore(&qedi->hba_lock, flags);
508 }
509
510 resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr;
511 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
512 resp_hdr->opcode = cqe_async_msg->opcode;
513 resp_hdr->flags = 0x80;
514
515 lun[0] = cpu_to_be32(cqe_async_msg->lun.lo);
516 lun[1] = cpu_to_be32(cqe_async_msg->lun.hi);
517 memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun));
518 resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn);
519 resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn);
520 resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn);
521
522 resp_hdr->async_event = cqe_async_msg->async_event;
523 resp_hdr->async_vcode = cqe_async_msg->async_vcode;
524
525 resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv);
526 resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv);
527 resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv);
528
529 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data,
530 pdu_len);
531
532 spin_unlock_bh(&session->back_lock);
533}
534
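/*
 * Rebuild a reject PDU from the firmware CQE, copying any rejected PDU
 * payload from the BDQ into conn->data before completing it.
 */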
535static void qedi_process_reject_mesg(struct qedi_ctx *qedi,
536 union iscsi_cqe *cqe,
537 struct iscsi_task *task,
538 struct qedi_conn *qedi_conn,
539 uint16_t que_idx)
540{
541 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
542 struct iscsi_session *session = conn->session;
543 struct iscsi_reject_hdr *cqe_reject;
544 struct iscsi_reject *hdr;
545 u32 pld_len, num_bdqs;
546 unsigned long flags;
547
548 spin_lock_bh(&session->back_lock);
549 cqe_reject = &cqe->cqe_common.iscsi_hdr.reject;
550 pld_len = cqe_reject->hdr_second_dword &
551 ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK;
552 num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE;
553
554 if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
555 spin_lock_irqsave(&qedi->hba_lock, flags);
556 qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
557 pld_len, num_bdqs, conn->data);
558 spin_unlock_irqrestore(&qedi->hba_lock, flags);
559 }
560 hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr;
561 memset(hdr, 0, sizeof(struct iscsi_hdr));
562 hdr->opcode = cqe_reject->opcode;
563 hdr->reason = cqe_reject->hdr_reason;
564 hdr->flags = cqe_reject->hdr_flags;
565 hton24(hdr->dlength, (cqe_reject->hdr_second_dword &
566 ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK));
567 hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn);
568 hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn);
569 hdr->statsn = cpu_to_be32(cqe_reject->stat_sn);
570 hdr->ffffffff = cpu_to_be32(0xffffffff);
571
572 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
573 conn->data, pld_len);
574 spin_unlock_bh(&session->back_lock);
575}
576
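/*
 * Complete a SCSI command: validate the scsi_cmnd, unmap its SG list,
 * build the SCSI response header (copying sense data on CHECK CONDITION
 * and fixing up the residual on a firmware underrun), then complete the
 * PDU to libiscsi.
 */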
577static void qedi_scsi_completion(struct qedi_ctx *qedi,
578 union iscsi_cqe *cqe,
579 struct iscsi_task *task,
580 struct iscsi_conn *conn)
581{
582 struct scsi_cmnd *sc_cmd;
583 struct qedi_cmd *cmd = task->dd_data;
584 struct iscsi_session *session = conn->session;
585 struct iscsi_scsi_rsp *hdr;
586 struct iscsi_data_in_hdr *cqe_data_in;
587 int datalen = 0;
588 struct qedi_conn *qedi_conn;
589 u32 iscsi_cid;
590 bool mark_cmd_node_deleted = false;
591 u8 cqe_err_bits = 0;
592
593 iscsi_cid = cqe->cqe_common.conn_id;
594 qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
595
596 cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in;
597 cqe_err_bits =
598 cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
599
600 spin_lock_bh(&session->back_lock);
601 /* get the scsi command */
602 sc_cmd = cmd->scsi_cmd;
603
604 if (!sc_cmd) {
605 QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n");
606 goto error;
607 }
608
609 if (!sc_cmd->SCp.ptr) {
610 QEDI_WARN(&qedi->dbg_ctx,
611 "SCp.ptr is NULL, returned in another context.\n");
612 goto error;
613 }
614
615 if (!sc_cmd->request) {
616 QEDI_WARN(&qedi->dbg_ctx,
617 "sc_cmd->request is NULL, sc_cmd=%p.\n",
618 sc_cmd);
619 goto error;
620 }
621
622 if (!sc_cmd->request->special) {
623 QEDI_WARN(&qedi->dbg_ctx,
624 "request->special is NULL so request is not valid, sc_cmd=%p.\n",
625 sc_cmd);
626 goto error;
627 }
628
629 if (!sc_cmd->request->q) {
630 QEDI_WARN(&qedi->dbg_ctx,
631 "request->q is NULL so request is not valid, sc_cmd=%p.\n",
632 sc_cmd);
633 goto error;
634 }
635
636 qedi_iscsi_unmap_sg_list(cmd);
637
638 hdr = (struct iscsi_scsi_rsp *)task->hdr;
639 hdr->opcode = cqe_data_in->opcode;
640 hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn);
641 hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn);
642 hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
643 hdr->response = cqe_data_in->reserved1;
644 hdr->cmd_status = cqe_data_in->status_rsvd;
645 hdr->flags = cqe_data_in->flags;
646 hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count);
647
648 if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
649 datalen = cqe_data_in->reserved2 &
650 ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK;
651 memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen);
652 }
653
654 /* If f/w reports data underrun err then set residual to IO transfer
655 * length, set Underrun flag and clear Overrun flag explicitly
656 */
657 if (unlikely(cqe_err_bits &&
658 GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) {
659 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
660 "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n",
661 hdr->itt, cqe_data_in->flags, cmd->task_id,
662 qedi_conn->iscsi_conn_id, hdr->residual_count,
663 scsi_bufflen(sc_cmd));
664 hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd));
665 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
666 hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW);
667 }
668
669 spin_lock(&qedi_conn->list_lock);
670 if (likely(cmd->io_cmd_in_list)) {
671 cmd->io_cmd_in_list = false;
672 list_del_init(&cmd->io_cmd);
673 qedi_conn->active_cmd_count--;
674 mark_cmd_node_deleted = true;
675 }
676 spin_unlock(&qedi_conn->list_lock);
677
678 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
679 "Freeing tid=0x%x for cid=0x%x\n",
680 cmd->task_id, qedi_conn->iscsi_conn_id);
681 cmd->state = RESPONSE_RECEIVED;
682 if (qedi_io_tracing)
683 qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
684
685 qedi_clear_task_idx(qedi, cmd->task_id);
686 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
687 conn->data, datalen);
688error:
689 spin_unlock_bh(&session->back_lock);
690}
691
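/*
 * Dispatch a solicited CQE to the proper response handler based on the
 * opcode in the firmware-provided iSCSI header.
 */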
692static void qedi_mtask_completion(struct qedi_ctx *qedi,
693 union iscsi_cqe *cqe,
694 struct iscsi_task *task,
695 struct qedi_conn *conn, uint16_t que_idx)
696{
697 struct iscsi_conn *iscsi_conn;
698 u32 hdr_opcode;
699
700 hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
701 iscsi_conn = conn->cls_conn->dd_data;
702
703 switch (hdr_opcode) {
704 case ISCSI_OPCODE_SCSI_RESPONSE:
705 case ISCSI_OPCODE_DATA_IN:
706 qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
707 break;
708 case ISCSI_OPCODE_LOGIN_RESPONSE:
709 qedi_process_login_resp(qedi, cqe, task, conn);
710 break;
711 case ISCSI_OPCODE_TMF_RESPONSE:
712 qedi_process_tmf_resp(qedi, cqe, task, conn);
713 break;
714 case ISCSI_OPCODE_TEXT_RESPONSE:
715 qedi_process_text_resp(qedi, cqe, task, conn);
716 break;
717 case ISCSI_OPCODE_LOGOUT_RESPONSE:
718 qedi_process_logout_resp(qedi, cqe, task, conn);
719 break;
720 case ISCSI_OPCODE_NOP_IN:
721 qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
722 break;
723 default:
724 QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n");
725 }
726}
727
728static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
729 struct iscsi_cqe_solicited *cqe,
730 struct iscsi_task *task,
731 struct qedi_conn *qedi_conn)
732{
733 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
734 struct iscsi_session *session = conn->session;
735 struct qedi_cmd *cmd = task->dd_data;
736
737 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL,
738 "itid=0x%x, cmd task id=0x%x\n",
739 cqe->itid, cmd->task_id);
740
741 cmd->state = RESPONSE_RECEIVED;
742 qedi_clear_task_idx(qedi, cmd->task_id);
743
744 spin_lock_bh(&session->back_lock);
745 __iscsi_put_task(task);
746 spin_unlock_bh(&session->back_lock);
747}
748
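/*
 * Handle a TASK_CLEANUP CQE. It may acknowledge the cleanup issued for an
 * ABORT TASK TMF (matched via the connection's tmf_work_list) or one of
 * the per-command cleanups issued by qedi_cleanup_all_io.
 */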
749static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
750 struct iscsi_cqe_solicited *cqe,
751 struct iscsi_task *task,
752 struct iscsi_conn *conn)
753{
754 struct qedi_work_map *work, *work_tmp;
755 u32 proto_itt = cqe->itid;
756 u32 ptmp_itt = 0;
757 itt_t protoitt = 0;
758 int found = 0;
759 struct qedi_cmd *qedi_cmd = NULL;
760 u32 rtid = 0;
761 u32 iscsi_cid;
762 struct qedi_conn *qedi_conn;
763 struct qedi_cmd *cmd_new, *dbg_cmd;
764 struct iscsi_task *mtask;
765 struct iscsi_tm *tmf_hdr = NULL;
766
767 iscsi_cid = cqe->conn_id;
768 qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
769
770 /* Based on this itt get the corresponding qedi_cmd */
771 spin_lock_bh(&qedi_conn->tmf_work_lock);
772 list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list,
773 list) {
774 if (work->rtid == proto_itt) {
775 /* We found the command */
776 qedi_cmd = work->qedi_cmd;
777 if (!qedi_cmd->list_tmf_work) {
778 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
779 "TMF work not found, cqe->tid=0x%x, cid=0x%x\n",
780 proto_itt, qedi_conn->iscsi_conn_id);
781 WARN_ON(1);
782 }
783 found = 1;
784 mtask = qedi_cmd->task;
785 tmf_hdr = (struct iscsi_tm *)mtask->hdr;
786 rtid = work->rtid;
787
788 list_del_init(&work->list);
789 kfree(work);
790 qedi_cmd->list_tmf_work = NULL;
791 }
792 }
793 spin_unlock_bh(&qedi_conn->tmf_work_lock);
794
795 if (found) {
796 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
797 "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
798 proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
799
800 if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
801 ISCSI_TM_FUNC_ABORT_TASK) {
802 spin_lock_bh(&conn->session->back_lock);
803
804 protoitt = build_itt(get_itt(tmf_hdr->rtt),
805 conn->session->age);
806 task = iscsi_itt_to_task(conn, protoitt);
807
808 spin_unlock_bh(&conn->session->back_lock);
809
810 if (!task) {
811 QEDI_NOTICE(&qedi->dbg_ctx,
812 "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
813 get_itt(tmf_hdr->rtt),
814 qedi_conn->iscsi_conn_id);
815 return;
816 }
817
818 dbg_cmd = task->dd_data;
819
820 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
821 "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
822 get_itt(tmf_hdr->rtt), get_itt(task->itt),
823 dbg_cmd->task_id, qedi_conn->iscsi_conn_id);
824
825 if (qedi_cmd->state == CLEANUP_WAIT_FAILED)
826 qedi_cmd->state = CLEANUP_RECV;
827
828 qedi_clear_task_idx(qedi_conn->qedi, rtid);
829
830 spin_lock(&qedi_conn->list_lock);
831 list_del_init(&dbg_cmd->io_cmd);
832 qedi_conn->active_cmd_count--;
833 spin_unlock(&qedi_conn->list_lock);
834 qedi_cmd->state = CLEANUP_RECV;
835 wake_up_interruptible(&qedi_conn->wait_queue);
836 }
837 } else if (qedi_conn->cmd_cleanup_req > 0) {
838 spin_lock_bh(&conn->session->back_lock);
839 qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
840 protoitt = build_itt(ptmp_itt, conn->session->age);
841 task = iscsi_itt_to_task(conn, protoitt);
842 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
843 "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n",
844 cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl,
845 qedi_conn->iscsi_conn_id);
846
847 spin_unlock_bh(&conn->session->back_lock);
848 if (!task) {
849 QEDI_NOTICE(&qedi->dbg_ctx,
850 "task is null, itid=0x%x, cid=0x%x\n",
851 cqe->itid, qedi_conn->iscsi_conn_id);
852 return;
853 }
854 qedi_conn->cmd_cleanup_cmpl++;
855 wake_up(&qedi_conn->wait_queue);
856 cmd_new = task->dd_data;
857
858 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
859 "Freeing tid=0x%x for cid=0x%x\n",
860 cqe->itid, qedi_conn->iscsi_conn_id);
861 qedi_clear_task_idx(qedi_conn->qedi, cqe->itid);
862
863 } else {
864 qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
865 protoitt = build_itt(ptmp_itt, conn->session->age);
866 task = iscsi_itt_to_task(conn, protoitt);
867 QEDI_ERR(&qedi->dbg_ctx,
868 "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
869 protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
870 WARN_ON(1);
871 }
872}
873
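/*
 * Fast-path CQE handler: validate the connection, fail it on data-digest
 * errors, then route the CQE by type (solicited, unsolicited, dummy or
 * task-cleanup) to the appropriate handler above.
 */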
874void qedi_fp_process_cqes(struct qedi_work *work)
875{
876 struct qedi_ctx *qedi = work->qedi;
877 union iscsi_cqe *cqe = &work->cqe;
878 struct iscsi_task *task = NULL;
879 struct iscsi_nopout *nopout_hdr;
880 struct qedi_conn *q_conn;
881 struct iscsi_conn *conn;
882 struct qedi_cmd *qedi_cmd;
883 u32 comp_type;
884 u32 iscsi_cid;
885 u32 hdr_opcode;
886 u16 que_idx = work->que_idx;
887 u8 cqe_err_bits = 0;
888
889 comp_type = cqe->cqe_common.cqe_type;
890 hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
891 cqe_err_bits =
892 cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
893
894 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
895 "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n",
896 cqe->cqe_common.conn_id, comp_type, hdr_opcode);
897
898 if (comp_type >= MAX_ISCSI_CQES_TYPE) {
899 QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n");
900 return;
901 }
902
903 iscsi_cid = cqe->cqe_common.conn_id;
904 q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
905 if (!q_conn) {
906 QEDI_WARN(&qedi->dbg_ctx,
907 "Session no longer exists for cid=0x%x!!\n",
908 iscsi_cid);
909 return;
910 }
911
912 conn = q_conn->cls_conn->dd_data;
913
914 if (unlikely(cqe_err_bits &&
915 GET_FIELD(cqe_err_bits,
916 CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) {
917 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
918 return;
919 }
920
921 switch (comp_type) {
922 case ISCSI_CQE_TYPE_SOLICITED:
923 case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
924 qedi_cmd = container_of(work, struct qedi_cmd, cqe_work);
925 task = qedi_cmd->task;
926 if (!task) {
927 QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n");
928 return;
929 }
930
931 /* Process NOPIN local completion */
932 nopout_hdr = (struct iscsi_nopout *)task->hdr;
933 if ((nopout_hdr->itt == RESERVED_ITT) &&
934 (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) {
935 qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited,
936 task, q_conn);
937 } else {
938 cqe->cqe_solicited.itid =
939 qedi_get_itt(cqe->cqe_solicited);
940 /* Process other solicited responses */
941 qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx);
942 }
943 break;
944 case ISCSI_CQE_TYPE_UNSOLICITED:
945 switch (hdr_opcode) {
946 case ISCSI_OPCODE_NOP_IN:
947 qedi_process_nopin_mesg(qedi, cqe, task, q_conn,
948 que_idx);
949 break;
950 case ISCSI_OPCODE_ASYNC_MSG:
951 qedi_process_async_mesg(qedi, cqe, task, q_conn,
952 que_idx);
953 break;
954 case ISCSI_OPCODE_REJECT:
955 qedi_process_reject_mesg(qedi, cqe, task, q_conn,
956 que_idx);
957 break;
958 }
959 goto exit_fp_process;
960 case ISCSI_CQE_TYPE_DUMMY:
961 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n");
962 goto exit_fp_process;
963 case ISCSI_CQE_TYPE_TASK_CLEANUP:
964 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n");
965 qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task,
966 conn);
967 goto exit_fp_process;
968 default:
969 QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n");
970 break;
971 }
972
973exit_fp_process:
974 return;
975}
976
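/*
 * Build a send-queue WQE for the task at the current SQ producer index.
 * The WQE type and fast-SGE count depend on whether this is a cleanup,
 * a middle-path PDU (login/text) or a normal command.
 */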
977static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
978 u16 tid, uint16_t ptu_invalidate, int is_cleanup)
979{
980 struct iscsi_wqe *wqe;
981 struct iscsi_wqe_field *cont_field;
982 struct qedi_endpoint *ep;
983 struct scsi_cmnd *sc = task->sc;
984 struct iscsi_login_req *login_hdr;
985 struct qedi_cmd *cmd = task->dd_data;
986
987 login_hdr = (struct iscsi_login_req *)task->hdr;
988 ep = qedi_conn->ep;
989 wqe = &ep->sq[ep->sq_prod_idx];
990
991 memset(wqe, 0, sizeof(*wqe));
992
993 ep->sq_prod_idx++;
994 ep->fw_sq_prod_idx++;
995 if (ep->sq_prod_idx == QEDI_SQ_SIZE)
996 ep->sq_prod_idx = 0;
997
998 if (is_cleanup) {
999 SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
1000 ISCSI_WQE_TYPE_TASK_CLEANUP);
1001 wqe->task_id = tid;
1002 return;
1003 }
1004
1005 if (ptu_invalidate) {
1006 SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
1007 ISCSI_WQE_SET_PTU_INVALIDATE);
1008 }
1009
1010 cont_field = &wqe->cont_prevtid_union.cont_field;
1011
1012 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
1013 case ISCSI_OP_LOGIN:
1014 case ISCSI_OP_TEXT:
1015 SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
1016 ISCSI_WQE_TYPE_MIDDLE_PATH);
1017 SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
1018 1);
1019 cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
1020 break;
1021 case ISCSI_OP_LOGOUT:
1022 case ISCSI_OP_NOOP_OUT:
1023 case ISCSI_OP_SCSI_TMFUNC:
1024 SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
1025 ISCSI_WQE_TYPE_NORMAL);
1026 break;
1027 default:
1028 if (!sc)
1029 break;
1030
1031 SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
1032 ISCSI_WQE_TYPE_NORMAL);
1033 cont_field->contlen_cdbsize_field =
1034 (sc->sc_data_direction == DMA_TO_DEVICE) ?
1035 scsi_bufflen(sc) : 0;
1036 if (cmd->use_slowpath)
1037 SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
1038 else
1039 SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
1040 (sc->sc_data_direction ==
1041 DMA_TO_DEVICE) ?
1042 min((u16)QEDI_FAST_SGE_COUNT,
1043 (u16)cmd->io_tbl.sge_valid) : 0);
1044 break;
1045 }
1046
1047 wqe->task_id = tid;
1048 /* Make sure SQ data is coherent */
1049 wmb();
1050}
1051
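/*
 * Notify the firmware of new SQ entries by writing the producer index to
 * the connection's doorbell register.
 */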
1052static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
1053{
1054 struct iscsi_db_data dbell = { 0 };
1055
1056 dbell.agg_flags = 0;
1057
1058 dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT;
1059 dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT;
1060 dbell.params |=
1061 DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT;
1062
1063 dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
1064 writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell);
1065
1066 /* Make sure fw write idx is coherent, and include both memory barriers
1067 * as a failsafe as for some architectures the call is the same but on
1068 * others they are two different assembly operations.
1069 */
1070 wmb();
1071 mmiowb();
1072 QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
1073 "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
1074 qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
1075 qedi_conn->iscsi_conn_id);
1076}
1077
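/*
 * Post a login request: fill the Ystorm/Mstorm/Ustorm sections of the
 * firmware task context, add the command to the active list and ring the
 * SQ doorbell.
 */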
1078int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
1079 struct iscsi_task *task)
1080{
1081 struct qedi_ctx *qedi = qedi_conn->qedi;
1082 struct iscsi_task_context *fw_task_ctx;
1083 struct iscsi_login_req *login_hdr;
1084 struct iscsi_login_req_hdr *fw_login_req = NULL;
1085 struct iscsi_cached_sge_ctx *cached_sge = NULL;
1086 struct iscsi_sge *single_sge = NULL;
1087 struct iscsi_sge *req_sge = NULL;
1088 struct iscsi_sge *resp_sge = NULL;
1089 struct qedi_cmd *qedi_cmd;
1090 s16 ptu_invalidate = 0;
1091 s16 tid = 0;
1092
1093 req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1094 resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1095 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1096 login_hdr = (struct iscsi_login_req *)task->hdr;
1097
1098 tid = qedi_get_task_idx(qedi);
1099 if (tid == -1)
1100 return -ENOMEM;
1101
1102 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
1103 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1104
1105 qedi_cmd->task_id = tid;
1106
1107 /* Ystorm context */
1108 fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req;
1109 fw_login_req->opcode = login_hdr->opcode;
1110 fw_login_req->version_min = login_hdr->min_version;
1111 fw_login_req->version_max = login_hdr->max_version;
1112 fw_login_req->flags_attr = login_hdr->flags;
1113 fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2);
1114 fw_login_req->isid_d = *((u32 *)login_hdr->isid);
1115 fw_login_req->tsih = login_hdr->tsih;
1116 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1117 fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt));
1118 fw_login_req->cid = qedi_conn->iscsi_conn_id;
1119 fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
1120 fw_login_req->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
1121 fw_login_req->exp_stat_sn = 0;
1122
1123 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
1124 ptu_invalidate = 1;
1125 qedi->tid_reuse_count[tid] = 0;
1126 }
1127
1128 fw_task_ctx->ystorm_st_context.state.reuse_count =
1129 qedi->tid_reuse_count[tid];
1130 fw_task_ctx->mstorm_st_context.reuse_count =
1131 qedi->tid_reuse_count[tid]++;
1132 cached_sge =
1133 &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
1134 cached_sge->sge.sge_len = req_sge->sge_len;
1135 cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
1136 cached_sge->sge.sge_addr.hi =
1137 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
1138
1139 /* Mstorm context */
1140 single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
1141 fw_task_ctx->mstorm_st_context.task_type = 0x2;
1142 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
1143 single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
1144 single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
1145 single_sge->sge_len = resp_sge->sge_len;
1146
1147 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1148 ISCSI_MFLAGS_SINGLE_SGE, 1);
1149 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1150 ISCSI_MFLAGS_SLOW_IO, 0);
1151 fw_task_ctx->mstorm_st_context.sgl_size = 1;
1152 fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
1153
1154 /* Ustorm context */
1155 fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
1156 fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
1157 ntoh24(login_hdr->dlength);
1158 fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
1159 fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
1160 fw_task_ctx->ustorm_st_context.task_type = 0x2;
1161 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
1162 fw_task_ctx->ustorm_ag_context.exp_data_acked =
1163 ntoh24(login_hdr->dlength);
1164 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
1165 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
1166 SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
1167 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
1168
1169 spin_lock(&qedi_conn->list_lock);
1170 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
1171 qedi_cmd->io_cmd_in_list = true;
1172 qedi_conn->active_cmd_count++;
1173 spin_unlock(&qedi_conn->list_lock);
1174
1175 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
1176 qedi_ring_doorbell(qedi_conn);
1177 return 0;
1178}
1179
1180int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
1181 struct iscsi_task *task)
1182{
1183 struct qedi_ctx *qedi = qedi_conn->qedi;
1184 struct iscsi_logout_req_hdr *fw_logout_req = NULL;
1185 struct iscsi_task_context *fw_task_ctx = NULL;
1186 struct iscsi_logout *logout_hdr = NULL;
1187 struct qedi_cmd *qedi_cmd = NULL;
1188 s16 tid = 0;
1189 s16 ptu_invalidate = 0;
1190
1191 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1192 logout_hdr = (struct iscsi_logout *)task->hdr;
1193
1194 tid = qedi_get_task_idx(qedi);
1195 if (tid == -1)
1196 return -ENOMEM;
1197
1198 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
1199
1200 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1201 qedi_cmd->task_id = tid;
1202
1203 /* Ystorm context */
1204 fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req;
1205 fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST;
1206 fw_logout_req->reason_code = 0x80 | logout_hdr->flags;
1207 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1208 fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
1209 fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
1210 fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
1211
1212 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
1213 ptu_invalidate = 1;
1214 qedi->tid_reuse_count[tid] = 0;
1215 }
1216 fw_task_ctx->ystorm_st_context.state.reuse_count =
1217 qedi->tid_reuse_count[tid];
1218 fw_task_ctx->mstorm_st_context.reuse_count =
1219 qedi->tid_reuse_count[tid]++;
1220 fw_logout_req->cid = qedi_conn->iscsi_conn_id;
1221 fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
1222
1223 /* Mstorm context */
1224 fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1225 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
1226
1227 /* Ustorm context */
1228 fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
1229 fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
1230 fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
1231 fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1232 fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
1233
1234 SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
1235 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
1236 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
1237 ISCSI_REG1_NUM_FAST_SGES, 0);
1238
1239 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
1240 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
1241 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
1242
1243 spin_lock(&qedi_conn->list_lock);
1244 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
1245 qedi_cmd->io_cmd_in_list = true;
1246 qedi_conn->active_cmd_count++;
1247 spin_unlock(&qedi_conn->list_lock);
1248
1249 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
1250 qedi_ring_doorbell(qedi_conn);
1251
1252 return 0;
1253}
1254
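/*
 * Issue a firmware cleanup for every outstanding command on the connection
 * (restricted to one LUN for a LUN reset) and wait for the completions,
 * draining the device and waiting once more if the first wait times out.
 */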
1255int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
1256 struct iscsi_task *task, bool in_recovery)
1257{
1258 int rval;
1259 struct iscsi_task *ctask;
1260 struct qedi_cmd *cmd, *cmd_tmp;
1261 struct iscsi_tm *tmf_hdr;
1262 unsigned int lun = 0;
1263 bool lun_reset = false;
1264 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
1265 struct iscsi_session *session = conn->session;
1266
1267 /* When called from recovery, task is NULL; from a TMF response it is a valid task. */
1268 if (task) {
1269 tmf_hdr = (struct iscsi_tm *)task->hdr;
1270
1271 if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
1272 ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) {
1273 lun_reset = true;
1274 lun = scsilun_to_int(&tmf_hdr->lun);
1275 }
1276 }
1277
1278 qedi_conn->cmd_cleanup_req = 0;
1279 qedi_conn->cmd_cleanup_cmpl = 0;
1280
1281 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1282 "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
1283 qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id,
1284 in_recovery, lun_reset);
1285
1286 if (lun_reset)
1287 spin_lock_bh(&session->back_lock);
1288
1289 spin_lock(&qedi_conn->list_lock);
1290
1291 list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
1292 io_cmd) {
1293 ctask = cmd->task;
1294 if (ctask == task)
1295 continue;
1296
1297 if (lun_reset) {
1298 if (cmd->scsi_cmd && cmd->scsi_cmd->device) {
1299 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1300 "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n",
1301 cmd->task_id, get_itt(ctask->itt),
1302 cmd->scsi_cmd, cmd->scsi_cmd->device,
1303 ctask->state, cmd->state,
1304 qedi_conn->iscsi_conn_id);
1305 if (cmd->scsi_cmd->device->lun != lun)
1306 continue;
1307 }
1308 }
1309 qedi_conn->cmd_cleanup_req++;
1310 qedi_iscsi_cleanup_task(ctask, true);
1311
1312 list_del_init(&cmd->io_cmd);
1313 qedi_conn->active_cmd_count--;
1314 QEDI_WARN(&qedi->dbg_ctx,
1315 "Deleted active cmd list node io_cmd=%p, cid=0x%x\n",
1316 &cmd->io_cmd, qedi_conn->iscsi_conn_id);
1317 }
1318
1319 spin_unlock(&qedi_conn->list_lock);
1320
1321 if (lun_reset)
1322 spin_unlock_bh(&session->back_lock);
1323
1324 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1325 "cmd_cleanup_req=%d, cid=0x%x\n",
1326 qedi_conn->cmd_cleanup_req,
1327 qedi_conn->iscsi_conn_id);
1328
1329 rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
1330 ((qedi_conn->cmd_cleanup_req ==
1331 qedi_conn->cmd_cleanup_cmpl) ||
1332 qedi_conn->ep),
1333 5 * HZ);
1334 if (rval) {
1335 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1336 "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
1337 qedi_conn->cmd_cleanup_req,
1338 qedi_conn->cmd_cleanup_cmpl,
1339 qedi_conn->iscsi_conn_id);
1340
1341 return 0;
1342 }
1343
1344 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1345 "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
1346 qedi_conn->cmd_cleanup_req,
1347 qedi_conn->cmd_cleanup_cmpl,
1348 qedi_conn->iscsi_conn_id);
1349
1350 iscsi_host_for_each_session(qedi->shost,
1351 qedi_mark_device_missing);
1352 qedi_ops->common->drain(qedi->cdev);
1353
1354 /* Re-enable I/O for all sessions except the current one. */
1355 if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
1356 (qedi_conn->cmd_cleanup_req ==
1357 qedi_conn->cmd_cleanup_cmpl),
1358 5 * HZ)) {
1359 iscsi_host_for_each_session(qedi->shost,
1360 qedi_mark_device_available);
1361 return -1;
1362 }
1363
1364 iscsi_host_for_each_session(qedi->shost,
1365 qedi_mark_device_available);
1366
1367 return 0;
1368}
1369
1370void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
1371 struct iscsi_task *task)
1372{
1373 struct qedi_endpoint *qedi_ep;
1374 int rval;
1375
1376 qedi_ep = qedi_conn->ep;
1377 qedi_conn->cmd_cleanup_req = 0;
1378 qedi_conn->cmd_cleanup_cmpl = 0;
1379
1380 if (!qedi_ep) {
1381 QEDI_WARN(&qedi->dbg_ctx,
1382 "Cannot proceed, ep already disconnected, cid=0x%x\n",
1383 qedi_conn->iscsi_conn_id);
1384 return;
1385 }
1386
1387 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1388 "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n",
1389 qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep);
1390
1391 qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle);
1392
1393 rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true);
1394 if (rval) {
1395 QEDI_ERR(&qedi->dbg_ctx,
1396 "fatal error, need hard reset, cid=0x%x\n",
1397 qedi_conn->iscsi_conn_id);
1398 WARN_ON(1);
1399 }
1400}
1401
1402static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
1403 struct qedi_conn *qedi_conn,
1404 struct iscsi_task *task,
1405 struct qedi_cmd *qedi_cmd,
1406 struct qedi_work_map *list_work)
1407{
1408 struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data;
1409 int wait;
1410
1411 wait = wait_event_interruptible_timeout(qedi_conn->wait_queue,
1412 ((qedi_cmd->state ==
1413 CLEANUP_RECV) ||
1414 ((qedi_cmd->type == TYPEIO) &&
1415 (cmd->state ==
1416 RESPONSE_RECEIVED))),
1417 5 * HZ);
1418 if (!wait) {
1419 qedi_cmd->state = CLEANUP_WAIT_FAILED;
1420
1421 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1422 "Cleanup timed out tid=0x%x, issue connection recovery, cid=0x%x\n",
1423 cmd->task_id, qedi_conn->iscsi_conn_id);
1424
1425 return -1;
1426 }
1427 return 0;
1428}
1429
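/*
 * Deferred ABORT TASK handling: issue a firmware cleanup for the task
 * referenced by the TMF, wait for the cleanup completion, then send the
 * TMF request itself on the SQ.
 */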
1430static void qedi_tmf_work(struct work_struct *work)
1431{
1432 struct qedi_cmd *qedi_cmd =
1433 container_of(work, struct qedi_cmd, tmf_work);
1434 struct qedi_conn *qedi_conn = qedi_cmd->conn;
1435 struct qedi_ctx *qedi = qedi_conn->qedi;
1436 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
1437 struct iscsi_cls_session *cls_sess;
1438 struct qedi_work_map *list_work = NULL;
1439 struct iscsi_task *mtask;
1440 struct qedi_cmd *cmd;
1441 struct iscsi_task *ctask;
1442 struct iscsi_tm *tmf_hdr;
1443 s16 rval = 0;
1444 s16 tid = 0;
1445
1446 mtask = qedi_cmd->task;
1447 tmf_hdr = (struct iscsi_tm *)mtask->hdr;
1448 cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
1449 set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
1450
1451 ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
1452 if (!ctask || !ctask->sc) {
1453 QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n");
1454 goto abort_ret;
1455 }
1456
1457 cmd = (struct qedi_cmd *)ctask->dd_data;
1458 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1459 "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n",
1460 get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
1461 qedi_conn->iscsi_conn_id);
1462
1463 if (do_not_recover) {
1464 QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
1465 do_not_recover);
1466 goto abort_ret;
1467 }
1468
1469 list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
1470 if (!list_work) {
1471 QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
1472 goto abort_ret;
1473 }
1474
1475 qedi_cmd->type = TYPEIO;
1476 list_work->qedi_cmd = qedi_cmd;
1477 list_work->rtid = cmd->task_id;
1478 list_work->state = QEDI_WORK_SCHEDULED;
1479 qedi_cmd->list_tmf_work = list_work;
1480
1481 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1482 "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n",
1483 list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id,
1484 tmf_hdr->flags);
1485
1486 spin_lock_bh(&qedi_conn->tmf_work_lock);
1487 list_add_tail(&list_work->list, &qedi_conn->tmf_work_list);
1488 spin_unlock_bh(&qedi_conn->tmf_work_lock);
1489
1490 qedi_iscsi_cleanup_task(ctask, false);
1491
1492 rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd,
1493 list_work);
1494 if (rval == -1) {
1495 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1496 "FW cleanup got escalated, cid=0x%x\n",
1497 qedi_conn->iscsi_conn_id);
1498 goto ldel_exit;
1499 }
1500
1501 tid = qedi_get_task_idx(qedi);
1502 if (tid == -1) {
1503 QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
1504 qedi_conn->iscsi_conn_id);
1505 goto ldel_exit;
1506 }
1507
1508 qedi_cmd->task_id = tid;
1509 qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
1510
1511abort_ret:
1512 clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
1513 return;
1514
1515ldel_exit:
1516 spin_lock_bh(&qedi_conn->tmf_work_lock);
1517 if (!qedi_cmd->list_tmf_work) {
1518 list_del_init(&list_work->list);
1519 qedi_cmd->list_tmf_work = NULL;
1520 kfree(list_work);
1521 }
1522 spin_unlock_bh(&qedi_conn->tmf_work_lock);
1523
1524 spin_lock(&qedi_conn->list_lock);
1525 list_del_init(&cmd->io_cmd);
1526 qedi_conn->active_cmd_count--;
1527 spin_unlock(&qedi_conn->list_lock);
1528
1529 clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
1530}
1531
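/*
 * Build the firmware task context for a TMF request (resolving the
 * referenced task for ABORT TASK), add it to the active command list and
 * ring the SQ doorbell.
 */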
1532static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
1533 struct iscsi_task *mtask)
1534{
1535 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
1536 struct qedi_ctx *qedi = qedi_conn->qedi;
1537 struct iscsi_task_context *fw_task_ctx;
1538 struct iscsi_tmf_request_hdr *fw_tmf_request;
1539 struct iscsi_sge *single_sge;
1540 struct qedi_cmd *qedi_cmd;
1541 struct qedi_cmd *cmd;
1542 struct iscsi_task *ctask;
1543 struct iscsi_tm *tmf_hdr;
1544 struct iscsi_sge *req_sge;
1545 struct iscsi_sge *resp_sge;
1546 u32 lun[2];
1547 s16 tid = 0, ptu_invalidate = 0;
1548
1549 req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1550 resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1551 qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
1552 tmf_hdr = (struct iscsi_tm *)mtask->hdr;
1553
1554 tid = qedi_cmd->task_id;
1555 qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
1556
1557 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
1558 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1559
1560 fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request;
1561 fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
1562 fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
1563
1564 memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
1565 fw_tmf_request->lun.lo = be32_to_cpu(lun[0]);
1566 fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
1567
1568 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
1569 ptu_invalidate = 1;
1570 qedi->tid_reuse_count[tid] = 0;
1571 }
1572 fw_task_ctx->ystorm_st_context.state.reuse_count =
1573 qedi->tid_reuse_count[tid];
1574 fw_task_ctx->mstorm_st_context.reuse_count =
1575 qedi->tid_reuse_count[tid]++;
1576
1577 if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
1578 ISCSI_TM_FUNC_ABORT_TASK) {
1579 ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
1580 if (!ctask || !ctask->sc) {
1581 QEDI_ERR(&qedi->dbg_ctx,
1582 "Could not get reference task\n");
1583 return 0;
1584 }
1585 cmd = (struct qedi_cmd *)ctask->dd_data;
1586 fw_tmf_request->rtt =
1587 qedi_set_itt(cmd->task_id,
1588 get_itt(tmf_hdr->rtt));
1589 } else {
1590 fw_tmf_request->rtt = ISCSI_RESERVED_TAG;
1591 }
1592
1593 fw_tmf_request->opcode = tmf_hdr->opcode;
1594 fw_tmf_request->function = tmf_hdr->flags;
1595 fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength);
1596 fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
1597
1598 single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
1599 fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1600 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
1601 single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
1602 single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
1603 single_sge->sge_len = resp_sge->sge_len;
1604
1605 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1606 ISCSI_MFLAGS_SINGLE_SGE, 1);
1607 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1608 ISCSI_MFLAGS_SLOW_IO, 0);
1609 fw_task_ctx->mstorm_st_context.sgl_size = 1;
1610 fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
1611
1612 /* Ustorm context */
1613 fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
1614 fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
1615 fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
1616 fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1617 fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
1618
1619 SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
1620 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
1621 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
1622 ISCSI_REG1_NUM_FAST_SGES, 0);
1623
1624 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
1625 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
1626 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
1627 fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
1628 fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
1629
1630 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
1631 "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n",
1632 tid, mtask->itt, qedi_conn->iscsi_conn_id);
1633
1634 spin_lock(&qedi_conn->list_lock);
1635 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
1636 qedi_cmd->io_cmd_in_list = true;
1637 qedi_conn->active_cmd_count++;
1638 spin_unlock(&qedi_conn->list_lock);
1639
1640 qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
1641 qedi_ring_doorbell(qedi_conn);
1642 return 0;
1643}
1644
1645int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
1646 struct iscsi_task *mtask)
1647{
1648 struct qedi_ctx *qedi = qedi_conn->qedi;
1649 struct iscsi_tm *tmf_hdr;
1650 struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
1651 s16 tid = 0;
1652
1653 tmf_hdr = (struct iscsi_tm *)mtask->hdr;
1654 qedi_cmd->task = mtask;
1655
1656 /* If abort task then schedule the work and return */
1657 if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
1658 ISCSI_TM_FUNC_ABORT_TASK) {
1659 qedi_cmd->state = CLEANUP_WAIT;
1660 INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work);
1661 queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
1662
1663 } else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
1664 ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
1665 ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
1666 ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
1667 ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
1668 ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
1669 tid = qedi_get_task_idx(qedi);
1670 if (tid == -1) {
1671 QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
1672 qedi_conn->iscsi_conn_id);
1673 return -1;
1674 }
1675 qedi_cmd->task_id = tid;
1676
1677 qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
1678
1679 } else {
1680 QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
1681 qedi_conn->iscsi_conn_id);
1682 return -1;
1683 }
1684
1685 return 0;
1686}
1687
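/*
 * Build the firmware task context for an iSCSI Text Request and post it
 * on the connection's send queue.  The request PDU is passed as a cached
 * SGE pointing at the gen_pdu request buffer; the response lands in the
 * gen_pdu response buffer described by a single SGE.
 */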
1688int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
1689 struct iscsi_task *task)
1690{
1691 struct qedi_ctx *qedi = qedi_conn->qedi;
1692 struct iscsi_task_context *fw_task_ctx;
1693 struct iscsi_text_request_hdr *fw_text_request;
1694 struct iscsi_cached_sge_ctx *cached_sge;
1695 struct iscsi_sge *single_sge;
1696 struct qedi_cmd *qedi_cmd;
1697 /* For 6.5 hdr iscsi_hdr */
1698 struct iscsi_text *text_hdr;
1699 struct iscsi_sge *req_sge;
1700 struct iscsi_sge *resp_sge;
1701 s16 ptu_invalidate = 0;
1702 s16 tid = 0;
1703
1704 req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1705 resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1706 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1707 text_hdr = (struct iscsi_text *)task->hdr;
1708
1709 tid = qedi_get_task_idx(qedi);
1710 if (tid == -1)
1711 return -ENOMEM;
1712
1713 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
1714 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1715
1716 qedi_cmd->task_id = tid;
1717
1718 /* Ystorm context */
1719 fw_text_request =
1720 &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request;
1721 fw_text_request->opcode = text_hdr->opcode;
1722 fw_text_request->flags_attr = text_hdr->flags;
1723
1724 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1725 fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt));
1726 fw_text_request->ttt = text_hdr->ttt;
1727 fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
1728 fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
1729 fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength);
1730
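	/*
	 * Once a task id has been reused QEDI_MAX_TASK_NUM times, ask the
	 * firmware to invalidate its PTU entry and restart the count.
	 */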
1731 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
1732 ptu_invalidate = 1;
1733 qedi->tid_reuse_count[tid] = 0;
1734 }
1735 fw_task_ctx->ystorm_st_context.state.reuse_count =
1736 qedi->tid_reuse_count[tid];
1737 fw_task_ctx->mstorm_st_context.reuse_count =
1738 qedi->tid_reuse_count[tid]++;
1739
1740 cached_sge =
1741 &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
1742 cached_sge->sge.sge_len = req_sge->sge_len;
1743 cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
1744 cached_sge->sge.sge_addr.hi =
1745 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
1746
1747 /* Mstorm context */
1748 single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
1749 	fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1750 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
1751 single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
1752 single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
1753 single_sge->sge_len = resp_sge->sge_len;
1754
1755 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1756 ISCSI_MFLAGS_SINGLE_SGE, 1);
1757 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
1758 ISCSI_MFLAGS_SLOW_IO, 0);
1759 fw_task_ctx->mstorm_st_context.sgl_size = 1;
1760 fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
1761
1762 /* Ustorm context */
1763 fw_task_ctx->ustorm_ag_context.exp_data_acked =
1764 ntoh24(text_hdr->dlength);
1765 fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
1766 fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
1767 ntoh24(text_hdr->dlength);
1768 fw_task_ctx->ustorm_st_context.exp_data_sn =
1769 be32_to_cpu(text_hdr->exp_statsn);
1770 fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
1771 	fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1772 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
1773 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
1774 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
1775
1776 /* Add command in active command list */
1777 spin_lock(&qedi_conn->list_lock);
1778 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
1779 qedi_cmd->io_cmd_in_list = true;
1780 qedi_conn->active_cmd_count++;
1781 spin_unlock(&qedi_conn->list_lock);
1782
1783 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
1784 qedi_ring_doorbell(qedi_conn);
1785
1786 return 0;
1787}
1788
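/*
 * Build the firmware task context for a NOP-Out PDU and post it on the
 * send queue; data_len is the expected length of the ping data.
 */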
1789int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
1790 struct iscsi_task *task,
1791 char *datap, int data_len, int unsol)
1792{
1793 struct qedi_ctx *qedi = qedi_conn->qedi;
1794 struct iscsi_task_context *fw_task_ctx;
1795 struct iscsi_nop_out_hdr *fw_nop_out;
1796 struct qedi_cmd *qedi_cmd;
1797 /* For 6.5 hdr iscsi_hdr */
1798 struct iscsi_nopout *nopout_hdr;
1799 struct iscsi_cached_sge_ctx *cached_sge;
1800 struct iscsi_sge *single_sge;
1801 struct iscsi_sge *req_sge;
1802 struct iscsi_sge *resp_sge;
1803 u32 lun[2];
1804 s16 ptu_invalidate = 0;
1805 s16 tid = 0;
1806
1807 req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1808 resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1809 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1810 nopout_hdr = (struct iscsi_nopout *)task->hdr;
1811
1812 tid = qedi_get_task_idx(qedi);
1813 if (tid == -1) {
1814 QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
1815 return -ENOMEM;
1816 }
1817
1818 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
1819
1820 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
1821 qedi_cmd->task_id = tid;
1822
1823 /* Ystorm context */
1824 fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out;
1825 SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
1826 SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
1827
1828 memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
1829 fw_nop_out->lun.lo = be32_to_cpu(lun[0]);
1830 fw_nop_out->lun.hi = be32_to_cpu(lun[1]);
1831
1832 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1833
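	/*
	 * A NOP-Out answering a target NOP-In (valid TTT) gets no reply, so
	 * mark it for local completion; an initiator ping (TTT of all ones)
	 * expects a NOP-In back and is tracked on the active command list.
	 */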
1834 if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
1835 fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt);
1836 fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt);
1837 fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
1838 fw_task_ctx->ystorm_st_context.state.local_comp = 1;
1839 SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
1840 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
1841 } else {
1842 fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt));
1843 fw_nop_out->ttt = ISCSI_TTT_ALL_ONES;
1844 fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
1845
1846 spin_lock(&qedi_conn->list_lock);
1847 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
1848 qedi_cmd->io_cmd_in_list = true;
1849 qedi_conn->active_cmd_count++;
1850 spin_unlock(&qedi_conn->list_lock);
1851 }
1852
1853 fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT;
1854 fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
1855 fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
1856
1857 cached_sge =
1858 &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
1859 cached_sge->sge.sge_len = req_sge->sge_len;
1860 cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
1861 cached_sge->sge.sge_addr.hi =
1862 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
1863
1864 /* Mstorm context */
1865 fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1866 fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
1867
1868 single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
1869 single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
1870 single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
1871 single_sge->sge_len = resp_sge->sge_len;
1872 fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
1873
1874 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
1875 ptu_invalidate = 1;
1876 qedi->tid_reuse_count[tid] = 0;
1877 }
1878 fw_task_ctx->ystorm_st_context.state.reuse_count =
1879 qedi->tid_reuse_count[tid];
1880 fw_task_ctx->mstorm_st_context.reuse_count =
1881 qedi->tid_reuse_count[tid]++;
1882 /* Ustorm context */
1883 fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
1884 fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len;
1885 fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
1886 fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
1887 fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
1888
1889 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
1890 ISCSI_REG1_NUM_FAST_SGES, 0);
1891
1892 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
1893 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
1894 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
1895
1896 fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
1897 fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
1898
1899 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
1900 qedi_ring_doorbell(qedi_conn);
1901 return 0;
1902}
1903
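/*
 * Split one DMA-mapped SG element into SGEs of at most QEDI_BD_SPLIT_SZ,
 * ending the first fragment at the next page boundary when the start
 * address is not page aligned.  Returns the number of SGEs written at
 * bd_index.
 */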
1904static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
1905 int bd_index)
1906{
1907 struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
1908 int frag_size, sg_frags;
1909
1910 sg_frags = 0;
1911
1912 while (sg_len) {
1913 if (addr % QEDI_PAGE_SIZE)
1914 frag_size =
1915 (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE));
1916 else
1917 frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 :
1918 (sg_len % QEDI_BD_SPLIT_SZ);
1919
1920 if (frag_size == 0)
1921 frag_size = QEDI_BD_SPLIT_SZ;
1922
1923 bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff);
1924 bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32);
1925 bd[bd_index + sg_frags].sge_len = (u16)frag_size;
1926 QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO,
1927 "split sge %d: addr=%llx, len=%x",
1928 (bd_index + sg_frags), addr, frag_size);
1929
1930 addr += (u64)frag_size;
1931 sg_frags++;
1932 sg_len -= frag_size;
1933 }
1934 return sg_frags;
1935}
1936
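/*
 * DMA-map the command's scatterlist and translate it into the driver's
 * SGE table.  A single element of up to 64K is passed as a cached SGE;
 * larger lists are checked for page alignment so that unaligned lists
 * can be sent on the slow path.
 */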
1937static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
1938{
1939 struct scsi_cmnd *sc = cmd->scsi_cmd;
1940 struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
1941 struct scatterlist *sg;
1942 int byte_count = 0;
1943 int bd_count = 0;
1944 int sg_count;
1945 int sg_len;
1946 int sg_frags;
1947 u64 addr, end_addr;
1948 int i;
1949
1950 WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD);
1951
1952 sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc),
1953 scsi_sg_count(sc), sc->sc_data_direction);
1954
1955 	/*
1956 	 * Fast case: a single SG element no longer than 64K
1957 	 * (MAX_SGLEN_FOR_CACHESGL) is sent to the firmware as a cached SGE.
1958 	 */
1959 sg = scsi_sglist(sc);
1960 if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) {
1961 sg_len = sg_dma_len(sg);
1962 addr = (u64)sg_dma_address(sg);
1963
1964 bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
1965 bd[bd_count].sge_addr.hi = (addr >> 32);
1966 bd[bd_count].sge_len = (u16)sg_len;
1967
1968 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
1969 "single-cashed-sgl: bd_count:%d addr=%llx, len=%x",
1970 sg_count, addr, sg_len);
1971
1972 return ++bd_count;
1973 }
1974
1975 scsi_for_each_sg(sc, sg, sg_count, i) {
1976 sg_len = sg_dma_len(sg);
1977 addr = (u64)sg_dma_address(sg);
1978 end_addr = (addr + sg_len);
1979
1980 /*
1981 * first sg elem in the 'list',
1982 * check if end addr is page-aligned.
1983 */
1984 if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE))
1985 cmd->use_slowpath = true;
1986
1987 /*
1988 * last sg elem in the 'list',
1989 * check if start addr is page-aligned.
1990 */
1991 else if ((i == (sg_count - 1)) &&
1992 (sg_count > 1) && (addr % QEDI_PAGE_SIZE))
1993 cmd->use_slowpath = true;
1994
1995 /*
1996 * middle sg elements in list,
1997 		 * check if start and end addr are page-aligned.
1998 */
1999 else if ((i != 0) && (i != (sg_count - 1)) &&
2000 ((addr % QEDI_PAGE_SIZE) ||
2001 (end_addr % QEDI_PAGE_SIZE)))
2002 cmd->use_slowpath = true;
2003
2004 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x",
2005 i, sg_len);
2006
2007 if (sg_len > QEDI_BD_SPLIT_SZ) {
2008 sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count);
2009 } else {
2010 sg_frags = 1;
2011 bd[bd_count].sge_addr.lo = addr & 0xffffffff;
2012 bd[bd_count].sge_addr.hi = addr >> 32;
2013 bd[bd_count].sge_len = sg_len;
2014 }
2015 byte_count += sg_len;
2016 bd_count += sg_frags;
2017 }
2018
2019 if (byte_count != scsi_bufflen(sc))
2020 QEDI_ERR(&qedi->dbg_ctx,
2021 "byte_count = %d != scsi_bufflen = %d\n", byte_count,
2022 scsi_bufflen(sc));
2023 else
2024 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n",
2025 byte_count);
2026
2027 WARN_ON(byte_count != scsi_bufflen(sc));
2028
2029 return bd_count;
2030}
2031
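/*
 * Fill the command's SGE table; a command without a scatterlist gets one
 * zeroed SGE and sge_valid stays 0.
 */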
2032static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
2033{
2034 int bd_count;
2035 struct scsi_cmnd *sc = cmd->scsi_cmd;
2036
2037 if (scsi_sg_count(sc)) {
2038 bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd);
2039 if (bd_count == 0)
2040 return;
2041 } else {
2042 struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
2043
2044 bd[0].sge_addr.lo = 0;
2045 bd[0].sge_addr.hi = 0;
2046 bd[0].sge_len = 0;
2047 bd_count = 0;
2048 }
2049 cmd->io_tbl.sge_valid = bd_count;
2050}
2051
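/*
 * Copy the SCSI CDB into the firmware command header as big-endian
 * 32-bit words, handling a trailing partial word separately.
 */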
2052static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp)
2053{
2054 u32 dword;
2055 int lpcnt;
2056 u8 *srcp;
2057
2058 lpcnt = sc->cmd_len / sizeof(dword);
2059 srcp = (u8 *)sc->cmnd;
2060 while (lpcnt--) {
2061 memcpy(&dword, (const void *)srcp, 4);
2062 *dstp = cpu_to_be32(dword);
2063 srcp += 4;
2064 dstp++;
2065 }
2066 if (sc->cmd_len & 0x3) {
2067 dword = (u32)srcp[0] | ((u32)srcp[1] << 8);
2068 *dstp = cpu_to_be32(dword);
2069 }
2070}
2071
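/*
 * Record one entry in the circular I/O trace buffer, capturing the
 * command, its SGE mode and the CPUs involved; protected by
 * io_trace_lock.
 */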
2072void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
2073 u16 tid, int8_t direction)
2074{
2075 struct qedi_io_log *io_log;
2076 struct iscsi_conn *conn = task->conn;
2077 struct qedi_conn *qedi_conn = conn->dd_data;
2078 struct scsi_cmnd *sc_cmd = task->sc;
2079 unsigned long flags;
2080 u8 op;
2081
2082 spin_lock_irqsave(&qedi->io_trace_lock, flags);
2083
2084 io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
2085 io_log->direction = direction;
2086 io_log->task_id = tid;
2087 io_log->cid = qedi_conn->iscsi_conn_id;
2088 io_log->lun = sc_cmd->device->lun;
2089 io_log->op = sc_cmd->cmnd[0];
2090 op = sc_cmd->cmnd[0];
2091 io_log->lba[0] = sc_cmd->cmnd[2];
2092 io_log->lba[1] = sc_cmd->cmnd[3];
2093 io_log->lba[2] = sc_cmd->cmnd[4];
2094 io_log->lba[3] = sc_cmd->cmnd[5];
2095 io_log->bufflen = scsi_bufflen(sc_cmd);
2096 io_log->sg_count = scsi_sg_count(sc_cmd);
2097 io_log->fast_sgs = qedi->fast_sgls;
2098 io_log->cached_sgs = qedi->cached_sgls;
2099 io_log->slow_sgs = qedi->slow_sgls;
2100 io_log->cached_sge = qedi->use_cached_sge;
2101 io_log->slow_sge = qedi->use_slow_sge;
2102 io_log->fast_sge = qedi->use_fast_sge;
2103 io_log->result = sc_cmd->result;
2104 io_log->jiffies = jiffies;
2105 io_log->blk_req_cpu = smp_processor_id();
2106
2107 if (direction == QEDI_IO_TRACE_REQ) {
2108 /* For requests we only care about the submission CPU */
2109 io_log->req_cpu = smp_processor_id() % qedi->num_queues;
2110 io_log->intr_cpu = 0;
2111 io_log->blk_rsp_cpu = 0;
2112 } else if (direction == QEDI_IO_TRACE_RSP) {
2113 io_log->req_cpu = smp_processor_id() % qedi->num_queues;
2114 io_log->intr_cpu = qedi->intr_cpu;
2115 io_log->blk_rsp_cpu = smp_processor_id();
2116 }
2117
2118 qedi->io_trace_idx++;
2119 if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
2120 qedi->io_trace_idx = 0;
2121
2122 qedi->use_cached_sge = false;
2123 qedi->use_slow_sge = false;
2124 qedi->use_fast_sge = false;
2125
2126 spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
2127}
2128
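/*
 * Main SCSI command submission path: map the scatterlist, build the
 * Ystorm/Mstorm/Ustorm task context for the SCSI command PDU, pick the
 * SGL mode for the data transfer and ring the doorbell to hand the task
 * to the firmware.
 */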
2129int qedi_iscsi_send_ioreq(struct iscsi_task *task)
2130{
2131 struct iscsi_conn *conn = task->conn;
2132 struct iscsi_session *session = conn->session;
2133 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
2134 struct qedi_ctx *qedi = iscsi_host_priv(shost);
2135 struct qedi_conn *qedi_conn = conn->dd_data;
2136 struct qedi_cmd *cmd = task->dd_data;
2137 struct scsi_cmnd *sc = task->sc;
2138 struct iscsi_task_context *fw_task_ctx;
2139 struct iscsi_cached_sge_ctx *cached_sge;
2140 struct iscsi_phys_sgl_ctx *phys_sgl;
2141 struct iscsi_virt_sgl_ctx *virt_sgl;
2142 struct ystorm_iscsi_task_st_ctx *yst_cxt;
2143 struct mstorm_iscsi_task_st_ctx *mst_cxt;
2144 struct iscsi_sgl *sgl_struct;
2145 struct iscsi_sge *single_sge;
2146 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
2147 struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
2148 enum iscsi_task_type task_type;
2149 struct iscsi_cmd_hdr *fw_cmd;
2150 u32 lun[2];
2151 u32 exp_data;
2152 u16 cq_idx = smp_processor_id() % qedi->num_queues;
2153 s16 ptu_invalidate = 0;
2154 s16 tid = 0;
2155 u8 num_fast_sgs;
2156
2157 tid = qedi_get_task_idx(qedi);
2158 if (tid == -1)
2159 return -ENOMEM;
2160
2161 qedi_iscsi_map_sg_list(cmd);
2162
2163 int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun);
2164 fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
2165
2166 memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
2167 cmd->task_id = tid;
2168
2169 /* Ystorm context */
2170 fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
2171 SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
2172
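	/*
	 * For writes, exp_data_acked is the data that may be sent without
	 * waiting for an R2T: with InitialR2T enabled only immediate data
	 * counts, otherwise up to FirstBurstLength may go unsolicited.
	 */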
2173 if (sc->sc_data_direction == DMA_TO_DEVICE) {
2174 if (conn->session->initial_r2t_en) {
2175 exp_data = min((conn->session->imm_data_en *
2176 conn->max_xmit_dlength),
2177 conn->session->first_burst);
2178 exp_data = min(exp_data, scsi_bufflen(sc));
2179 fw_task_ctx->ustorm_ag_context.exp_data_acked =
2180 cpu_to_le32(exp_data);
2181 } else {
2182 fw_task_ctx->ustorm_ag_context.exp_data_acked =
2183 min(conn->session->first_burst, scsi_bufflen(sc));
2184 }
2185
2186 SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
2187 task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
2188 } else {
2189 if (scsi_bufflen(sc))
2190 SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
2191 task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
2192 }
2193
2194 fw_cmd->lun.lo = be32_to_cpu(lun[0]);
2195 fw_cmd->lun.hi = be32_to_cpu(lun[1]);
2196
2197 qedi_update_itt_map(qedi, tid, task->itt, cmd);
2198 fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
2199 fw_cmd->expected_transfer_length = scsi_bufflen(sc);
2200 fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2201 fw_cmd->opcode = hdr->opcode;
2202 qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
2203
2204 /* Mstorm context */
2205 fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
2206 fw_task_ctx->mstorm_st_context.sense_db.hi =
2207 (u32)((u64)cmd->sense_buffer_dma >> 32);
2208 fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
2209 fw_task_ctx->mstorm_st_context.task_type = task_type;
2210
2211 if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
2212 ptu_invalidate = 1;
2213 qedi->tid_reuse_count[tid] = 0;
2214 }
2215 fw_task_ctx->ystorm_st_context.state.reuse_count =
2216 qedi->tid_reuse_count[tid];
2217 fw_task_ctx->mstorm_st_context.reuse_count =
2218 qedi->tid_reuse_count[tid]++;
2219
2220 /* Ustorm context */
2221 fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
2222 fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
2223 fw_task_ctx->ustorm_st_context.exp_data_sn =
2224 be32_to_cpu(hdr->exp_statsn);
2225 fw_task_ctx->ustorm_st_context.task_type = task_type;
2226 fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
2227 fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
2228
2229 SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
2230 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
2231 SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
2232 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
2233
2234 num_fast_sgs = (cmd->io_tbl.sge_valid ?
2235 min((u16)QEDI_FAST_SGE_COUNT,
2236 (u16)cmd->io_tbl.sge_valid) : 0);
2237 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2238 ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
2239
2240 fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
2241 fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
2242
2243 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
2244 cmd->io_tbl.sge_valid);
2245
2246 yst_cxt = &fw_task_ctx->ystorm_st_context;
2247 mst_cxt = &fw_task_ctx->mstorm_st_context;
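	/*
	 * Choose the SGL mode: a single SGE goes out as a cached SGE, an
	 * unaligned multi-entry list uses the slow-path physical SGL, and a
	 * page-aligned list uses the fast-path SGL.
	 */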
2248 /* Tx path */
2249 if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
2250 /* not considering superIO or FastIO */
2251 if (cmd->io_tbl.sge_valid == 1) {
2252 cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge;
2253 cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo;
2254 cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
2255 cached_sge->sge.sge_len = bd[0].sge_len;
2256 qedi->cached_sgls++;
2257 } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
2258 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2259 ISCSI_MFLAGS_SLOW_IO, 1);
2260 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2261 ISCSI_REG1_NUM_FAST_SGES, 0);
2262 phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
2263 phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
2264 phys_sgl->sgl_base.hi =
2265 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2266 phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
2267 qedi->slow_sgls++;
2268 } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
2269 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2270 ISCSI_MFLAGS_SLOW_IO, 0);
2271 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2272 ISCSI_REG1_NUM_FAST_SGES,
2273 min((u16)QEDI_FAST_SGE_COUNT,
2274 (u16)cmd->io_tbl.sge_valid));
2275 virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
2276 virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
2277 virt_sgl->sgl_base.hi =
2278 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2279 virt_sgl->sgl_initial_offset =
2280 (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
2281 qedi->fast_sgls++;
2282 }
2283 fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
2284 fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
2285 } else {
2286 /* Rx path */
2287 if (cmd->io_tbl.sge_valid == 1) {
2288 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2289 ISCSI_MFLAGS_SLOW_IO, 0);
2290 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2291 ISCSI_MFLAGS_SINGLE_SGE, 1);
2292 single_sge = &mst_cxt->sgl_union.single_sge;
2293 single_sge->sge_addr.lo = bd[0].sge_addr.lo;
2294 single_sge->sge_addr.hi = bd[0].sge_addr.hi;
2295 single_sge->sge_len = bd[0].sge_len;
2296 qedi->cached_sgls++;
2297 } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
2298 sgl_struct = &mst_cxt->sgl_union.sgl_struct;
2299 sgl_struct->sgl_addr.lo =
2300 (u32)(cmd->io_tbl.sge_tbl_dma);
2301 sgl_struct->sgl_addr.hi =
2302 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2303 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2304 ISCSI_MFLAGS_SLOW_IO, 1);
2305 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2306 ISCSI_REG1_NUM_FAST_SGES, 0);
2307 sgl_struct->updated_sge_size = 0;
2308 sgl_struct->updated_sge_offset = 0;
2309 qedi->slow_sgls++;
2310 } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
2311 sgl_struct = &mst_cxt->sgl_union.sgl_struct;
2312 sgl_struct->sgl_addr.lo =
2313 (u32)(cmd->io_tbl.sge_tbl_dma);
2314 sgl_struct->sgl_addr.hi =
2315 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2316 sgl_struct->byte_offset =
2317 (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
2318 SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
2319 ISCSI_MFLAGS_SLOW_IO, 0);
2320 SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
2321 ISCSI_REG1_NUM_FAST_SGES, 0);
2322 sgl_struct->updated_sge_size = 0;
2323 sgl_struct->updated_sge_offset = 0;
2324 qedi->fast_sgls++;
2325 }
2326 fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
2327 fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
2328 }
2329
2330 	if (cmd->io_tbl.sge_valid == 1) {
2331 		/* Single-SGL */
2332 		qedi->use_cached_sge = true;
2333 	} else {
2334 if (cmd->use_slowpath)
2335 qedi->use_slow_sge = true;
2336 else
2337 qedi->use_fast_sge = true;
2338 }
2339 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
2340 "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
2341 (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
2342 "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
2343 "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
2344 (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma),
2345 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
2346
2347 /* Add command in active command list */
2348 spin_lock(&qedi_conn->list_lock);
2349 list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
2350 cmd->io_cmd_in_list = true;
2351 qedi_conn->active_cmd_count++;
2352 spin_unlock(&qedi_conn->list_lock);
2353
2354 qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
2355 qedi_ring_doorbell(qedi_conn);
2356 if (qedi_io_tracing)
2357 qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
2358
2359 return 0;
2360}
2361
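/*
 * Post a cleanup request on the send queue so the firmware flushes the
 * given task.
 */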
2362int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
2363{
2364 struct iscsi_conn *conn = task->conn;
2365 struct qedi_conn *qedi_conn = conn->dd_data;
2366 struct qedi_cmd *cmd = task->dd_data;
2367 s16 ptu_invalidate = 0;
2368
2369 QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
2370 "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n",
2371 cmd->task_id, get_itt(task->itt), task->state,
2372 cmd->state, qedi_conn->iscsi_conn_id);
2373
2374 qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true);
2375 qedi_ring_doorbell(qedi_conn);
2376
2377 return 0;
2378}