drivers/scsi/qedf/qedf_io.c
1 /*
2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9 #include <linux/spinlock.h>
10 #include <linux/vmalloc.h>
11 #include "qedf.h"
12 #include <scsi/scsi_tcq.h>
13
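/*
 * Arm the delayed-work timeout for an I/O request. The work item itself
 * (qedf_cmd_timeout below) is bound to each io_req in qedf_cmd_mgr_alloc().
 */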
14 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
15 unsigned int timer_msec)
16 {
17 queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
18 msecs_to_jiffies(timer_msec));
19 }
20
21 static void qedf_cmd_timeout(struct work_struct *work)
22 {
23
24 struct qedf_ioreq *io_req =
25 container_of(work, struct qedf_ioreq, timeout_work.work);
26 struct qedf_ctx *qedf = io_req->fcport->qedf;
27 struct qedf_rport *fcport = io_req->fcport;
28 u8 op = 0;
29
30 switch (io_req->cmd_type) {
31 case QEDF_ABTS:
32 QEDF_ERR(&(qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
33 io_req->xid);
34 /* Cleanup timed out ABTS */
35 qedf_initiate_cleanup(io_req, true);
36 complete(&io_req->abts_done);
37
38 /*
39 * Need to call kref_put for reference taken when initiate_abts
40 * was called since abts_compl won't be called now that we've
41 * cleaned up the task.
42 */
43 kref_put(&io_req->refcount, qedf_release_cmd);
44
45 /*
46 * Now that the original I/O and the ABTS are complete see
47 * if we need to reconnect to the target.
48 */
49 qedf_restart_rport(fcport);
50 break;
51 case QEDF_ELS:
52 kref_get(&io_req->refcount);
53 /*
54 * Don't attempt to clean an ELS timeout as any subsequent
55 * ABTS or cleanup requests just hang. For now just free
56 * the resources of the original I/O and the RRQ.
57 */
58 QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
59 io_req->xid);
60 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
61 /* Call callback function to complete command */
62 if (io_req->cb_func && io_req->cb_arg) {
63 op = io_req->cb_arg->op;
64 io_req->cb_func(io_req->cb_arg);
65 io_req->cb_arg = NULL;
66 }
67 qedf_initiate_cleanup(io_req, true);
68 kref_put(&io_req->refcount, qedf_release_cmd);
69 break;
70 case QEDF_SEQ_CLEANUP:
71 QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
72 "xid=0x%x.\n", io_req->xid);
73 qedf_initiate_cleanup(io_req, true);
74 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
75 qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
76 break;
77 default:
78 break;
79 }
80 }
81
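/*
 * Tear down the command manager: free each command's DMA'd BD table and
 * sense buffer, cancel any pending RRQ work, then free the io_bdt pool
 * and the manager itself. The order mirrors qedf_cmd_mgr_alloc() so this
 * also serves as the error-unwind path for a partial allocation.
 */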
82 void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
83 {
84 struct io_bdt *bdt_info;
85 struct qedf_ctx *qedf = cmgr->qedf;
86 size_t bd_tbl_sz;
87 u16 min_xid = QEDF_MIN_XID;
88 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
89 int num_ios;
90 int i;
91 struct qedf_ioreq *io_req;
92
93 num_ios = max_xid - min_xid + 1;
94
95 /* Free fcoe_bdt_ctx structures */
96 if (!cmgr->io_bdt_pool)
97 goto free_cmd_pool;
98
99 bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge);
100 for (i = 0; i < num_ios; i++) {
101 bdt_info = cmgr->io_bdt_pool[i];
102 if (bdt_info->bd_tbl) {
103 dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
104 bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
105 bdt_info->bd_tbl = NULL;
106 }
107 }
108
109 /* Destroy io_bdt pool */
110 for (i = 0; i < num_ios; i++) {
111 kfree(cmgr->io_bdt_pool[i]);
112 cmgr->io_bdt_pool[i] = NULL;
113 }
114
115 kfree(cmgr->io_bdt_pool);
116 cmgr->io_bdt_pool = NULL;
117
118 free_cmd_pool:
119
120 for (i = 0; i < num_ios; i++) {
121 io_req = &cmgr->cmds[i];
122 /* Make sure we free per command sense buffer */
123 if (io_req->sense_buffer)
124 dma_free_coherent(&qedf->pdev->dev,
125 QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
126 io_req->sense_buffer_dma);
127 cancel_delayed_work_sync(&io_req->rrq_work);
128 }
129
130 /* Free command manager itself */
131 vfree(cmgr);
132 }
133
134 static void qedf_handle_rrq(struct work_struct *work)
135 {
136 struct qedf_ioreq *io_req =
137 container_of(work, struct qedf_ioreq, rrq_work.work);
138
139 qedf_send_rrq(io_req);
140
141 }
142
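/*
 * Allocate the per-adapter command manager: one qedf_ioreq per XID in
 * [QEDF_MIN_XID, FCOE_PARAMS_NUM_TASKS - 1], each with a DMA-coherent
 * sense buffer, plus an io_bdt (BD table) statically paired with each
 * XID for the life of the adapter.
 */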
143 struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
144 {
145 struct qedf_cmd_mgr *cmgr;
146 struct io_bdt *bdt_info;
147 struct qedf_ioreq *io_req;
148 u16 xid;
149 int i;
150 int num_ios;
151 u16 min_xid = QEDF_MIN_XID;
152 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
153
154 /* Make sure num_queues is already set before calling this function */
155 if (!qedf->num_queues) {
156 QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
157 return NULL;
158 }
159
160 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
161 QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
162 "max_xid 0x%x.\n", min_xid, max_xid);
163 return NULL;
164 }
165
166 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
167 "0x%x.\n", min_xid, max_xid);
168
169 num_ios = max_xid - min_xid + 1;
170
171 cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
172 if (!cmgr) {
173 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
174 return NULL;
175 }
176
177 cmgr->qedf = qedf;
178 spin_lock_init(&cmgr->lock);
179
180 /*
181 * Initialize list of qedf_ioreq.
182 */
183 xid = QEDF_MIN_XID;
184
185 for (i = 0; i < num_ios; i++) {
186 io_req = &cmgr->cmds[i];
187 INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
188
189 io_req->xid = xid++;
190
191 INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
192
193 /* Allocate DMA memory to hold sense buffer */
194 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
195 QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
196 GFP_KERNEL);
197 if (!io_req->sense_buffer)
198 goto mem_err;
199 }
200
201 /* Allocate pool of io_bdts - one for each qedf_ioreq */
202 cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
203 GFP_KERNEL);
204
205 if (!cmgr->io_bdt_pool) {
206 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
207 goto mem_err;
208 }
209
210 for (i = 0; i < num_ios; i++) {
211 cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
212 GFP_KERNEL);
213 if (!cmgr->io_bdt_pool[i]) {
214 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
215 "io_bdt_pool[%d].\n", i);
216 goto mem_err;
217 }
218 }
219
220 for (i = 0; i < num_ios; i++) {
221 bdt_info = cmgr->io_bdt_pool[i];
222 bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
223 QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge),
224 &bdt_info->bd_tbl_dma, GFP_KERNEL);
225 if (!bdt_info->bd_tbl) {
226 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
227 "bdt_tbl[%d].\n", i);
228 goto mem_err;
229 }
230 }
231 atomic_set(&cmgr->free_list_cnt, num_ios);
232 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
233 "cmgr->free_list_cnt=%d.\n",
234 atomic_read(&cmgr->free_list_cnt));
235
236 return cmgr;
237
238 mem_err:
239 qedf_cmd_mgr_free(cmgr);
240 return NULL;
241 }
242
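/*
 * Reserve a free command for @fcport. Admission is throttled three ways:
 * the connection must have a free SQE, it must be under its active I/O
 * limit, and a small number of TIDs is held in reserve globally
 * (GBL_RSVD_TASKS). The free command search is a round-robin scan over
 * the command array starting at cmd_mgr->idx, under cmd_mgr->lock.
 */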
243 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
244 {
245 struct qedf_ctx *qedf = fcport->qedf;
246 struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
247 struct qedf_ioreq *io_req = NULL;
248 struct io_bdt *bd_tbl;
249 u16 xid;
250 uint32_t free_sqes;
251 int i;
252 unsigned long flags;
253
254 free_sqes = atomic_read(&fcport->free_sqes);
255
256 if (!free_sqes) {
257 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
258 "Returning NULL, free_sqes=%d.\n ",
259 free_sqes);
260 goto out_failed;
261 }
262
263 /* Limit the number of outstanding R/W tasks */
264 if ((atomic_read(&fcport->num_active_ios) >=
265 NUM_RW_TASKS_PER_CONNECTION)) {
266 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
267 "Returning NULL, num_active_ios=%d.\n",
268 atomic_read(&fcport->num_active_ios));
269 goto out_failed;
270 }
271
272 /* Limit global TIDs for certain tasks */
273 if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
274 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
275 "Returning NULL, free_list_cnt=%d.\n",
276 atomic_read(&cmd_mgr->free_list_cnt));
277 goto out_failed;
278 }
279
280 spin_lock_irqsave(&cmd_mgr->lock, flags);
281 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
282 io_req = &cmd_mgr->cmds[cmd_mgr->idx];
283 cmd_mgr->idx++;
284 if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
285 cmd_mgr->idx = 0;
286
287 /* Check to make sure command was previously freed */
288 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
289 break;
290 }
291
292 if (i == FCOE_PARAMS_NUM_TASKS) {
293 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
294 goto out_failed;
295 }
296
297 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
298 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
299
300 atomic_inc(&fcport->num_active_ios);
301 atomic_dec(&fcport->free_sqes);
302 xid = io_req->xid;
303 atomic_dec(&cmd_mgr->free_list_cnt);
304
305 io_req->cmd_mgr = cmd_mgr;
306 io_req->fcport = fcport;
307
308 /* Hold the io_req against deletion */
309 kref_init(&io_req->refcount);
310
311 /* Bind io_bdt for this io_req */
312 /* Have a static link between io_req and io_bdt_pool */
313 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
314 if (bd_tbl == NULL) {
315 QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
316 kref_put(&io_req->refcount, qedf_release_cmd);
317 goto out_failed;
318 }
319 bd_tbl->io_req = io_req;
320 io_req->cmd_type = cmd_type;
321
322 /* Reset sequence offset data */
323 io_req->rx_buf_off = 0;
324 io_req->tx_buf_off = 0;
325 io_req->rx_id = 0xffff; /* No RX_ID */
326
327 return io_req;
328
329 out_failed:
330 /* Record failure for stats and return NULL to caller */
331 qedf->alloc_failures++;
332 return NULL;
333 }
334
335 static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
336 {
337 struct qedf_mp_req *mp_req = &(io_req->mp_req);
338 struct qedf_ctx *qedf = io_req->fcport->qedf;
339 uint64_t sz = sizeof(struct fcoe_sge);
340
341 /* clear tm flags */
342 mp_req->tm_flags = 0;
343 if (mp_req->mp_req_bd) {
344 dma_free_coherent(&qedf->pdev->dev, sz,
345 mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
346 mp_req->mp_req_bd = NULL;
347 }
348 if (mp_req->mp_resp_bd) {
349 dma_free_coherent(&qedf->pdev->dev, sz,
350 mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
351 mp_req->mp_resp_bd = NULL;
352 }
353 if (mp_req->req_buf) {
354 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
355 mp_req->req_buf, mp_req->req_buf_dma);
356 mp_req->req_buf = NULL;
357 }
358 if (mp_req->resp_buf) {
359 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
360 mp_req->resp_buf, mp_req->resp_buf_dma);
361 mp_req->resp_buf = NULL;
362 }
363 }
364
365 void qedf_release_cmd(struct kref *ref)
366 {
367 struct qedf_ioreq *io_req =
368 container_of(ref, struct qedf_ioreq, refcount);
369 struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
370 struct qedf_rport *fcport = io_req->fcport;
371
372 if (io_req->cmd_type == QEDF_ELS ||
373 io_req->cmd_type == QEDF_TASK_MGMT_CMD)
374 qedf_free_mp_resc(io_req);
375
376 atomic_inc(&cmd_mgr->free_list_cnt);
377 atomic_dec(&fcport->num_active_ios);
378 if (atomic_read(&fcport->num_active_ios) < 0)
379 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
380
381 /* Increment task retry identifier now that the request is released */
382 io_req->task_retry_identifier++;
383
384 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
385 }
386
387 static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
388 int bd_index)
389 {
390 struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
391 int frag_size, sg_frags;
392
393 sg_frags = 0;
394 while (sg_len) {
395 if (sg_len > QEDF_BD_SPLIT_SZ)
396 frag_size = QEDF_BD_SPLIT_SZ;
397 else
398 frag_size = sg_len;
399 bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
400 bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
401 bd[bd_index + sg_frags].size = (uint16_t)frag_size;
402
403 addr += (u64)frag_size;
404 sg_frags++;
405 sg_len -= frag_size;
406 }
407 return sg_frags;
408 }
409
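/*
 * Map the scatter/gather list into firmware BDs (fcoe_sge entries) and
 * decide between the fast and slow SGL modes: a single SGE no longer
 * than QEDF_MAX_SGLEN_FOR_CACHESGL is sent as a cached SGL, while
 * misaligned SGE boundaries (checked below) force io_req->use_slowpath.
 * SGEs longer than QEDF_MAX_BD_LEN are split into multiple BDs by
 * qedf_split_bd().
 */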
410 static int qedf_map_sg(struct qedf_ioreq *io_req)
411 {
412 struct scsi_cmnd *sc = io_req->sc_cmd;
413 struct Scsi_Host *host = sc->device->host;
414 struct fc_lport *lport = shost_priv(host);
415 struct qedf_ctx *qedf = lport_priv(lport);
416 struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
417 struct scatterlist *sg;
418 int byte_count = 0;
419 int sg_count = 0;
420 int bd_count = 0;
421 int sg_frags;
422 unsigned int sg_len;
423 u64 addr, end_addr;
424 int i;
425
426 sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
427 scsi_sg_count(sc), sc->sc_data_direction);
428
429 sg = scsi_sglist(sc);
430
431 /*
432 * New condition to send single SGE as cached-SGL with length less
433 * than 64k.
434 */
435 if ((sg_count == 1) && (sg_dma_len(sg) <=
436 QEDF_MAX_SGLEN_FOR_CACHESGL)) {
437 sg_len = sg_dma_len(sg);
438 addr = (u64)sg_dma_address(sg);
439
440 bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
441 bd[bd_count].sge_addr.hi = (addr >> 32);
442 bd[bd_count].size = (u16)sg_len;
443
444 return ++bd_count;
445 }
446
447 scsi_for_each_sg(sc, sg, sg_count, i) {
448 sg_len = sg_dma_len(sg);
449 addr = (u64)sg_dma_address(sg);
450 end_addr = (u64)(addr + sg_len);
451
452 /*
453 * First s/g element in the list so check if the end_addr
454 * is page aligned. Also check to make sure the length is
455 * at least page size.
456 */
457 if ((i == 0) && (sg_count > 1) &&
458 ((end_addr % QEDF_PAGE_SIZE) ||
459 sg_len < QEDF_PAGE_SIZE))
460 io_req->use_slowpath = true;
461 /*
462 * Last s/g element so check if the start address is page
463 * aligned.
464 */
465 else if ((i == (sg_count - 1)) && (sg_count > 1) &&
466 (addr % QEDF_PAGE_SIZE))
467 io_req->use_slowpath = true;
468 /*
469 * Intermediate s/g element so check if the start and end
470 * addresses are page aligned.
471 */
472 else if ((i != 0) && (i != (sg_count - 1)) &&
473 ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
474 io_req->use_slowpath = true;
475
476 if (sg_len > QEDF_MAX_BD_LEN) {
477 sg_frags = qedf_split_bd(io_req, addr, sg_len,
478 bd_count);
479 } else {
480 sg_frags = 1;
481 bd[bd_count].sge_addr.lo = U64_LO(addr);
482 bd[bd_count].sge_addr.hi = U64_HI(addr);
483 bd[bd_count].size = (uint16_t)sg_len;
484 }
485
486 bd_count += sg_frags;
487 byte_count += sg_len;
488 }
489
490 if (byte_count != scsi_bufflen(sc))
491 QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
492 "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
493 scsi_bufflen(sc), io_req->xid);
494
495 return bd_count;
496 }
497
498 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
499 {
500 struct scsi_cmnd *sc = io_req->sc_cmd;
501 struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
502 int bd_count;
503
504 if (scsi_sg_count(sc)) {
505 bd_count = qedf_map_sg(io_req);
506 if (bd_count == 0)
507 return -ENOMEM;
508 } else {
509 bd_count = 0;
510 bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
511 bd[0].size = 0;
512 }
513 io_req->bd_tbl->bd_valid = bd_count;
514
515 return 0;
516 }
517
518 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
519 struct fcp_cmnd *fcp_cmnd)
520 {
521 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
522
523 /* fcp_cmnd is 32 bytes */
524 memset(fcp_cmnd, 0, FCP_CMND_LEN);
525
526 /* 8 bytes: SCSI LUN info */
527 int_to_scsilun(sc_cmd->device->lun,
528 (struct scsi_lun *)&fcp_cmnd->fc_lun);
529
530 /* 4 bytes: flag info */
531 fcp_cmnd->fc_pri_ta = 0;
532 fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
533 fcp_cmnd->fc_flags = io_req->io_req_flags;
534 fcp_cmnd->fc_cmdref = 0;
535
536 /* Populate data direction */
537 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
538 fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
539 else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
540 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
541
542 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
543
544 /* 16 bytes: CDB information */
545 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
546
547 /* 4 bytes: FCP data length */
548 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
549
550 }
551
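/*
 * Populate the firmware task context for a regular READ or WRITE I/O.
 * Each "storm" section of struct fcoe_task_context is filled in turn:
 * Ystorm carries the transmit SGL and the FCP_CMND payload, Tstorm the
 * exchange state and response CQ number, Mstorm the receive SGL and
 * sense buffer address, and Ustorm the completion queue selection. The
 * CQ index is derived from the submitting CPU so the response arrives
 * on the same processor.
 */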
552 static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
553 struct qedf_ioreq *io_req, u32 *ptu_invalidate,
554 struct fcoe_task_context *task_ctx)
555 {
556 enum fcoe_task_type task_type;
557 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
558 struct io_bdt *bd_tbl = io_req->bd_tbl;
559 union fcoe_data_desc_ctx *data_desc;
560 u32 *fcp_cmnd;
561 u32 tmp_fcp_cmnd[8];
562 int cnt, i;
563 int bd_count;
564 struct qedf_ctx *qedf = fcport->qedf;
565 uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
566 u8 tmp_sgl_mode = 0;
567 u8 mst_sgl_mode = 0;
568
569 memset(task_ctx, 0, sizeof(struct fcoe_task_context));
570 io_req->task = task_ctx;
571
572 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
573 task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
574 else
575 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
576
577 /* Y Storm context */
578 task_ctx->ystorm_st_context.expect_first_xfer = 1;
579 task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len;
580 /* Check if this is required */
581 task_ctx->ystorm_st_context.ox_id = io_req->xid;
582 task_ctx->ystorm_st_context.task_rety_identifier =
583 io_req->task_retry_identifier;
584
585 /* T Storm ag context */
586 SET_FIELD(task_ctx->tstorm_ag_context.flags0,
587 TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE);
588 task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
589
590 /* T Storm st context */
591 SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
592 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
593 1);
594 task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
595
596 task_ctx->tstorm_st_context.read_only.dev_type =
597 FCOE_TASK_DEV_TYPE_DISK;
598 task_ctx->tstorm_st_context.read_only.conf_supported = 0;
599 task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
600
601 /* Completion queue for response. */
602 task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx;
603 task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size =
604 io_req->data_xfer_len;
605 task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val =
606 lport->e_d_tov;
607
608 task_ctx->ustorm_ag_context.global_cq_num = cq_idx;
609 io_req->fp_idx = cq_idx;
610
611 bd_count = bd_tbl->bd_valid;
612 if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
613 /* Setup WRITE task */
614 struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
615
616 task_ctx->ystorm_st_context.task_type =
617 FCOE_TASK_TYPE_WRITE_INITIATOR;
618 data_desc = &task_ctx->ystorm_st_context.data_desc;
619
620 if (io_req->use_slowpath) {
621 SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
622 YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
623 FCOE_SLOW_SGL);
624 data_desc->slow.base_sgl_addr.lo =
625 U64_LO(bd_tbl->bd_tbl_dma);
626 data_desc->slow.base_sgl_addr.hi =
627 U64_HI(bd_tbl->bd_tbl_dma);
628 data_desc->slow.remainder_num_sges = bd_count;
629 data_desc->slow.curr_sge_off = 0;
630 data_desc->slow.curr_sgl_index = 0;
631 qedf->slow_sge_ios++;
632 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
633 } else {
634 SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
635 YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
636 (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
637 FCOE_MUL_FAST_SGES);
638
639 if (bd_count == 1) {
640 data_desc->single_sge.sge_addr.lo =
641 fcoe_bd_tbl->sge_addr.lo;
642 data_desc->single_sge.sge_addr.hi =
643 fcoe_bd_tbl->sge_addr.hi;
644 data_desc->single_sge.size =
645 fcoe_bd_tbl->size;
646 data_desc->single_sge.is_valid_sge = 0;
647 qedf->single_sge_ios++;
648 io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
649 } else {
650 data_desc->fast.sgl_start_addr.lo =
651 U64_LO(bd_tbl->bd_tbl_dma);
652 data_desc->fast.sgl_start_addr.hi =
653 U64_HI(bd_tbl->bd_tbl_dma);
654 data_desc->fast.sgl_byte_offset =
655 data_desc->fast.sgl_start_addr.lo &
656 (QEDF_PAGE_SIZE - 1);
657 if (data_desc->fast.sgl_byte_offset > 0)
658 QEDF_ERR(&(qedf->dbg_ctx),
659 "byte_offset=%u for xid=0x%x.\n",
660 data_desc->fast.sgl_byte_offset,
661 io_req->xid);
662 data_desc->fast.task_reuse_cnt =
663 io_req->reuse_count;
664 io_req->reuse_count++;
665 if (io_req->reuse_count == QEDF_MAX_REUSE) {
666 *ptu_invalidate = 1;
667 io_req->reuse_count = 0;
668 }
669 qedf->fast_sge_ios++;
670 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
671 }
672 }
673
674 /* T Storm context */
675 task_ctx->tstorm_st_context.read_only.task_type =
676 FCOE_TASK_TYPE_WRITE_INITIATOR;
677
678 /* M Storm context */
679 tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
680 YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE);
681 SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
682 FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE,
683 tmp_sgl_mode);
684
685 } else {
686 /* Setup READ task */
687
688 /* M Storm context */
689 struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
690
691 data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
692 task_ctx->mstorm_st_context.fp.data_2_trns_rem =
693 io_req->data_xfer_len;
694
695 if (io_req->use_slowpath) {
696 SET_FIELD(
697 task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
698 FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
699 FCOE_SLOW_SGL);
700 data_desc->slow.base_sgl_addr.lo =
701 U64_LO(bd_tbl->bd_tbl_dma);
702 data_desc->slow.base_sgl_addr.hi =
703 U64_HI(bd_tbl->bd_tbl_dma);
704 data_desc->slow.remainder_num_sges =
705 bd_count;
706 data_desc->slow.curr_sge_off = 0;
707 data_desc->slow.curr_sgl_index = 0;
708 qedf->slow_sge_ios++;
709 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
710 } else {
711 SET_FIELD(
712 task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
713 FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
714 (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
715 FCOE_MUL_FAST_SGES);
716
717 if (bd_count == 1) {
718 data_desc->single_sge.sge_addr.lo =
719 fcoe_bd_tbl->sge_addr.lo;
720 data_desc->single_sge.sge_addr.hi =
721 fcoe_bd_tbl->sge_addr.hi;
722 data_desc->single_sge.size =
723 fcoe_bd_tbl->size;
724 data_desc->single_sge.is_valid_sge = 0;
725 qedf->single_sge_ios++;
726 io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
727 } else {
728 data_desc->fast.sgl_start_addr.lo =
729 U64_LO(bd_tbl->bd_tbl_dma);
730 data_desc->fast.sgl_start_addr.hi =
731 U64_HI(bd_tbl->bd_tbl_dma);
732 data_desc->fast.sgl_byte_offset = 0;
733 data_desc->fast.task_reuse_cnt =
734 io_req->reuse_count;
735 io_req->reuse_count++;
736 if (io_req->reuse_count == QEDF_MAX_REUSE) {
737 *ptu_invalidate = 1;
738 io_req->reuse_count = 0;
739 }
740 qedf->fast_sge_ios++;
741 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
742 }
743 }
744
745 /* Y Storm context */
746 task_ctx->ystorm_st_context.expect_first_xfer = 0;
747 task_ctx->ystorm_st_context.task_type =
748 FCOE_TASK_TYPE_READ_INITIATOR;
749
750 /* T Storm context */
751 task_ctx->tstorm_st_context.read_only.task_type =
752 FCOE_TASK_TYPE_READ_INITIATOR;
753 mst_sgl_mode = GET_FIELD(
754 task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
755 FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE);
756 SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
757 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
758 mst_sgl_mode);
759 }
760
761 /* fill FCP_CMND IU */
762 fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque;
763 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
764
765 /* Swap fcp_cmnd since FC is big endian */
766 cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
767
768 for (i = 0; i < cnt; i++) {
769 *fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]);
770 fcp_cmnd++;
771 }
772
773 /* M Storm context - Sense buffer */
774 task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
775 U64_LO(io_req->sense_buffer_dma);
776 task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
777 U64_HI(io_req->sense_buffer_dma);
778 }
779
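/*
 * Initialize the task context for a middle-path request (ELS or task
 * management). These reuse the single-SGE request/response buffers set
 * up in qedf_init_mp_req() and, unlike regular I/O, always take the
 * slow SGL path and complete on CQ #0.
 */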
780 void qedf_init_mp_task(struct qedf_ioreq *io_req,
781 struct fcoe_task_context *task_ctx)
782 {
783 struct qedf_mp_req *mp_req = &(io_req->mp_req);
784 struct qedf_rport *fcport = io_req->fcport;
785 struct qedf_ctx *qedf = io_req->fcport->qedf;
786 struct fc_frame_header *fc_hdr;
787 enum fcoe_task_type task_type = 0;
788 union fcoe_data_desc_ctx *data_desc;
789
790 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task "
791 "for cmd_type = %d\n", io_req->cmd_type);
792
793 qedf->control_requests++;
794
795 /* Obtain task_type */
796 if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) ||
797 (io_req->cmd_type == QEDF_ELS)) {
798 task_type = FCOE_TASK_TYPE_MIDPATH;
799 } else if (io_req->cmd_type == QEDF_ABTS) {
800 task_type = FCOE_TASK_TYPE_ABTS;
801 }
802
803 memset(task_ctx, 0, sizeof(struct fcoe_task_context));
804
805 /* Setup the task from io_req for easy reference */
806 io_req->task = task_ctx;
807
808 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n",
809 task_type);
810
811 /* YSTORM only */
812 {
813 /* Initialize YSTORM task context */
814 struct fcoe_tx_mid_path_params *task_fc_hdr =
815 &task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path;
816 memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
817 task_ctx->ystorm_st_context.task_rety_identifier =
818 io_req->task_retry_identifier;
819
820 /* Init SGL parameters */
821 if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
822 (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
823 data_desc = &task_ctx->ystorm_st_context.data_desc;
824 data_desc->slow.base_sgl_addr.lo =
825 U64_LO(mp_req->mp_req_bd_dma);
826 data_desc->slow.base_sgl_addr.hi =
827 U64_HI(mp_req->mp_req_bd_dma);
828 data_desc->slow.remainder_num_sges = 1;
829 data_desc->slow.curr_sge_off = 0;
830 data_desc->slow.curr_sgl_index = 0;
831 }
832
833 fc_hdr = &(mp_req->req_fc_hdr);
834 if (task_type == FCOE_TASK_TYPE_MIDPATH) {
835 fc_hdr->fh_ox_id = io_req->xid;
836 fc_hdr->fh_rx_id = htons(0xffff);
837 } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
838 fc_hdr->fh_rx_id = io_req->xid;
839 }
840
841 /* Fill FC Header into middle path buffer */
842 task_fc_hdr->parameter = fc_hdr->fh_parm_offset;
843 task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl;
844 task_fc_hdr->type = fc_hdr->fh_type;
845 task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl;
846 task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl;
847 task_fc_hdr->rx_id = fc_hdr->fh_rx_id;
848 task_fc_hdr->ox_id = fc_hdr->fh_ox_id;
849
850 task_ctx->ystorm_st_context.data_2_trns_rem =
851 io_req->data_xfer_len;
852 task_ctx->ystorm_st_context.task_type = task_type;
853 }
854
855 /* TSTORM ONLY */
856 {
857 task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
858 task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
859 /* Always send middle-path responses on CQ #0 */
860 task_ctx->tstorm_st_context.read_only.glbl_q_num = 0;
861 io_req->fp_idx = 0;
862 SET_FIELD(task_ctx->tstorm_ag_context.flags0,
863 TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE,
864 PROTOCOLID_FCOE);
865 task_ctx->tstorm_st_context.read_only.task_type = task_type;
866 SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
867 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
868 1);
869 task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
870 }
871
872 /* MSTORM only */
873 {
874 if (task_type == FCOE_TASK_TYPE_MIDPATH) {
875 /* Initialize task context */
876 data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
877
878 /* Set cache sges address and length */
879 data_desc->slow.base_sgl_addr.lo =
880 U64_LO(mp_req->mp_resp_bd_dma);
881 data_desc->slow.base_sgl_addr.hi =
882 U64_HI(mp_req->mp_resp_bd_dma);
883 data_desc->slow.remainder_num_sges = 1;
884 data_desc->slow.curr_sge_off = 0;
885 data_desc->slow.curr_sgl_index = 0;
886
887 /*
888 * Also need to fill in the non-fastpath response address
889 * for middle path commands.
890 */
891 task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
892 U64_LO(mp_req->mp_resp_bd_dma);
893 task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
894 U64_HI(mp_req->mp_resp_bd_dma);
895 }
896 }
897
898 /* USTORM ONLY */
899 {
900 task_ctx->ustorm_ag_context.global_cq_num = 0;
901 }
902
903 /* I/O stats. Middle path commands always use slow SGEs */
904 qedf->slow_sge_ios++;
905 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
906 }
907
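/*
 * Write one work-queue entry into the connection's send queue. The local
 * producer index (sq_prod_idx) wraps at the ring size while
 * fw_sq_prod_idx runs free; the latter is what qedf_ring_doorbell()
 * hands to the hardware.
 */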
908 void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate,
909 enum fcoe_task_type req_type, u32 offset)
910 {
911 struct fcoe_wqe *sqe;
912 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
913
914 sqe = &fcport->sq[fcport->sq_prod_idx];
915
916 fcport->sq_prod_idx++;
917 fcport->fw_sq_prod_idx++;
918 if (fcport->sq_prod_idx == total_sqe)
919 fcport->sq_prod_idx = 0;
920
921 switch (req_type) {
922 case FCOE_TASK_TYPE_WRITE_INITIATOR:
923 case FCOE_TASK_TYPE_READ_INITIATOR:
924 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD);
925 if (ptu_invalidate)
926 SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1);
927 break;
928 case FCOE_TASK_TYPE_MIDPATH:
929 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH);
930 break;
931 case FCOE_TASK_TYPE_ABTS:
932 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
933 SEND_FCOE_ABTS_REQUEST);
934 break;
935 case FCOE_TASK_TYPE_EXCHANGE_CLEANUP:
936 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
937 FCOE_EXCHANGE_CLEANUP);
938 break;
939 case FCOE_TASK_TYPE_SEQUENCE_CLEANUP:
940 SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
941 FCOE_SEQUENCE_RECOVERY);
942 /* NOTE: offset param only used for sequence recovery */
943 sqe->additional_info_union.seq_rec_updated_offset = offset;
944 break;
945 case FCOE_TASK_TYPE_UNSOLICITED:
946 break;
947 default:
948 break;
949 }
950
951 sqe->task_id = xid;
952
953 /* Make sure SQ data is coherent */
954 wmb();
955
956 }
957
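/*
 * Notify the hardware that new SQEs are available by writing a doorbell
 * record (destination XCM, aggregated SQ producer command) along with
 * the current firmware producer index.
 */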
958 void qedf_ring_doorbell(struct qedf_rport *fcport)
959 {
960 struct fcoe_db_data dbell = { 0 };
961
962 dbell.agg_flags = 0;
963
964 dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
965 dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
966 dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
967 FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
968
969 dbell.sq_prod = fcport->fw_sq_prod_idx;
970 writel(*(u32 *)&dbell, fcport->p_doorbell);
971 /* Make sure SQ index is updated so f/w processes requests in order */
972 wmb();
973 mmiowb();
974 }
975
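/*
 * Record one request or response event in the driver's circular I/O
 * trace buffer (QEDF_IO_TRACE_SIZE entries, guarded by io_trace_lock).
 * Only called when qedf_io_tracing is enabled.
 */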
976 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
977 int8_t direction)
978 {
979 struct qedf_ctx *qedf = fcport->qedf;
980 struct qedf_io_log *io_log;
981 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
982 unsigned long flags;
983 uint8_t op;
984
985 spin_lock_irqsave(&qedf->io_trace_lock, flags);
986
987 io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
988 io_log->direction = direction;
989 io_log->task_id = io_req->xid;
990 io_log->port_id = fcport->rdata->ids.port_id;
991 io_log->lun = sc_cmd->device->lun;
992 io_log->op = op = sc_cmd->cmnd[0];
993 io_log->lba[0] = sc_cmd->cmnd[2];
994 io_log->lba[1] = sc_cmd->cmnd[3];
995 io_log->lba[2] = sc_cmd->cmnd[4];
996 io_log->lba[3] = sc_cmd->cmnd[5];
997 io_log->bufflen = scsi_bufflen(sc_cmd);
998 io_log->sg_count = scsi_sg_count(sc_cmd);
999 io_log->result = sc_cmd->result;
1000 io_log->jiffies = jiffies;
1001 io_log->refcount = kref_read(&io_req->refcount);
1002
1003 if (direction == QEDF_IO_TRACE_REQ) {
1004 /* For requests we only care about the submission CPU */
1005 io_log->req_cpu = io_req->cpu;
1006 io_log->int_cpu = 0;
1007 io_log->rsp_cpu = 0;
1008 } else if (direction == QEDF_IO_TRACE_RSP) {
1009 io_log->req_cpu = io_req->cpu;
1010 io_log->int_cpu = io_req->int_cpu;
1011 io_log->rsp_cpu = smp_processor_id();
1012 }
1013
1014 io_log->sge_type = io_req->sge_type;
1015
1016 qedf->io_trace_idx++;
1017 if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
1018 qedf->io_trace_idx = 0;
1019
1020 spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
1021 }
1022
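/*
 * Post a SCSI command to the firmware: build the BD list from the
 * scatter/gather list, initialize the task context, queue an SQE and
 * ring the doorbell. Called from qedf_queuecommand() with
 * fcport->rport_lock held.
 */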
1023 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
1024 {
1025 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1026 struct Scsi_Host *host = sc_cmd->device->host;
1027 struct fc_lport *lport = shost_priv(host);
1028 struct qedf_ctx *qedf = lport_priv(lport);
1029 struct fcoe_task_context *task_ctx;
1030 u16 xid;
1031 enum fcoe_task_type req_type = 0;
1032 u32 ptu_invalidate = 0;
1033
1034 /* Initialize the rest of the io_req fields */
1035 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
1036 sc_cmd->SCp.ptr = (char *)io_req;
1037 io_req->use_slowpath = false; /* Assume fast SGL by default */
1038
1039 /* Record which cpu this request is associated with */
1040 io_req->cpu = smp_processor_id();
1041
1042 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
1043 req_type = FCOE_TASK_TYPE_READ_INITIATOR;
1044 io_req->io_req_flags = QEDF_READ;
1045 qedf->input_requests++;
1046 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
1047 req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
1048 io_req->io_req_flags = QEDF_WRITE;
1049 qedf->output_requests++;
1050 } else {
1051 io_req->io_req_flags = 0;
1052 qedf->control_requests++;
1053 }
1054
1055 xid = io_req->xid;
1056
1057 /* Build buffer descriptor list for firmware from sg list */
1058 if (qedf_build_bd_list_from_sg(io_req)) {
1059 QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
1060 kref_put(&io_req->refcount, qedf_release_cmd);
1061 return -EAGAIN;
1062 }
1063
1064 /* Get the task context */
1065 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
1066 if (!task_ctx) {
1067 QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
1068 xid);
1069 kref_put(&io_req->refcount, qedf_release_cmd);
1070 return -EINVAL;
1071 }
1072
1073 qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx);
1074
1075 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1076 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
1077 kref_put(&io_req->refcount, qedf_release_cmd);
return -EINVAL; /* don't post the SQE; the io_req reference was just dropped */
1078 }
1079
1080 /* Obtain free SQ entry */
1081 qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0);
1082
1083 /* Ring doorbell */
1084 qedf_ring_doorbell(fcport);
1085
1086 if (qedf_io_tracing && io_req->sc_cmd)
1087 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
1088
1089 return 0;
1090 }
1091
1092 int
1093 qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
1094 {
1095 struct fc_lport *lport = shost_priv(host);
1096 struct qedf_ctx *qedf = lport_priv(lport);
1097 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1098 struct fc_rport_libfc_priv *rp = rport->dd_data;
1099 struct qedf_rport *fcport = rport->dd_data;
1100 struct qedf_ioreq *io_req;
1101 int rc = 0;
1102 int rval;
1103 unsigned long flags = 0;
1104
1105
1106 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
1107 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
1108 sc_cmd->result = DID_NO_CONNECT << 16;
1109 sc_cmd->scsi_done(sc_cmd);
1110 return 0;
1111 }
1112
1113 rval = fc_remote_port_chkready(rport);
1114 if (rval) {
1115 sc_cmd->result = rval;
1116 sc_cmd->scsi_done(sc_cmd);
1117 return 0;
1118 }
1119
1120 /* Retry command if we are doing a qed drain operation */
1121 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
1122 rc = SCSI_MLQUEUE_HOST_BUSY;
1123 goto exit_qcmd;
1124 }
1125
1126 if (lport->state != LPORT_ST_READY ||
1127 atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
1128 rc = SCSI_MLQUEUE_HOST_BUSY;
1129 goto exit_qcmd;
1130 }
1131
1132 /* rport and tgt are allocated together, so tgt should be non-NULL */
1133 fcport = (struct qedf_rport *)&rp[1];
1134
1135 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1136 /*
1137 * Session is not offloaded yet. Let SCSI-ml retry
1138 * the command.
1139 */
1140 rc = SCSI_MLQUEUE_TARGET_BUSY;
1141 goto exit_qcmd;
1142 }
1143 if (fcport->retry_delay_timestamp) {
1144 if (time_after(jiffies, fcport->retry_delay_timestamp)) {
1145 fcport->retry_delay_timestamp = 0;
1146 } else {
1147 /* If retry_delay timer is active, flow off the ML */
1148 rc = SCSI_MLQUEUE_TARGET_BUSY;
1149 goto exit_qcmd;
1150 }
1151 }
1152
1153 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1154 if (!io_req) {
1155 rc = SCSI_MLQUEUE_HOST_BUSY;
1156 goto exit_qcmd;
1157 }
1158
1159 io_req->sc_cmd = sc_cmd;
1160
1161 /* Take fcport->rport_lock for posting to fcport send queue */
1162 spin_lock_irqsave(&fcport->rport_lock, flags);
1163 if (qedf_post_io_req(fcport, io_req)) {
1164 QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1165 /* Return SQE to pool */
1166 atomic_inc(&fcport->free_sqes);
1167 rc = SCSI_MLQUEUE_HOST_BUSY;
1168 }
1169 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1170
1171 exit_qcmd:
1172 return rc;
1173 }
1174
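/*
 * Unpack the FCP_RSP IU from the completion: residual count, SCSI status,
 * and optional response-info and sense data. Per the FCP spec the
 * response-info bytes, when valid, precede the sense data in the buffer,
 * so the sense pointer is advanced past them.
 */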
1175 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1176 struct fcoe_cqe_rsp_info *fcp_rsp)
1177 {
1178 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1179 struct qedf_ctx *qedf = io_req->fcport->qedf;
1180 u8 rsp_flags = fcp_rsp->rsp_flags.flags;
1181 int fcp_sns_len = 0;
1182 int fcp_rsp_len = 0;
1183 uint8_t *rsp_info, *sense_data;
1184
1185 io_req->fcp_status = FC_GOOD;
1186 io_req->fcp_resid = 0;
1187 if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1188 FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1189 io_req->fcp_resid = fcp_rsp->fcp_resid;
1190
1191 io_req->scsi_comp_flags = rsp_flags;
1192 CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1193 fcp_rsp->scsi_status_code;
1194
1195 if (rsp_flags &
1196 FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
1197 fcp_rsp_len = fcp_rsp->fcp_rsp_len;
1198
1199 if (rsp_flags &
1200 FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
1201 fcp_sns_len = fcp_rsp->fcp_sns_len;
1202
1203 io_req->fcp_rsp_len = fcp_rsp_len;
1204 io_req->fcp_sns_len = fcp_sns_len;
1205 rsp_info = sense_data = io_req->sense_buffer;
1206
1207 /* fetch fcp_rsp_code */
1208 if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1209 /* Only for task management function */
1210 io_req->fcp_rsp_code = rsp_info[3];
1211 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1212 "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1213 /* Adjust sense-data location. */
1214 sense_data += fcp_rsp_len;
1215 }
1216
1217 if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1218 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1219 "Truncating sense buffer\n");
1220 fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1221 }
1222
1223 memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1224 if (fcp_sns_len)
1225 memcpy(sc_cmd->sense_buffer, sense_data,
1226 fcp_sns_len);
1227 }
1228
1229 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1230 {
1231 struct scsi_cmnd *sc = io_req->sc_cmd;
1232
1233 if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1234 dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
1235 scsi_sg_count(sc), sc->sc_data_direction);
1236 io_req->bd_tbl->bd_valid = 0;
1237 }
1238 }
1239
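/*
 * Normal completion path for a SCSI command. After validating that the
 * command is still owned by the driver, this parses the FCP_RSP, unmaps
 * the S/G list, translates transport errors and firmware-detected
 * underruns into SCSI results, honors any retry-delay status qualifier,
 * and finally returns the command to the midlayer.
 */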
1240 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1241 struct qedf_ioreq *io_req)
1242 {
1243 u16 xid, rval;
1244 struct fcoe_task_context *task_ctx;
1245 struct scsi_cmnd *sc_cmd;
1246 struct fcoe_cqe_rsp_info *fcp_rsp;
1247 struct qedf_rport *fcport;
1248 int refcount;
1249 u16 scope, qualifier = 0;
1250 u8 fw_residual_flag = 0;
1251
1252 if (!io_req)
1253 return;
1254 if (!cqe)
1255 return;
1256
1257 xid = io_req->xid;
1258 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
1259 sc_cmd = io_req->sc_cmd;
1260 fcp_rsp = &cqe->cqe_info.rsp_info;
1261
1262 if (!sc_cmd) {
1263 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1264 return;
1265 }
1266
1267 if (!sc_cmd->SCp.ptr) {
1268 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1269 "another context.\n");
1270 return;
1271 }
1272
1273 if (!sc_cmd->request) {
1274 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
1275 "sc_cmd=%p.\n", sc_cmd);
1276 return;
1277 }
1278
1279 if (!sc_cmd->request->special) {
1280 QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
1281 "request not valid, sc_cmd=%p.\n", sc_cmd);
1282 return;
1283 }
1284
1285 if (!sc_cmd->request->q) {
1286 QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
1287 "is not valid, sc_cmd=%p.\n", sc_cmd);
1288 return;
1289 }
1290
1291 fcport = io_req->fcport;
1292
1293 qedf_parse_fcp_rsp(io_req, fcp_rsp);
1294
1295 qedf_unmap_sg_list(qedf, io_req);
1296
1297 /* Check for FCP transport error */
1298 if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1299 QEDF_ERR(&(qedf->dbg_ctx),
1300 "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
1301 "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1302 io_req->fcp_rsp_code);
1303 sc_cmd->result = DID_BUS_BUSY << 16;
1304 goto out;
1305 }
1306
1307 fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
1308 FCOE_CQE_RSP_INFO_FW_UNDERRUN);
1309 if (fw_residual_flag) {
1310 QEDF_ERR(&(qedf->dbg_ctx),
1311 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
1312 "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
1313 fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
1314 cqe->cqe_info.rsp_info.fw_residual);
1315
1316 if (io_req->cdb_status == 0)
1317 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1318 else
1319 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1320
1321 /* Abort the command since we did not get all the data */
1322 init_completion(&io_req->abts_done);
1323 rval = qedf_initiate_abts(io_req, true);
1324 if (rval) {
1325 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1326 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1327 }
1328
1329 /*
1330 * Set resid to the whole buffer length so we won't try to reuse
1331 * any previously read data.
1332 */
1333 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1334 goto out;
1335 }
1336
1337 switch (io_req->fcp_status) {
1338 case FC_GOOD:
1339 if (io_req->cdb_status == 0) {
1340 /* Good I/O completion */
1341 sc_cmd->result = DID_OK << 16;
1342 } else {
1343 refcount = kref_read(&io_req->refcount);
1344 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1345 "%d:0:%d:%d xid=0x%0x op=0x%02x "
1346 "lba=%02x%02x%02x%02x cdb_status=%d "
1347 "fcp_resid=0x%x refcount=%d.\n",
1348 qedf->lport->host->host_no, sc_cmd->device->id,
1349 sc_cmd->device->lun, io_req->xid,
1350 sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
1351 sc_cmd->cmnd[4], sc_cmd->cmnd[5],
1352 io_req->cdb_status, io_req->fcp_resid,
1353 refcount);
1354 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1355
1356 if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1357 io_req->cdb_status == SAM_STAT_BUSY) {
1358 /*
1359 * Check whether we need to set retry_delay at
1360 * all based on retry_delay module parameter
1361 * and the status qualifier.
1362 */
1363
1364 /* Upper 2 bits */
1365 scope = fcp_rsp->retry_delay_timer & 0xC000;
1366 /* Lower 14 bits */
1367 qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
1368
1369 if (qedf_retry_delay &&
1370 scope > 0 && qualifier > 0 &&
1371 qualifier <= 0x3FEF) {
1372 /* Check we don't go over the max */
1373 if (qualifier > QEDF_RETRY_DELAY_MAX)
1374 qualifier =
1375 QEDF_RETRY_DELAY_MAX;
1376 fcport->retry_delay_timestamp =
1377 jiffies + (qualifier * HZ / 10);
1378 }
1379 }
1380 }
1381 if (io_req->fcp_resid)
1382 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1383 break;
1384 default:
1385 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
1386 io_req->fcp_status);
1387 break;
1388 }
1389
1390 out:
1391 if (qedf_io_tracing)
1392 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1393
1394 io_req->sc_cmd = NULL;
1395 sc_cmd->SCp.ptr = NULL;
1396 sc_cmd->scsi_done(sc_cmd);
1397 kref_put(&io_req->refcount, qedf_release_cmd);
1398 }
1399
1400 /* Return a SCSI command in some other context besides a normal completion */
1401 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1402 int result)
1403 {
1404 u16 xid;
1405 struct scsi_cmnd *sc_cmd;
1406 int refcount;
1407
1408 if (!io_req)
1409 return;
1410
1411 xid = io_req->xid;
1412 sc_cmd = io_req->sc_cmd;
1413
1414 if (!sc_cmd) {
1415 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1416 return;
1417 }
1418
1419 if (!sc_cmd->SCp.ptr) {
1420 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1421 "another context.\n");
1422 return;
1423 }
1424
1425 qedf_unmap_sg_list(qedf, io_req);
1426
1427 sc_cmd->result = result << 16;
1428 refcount = kref_read(&io_req->refcount);
1429 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%d: Completing "
1430 "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
1431 "allowed=%d retries=%d refcount=%d.\n",
1432 qedf->lport->host->host_no, sc_cmd->device->id,
1433 sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
1434 sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
1435 sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
1436 refcount);
1437
1438 /*
1439 * Set resid to the whole buffer length so we won't try to reuse any
1440 * previously read data.
1441 */
1442 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1443
1444 if (qedf_io_tracing)
1445 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1446
1447 io_req->sc_cmd = NULL;
1448 sc_cmd->SCp.ptr = NULL;
1449 sc_cmd->scsi_done(sc_cmd);
1450 kref_put(&io_req->refcount, qedf_release_cmd);
1451 }
1452
1453 /*
1454 * Handle warning type CQE completions. This is mainly used for REC timer
1455 * popping.
1456 */
1457 void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1458 struct qedf_ioreq *io_req)
1459 {
1460 int rval, i;
1461 struct qedf_rport *fcport = io_req->fcport;
1462 u64 err_warn_bit_map;
1463 u8 err_warn = 0xff;
1464
1465 if (!cqe)
1466 return;
1467
1468 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1469 "xid=0x%x\n", io_req->xid);
1470 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1471 "err_warn_bitmap=%08x:%08x\n",
1472 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1473 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1474 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1475 "rx_buff_off=%08x, rx_id=%04x\n",
1476 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1477 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1478 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1479
1480 /* Normalize the error bitmap value to just an unsigned int */
1481 err_warn_bit_map = (u64)
1482 ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
1483 (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
1484 for (i = 0; i < 64; i++) {
1485 if (err_warn_bit_map & (u64)((u64)1 << i)) {
1486 err_warn = i;
1487 break;
1488 }
1489 }
1490
1491 /* Check if REC TOV expired if this is a tape device */
1492 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1493 if (err_warn ==
1494 FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
1495 QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
1496 if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1497 io_req->rx_buf_off =
1498 cqe->cqe_info.err_info.rx_buf_off;
1499 io_req->tx_buf_off =
1500 cqe->cqe_info.err_info.tx_buf_off;
1501 io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1502 rval = qedf_send_rec(io_req);
1503 /*
1504 * We only want to abort the io_req if we
1505 * can't queue the REC command as we want to
1506 * keep the exchange open for recovery.
1507 */
1508 if (rval)
1509 goto send_abort;
1510 }
1511 return;
1512 }
1513 }
1514
1515 send_abort:
1516 init_completion(&io_req->abts_done);
1517 rval = qedf_initiate_abts(io_req, true);
1518 if (rval)
1519 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1520 }
1521
1522 /* Cleanup a command when we receive an error detection completion */
1523 void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1524 struct qedf_ioreq *io_req)
1525 {
1526 int rval;
1527
1528 if (!cqe)
1529 return;
1530
1531 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1532 "xid=0x%x\n", io_req->xid);
1533 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1534 "err_warn_bitmap=%08x:%08x\n",
1535 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1536 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1537 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1538 "rx_buff_off=%08x, rx_id=%04x\n",
1539 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1540 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1541 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1542
1543 if (qedf->stop_io_on_error) {
1544 qedf_stop_all_io(qedf);
1545 return;
1546 }
1547
1548 init_completion(&io_req->abts_done);
1549 rval = qedf_initiate_abts(io_req, true);
1550 if (rval)
1551 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1552 }
1553
1554 static void qedf_flush_els_req(struct qedf_ctx *qedf,
1555 struct qedf_ioreq *els_req)
1556 {
1557 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1558 "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1559 kref_read(&els_req->refcount));
1560
1561 /*
1562 * Need to distinguish this from a timeout when calling the
1563 * els_req->cb_func.
1564 */
1565 els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1566
1567 /* Cancel the timer */
1568 cancel_delayed_work_sync(&els_req->timeout_work);
1569
1570 /* Call callback function to complete command */
1571 if (els_req->cb_func && els_req->cb_arg) {
1572 els_req->cb_func(els_req->cb_arg);
1573 els_req->cb_arg = NULL;
1574 }
1575
1576 /* Release kref for original initiate_els */
1577 kref_put(&els_req->refcount, qedf_release_cmd);
1578 }
1579
1580 /* A value of -1 for lun is a wild card that means flush all
1581 * active SCSI I/Os for the target.
1582 */
1583 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1584 {
1585 struct qedf_ioreq *io_req;
1586 struct qedf_ctx *qedf;
1587 struct qedf_cmd_mgr *cmd_mgr;
1588 int i, rc;
1589
1590 if (!fcport)
1591 return;
1592
1593 qedf = fcport->qedf;
1594 cmd_mgr = qedf->cmd_mgr;
1595
1596 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");
1597
1598 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1599 io_req = &cmd_mgr->cmds[i];
1600
1601 if (!io_req)
1602 continue;
1603 if (io_req->fcport != fcport)
1604 continue;
1605 if (io_req->cmd_type == QEDF_ELS) {
1606 rc = kref_get_unless_zero(&io_req->refcount);
1607 if (!rc) {
1608 QEDF_ERR(&(qedf->dbg_ctx),
1609 "Could not get kref for io_req=0x%p.\n",
1610 io_req);
1611 continue;
1612 }
1613 qedf_flush_els_req(qedf, io_req);
1614 /*
1615 * Release the kref and go back to the top of the
1616 * loop.
1617 */
1618 goto free_cmd;
1619 }
1620
1621 if (!io_req->sc_cmd)
1622 continue;
1623 if (lun > -1) {
1624 if (io_req->sc_cmd->device->lun !=
1625 (u64)lun)
1626 continue;
1627 }
1628
1629 /*
1630 * Use kref_get_unless_zero in the unlikely case the command
1631 * we're about to flush was completed in the normal SCSI path
1632 */
1633 rc = kref_get_unless_zero(&io_req->refcount);
1634 if (!rc) {
1635 QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1636 "io_req=0x%p\n", io_req);
1637 continue;
1638 }
1639 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1640 "Cleanup xid=0x%x.\n", io_req->xid);
1641
1642 /* Cleanup task and return I/O to the mid-layer */
1643 qedf_initiate_cleanup(io_req, true);
1644
1645 free_cmd:
1646 kref_put(&io_req->refcount, qedf_release_cmd);
1647 }
1648 }
1649
1650 /*
1651 * Initiate an ABTS middle path command. Note that we don't have to initialize
1652 * the task context for an ABTS task.
1653 */
1654 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1655 {
1656 struct fc_lport *lport;
1657 struct qedf_rport *fcport = io_req->fcport;
1658 struct fc_rport_priv *rdata = fcport->rdata;
1659 struct qedf_ctx *qedf = fcport->qedf;
1660 u16 xid;
1661 u32 r_a_tov = 0;
1662 int rc = 0;
1663 unsigned long flags;
1664
1665 r_a_tov = rdata->r_a_tov;
1666 lport = qedf->lport;
1667
1668 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1669 QEDF_ERR(&(qedf->dbg_ctx), "tgt not offloaded\n");
1670 rc = 1;
1671 goto abts_err;
1672 }
1673
1674 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1675 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1676 rc = 1;
1677 goto abts_err;
1678 }
1679
1680 if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1681 QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1682 rc = 1;
1683 goto abts_err;
1684 }
1685
1686 /* Ensure room on SQ */
1687 if (!atomic_read(&fcport->free_sqes)) {
1688 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1689 rc = 1;
1690 goto abts_err;
1691 }
1692
1693
1694 kref_get(&io_req->refcount);
1695
1696 xid = io_req->xid;
1697 qedf->control_requests++;
1698 qedf->packet_aborts++;
1699
1700 /* Set the return CPU to be the same as the request one */
1701 io_req->cpu = smp_processor_id();
1702
1703 /* Set the command type to abort */
1704 io_req->cmd_type = QEDF_ABTS;
1705 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1706
1707 set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1708 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
1709 "0x%x\n", xid);
1710
1711 qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);
1712
1713 spin_lock_irqsave(&fcport->rport_lock, flags);
1714
1715 /* Add ABTS to send queue */
1716 qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0);
1717
1718 /* Ring doorbell */
1719 qedf_ring_doorbell(fcport);
1720
1721 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1722
1723 return rc;
1724 abts_err:
1725 /*
1726 * If the ABTS task fails to queue then we need to cleanup the
1727 * task at the firmware.
1728 */
1729 qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
1730 return rc;
1731 }
1732
1733 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1734 struct qedf_ioreq *io_req)
1735 {
1736 uint32_t r_ctl;
1737 uint16_t xid;
1738
1739 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1740 "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1741
1742 cancel_delayed_work(&io_req->timeout_work);
1743
1744 xid = io_req->xid;
1745 r_ctl = cqe->cqe_info.abts_info.r_ctl;
1746
1747 switch (r_ctl) {
1748 case FC_RCTL_BA_ACC:
1749 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1750 "ABTS response - ACC Send RRQ after R_A_TOV\n");
1751 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
1752 /*
1753 * Don't release this cmd yet. It will be released
1754 * after we get the RRQ response.
1755 */
1756 kref_get(&io_req->refcount);
1757 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
1758 msecs_to_jiffies(qedf->lport->r_a_tov));
1759 break;
1760 /* For error cases let the cleanup return the command */
1761 case FC_RCTL_BA_RJT:
1762 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1763 "ABTS response - RJT\n");
1764 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
1765 break;
1766 default:
1767 QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
1768 break;
1769 }
1770
1771 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1772
1773 if (io_req->sc_cmd) {
1774 if (io_req->return_scsi_cmd_on_abts)
1775 qedf_scsi_done(qedf, io_req, DID_ERROR);
1776 }
1777
1778 /* Notify eh_abort handler that ABTS is complete */
1779 complete(&io_req->abts_done);
1780
1781 kref_put(&io_req->refcount, qedf_release_cmd);
1782 }
1783
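/*
 * Allocate the DMA resources for a middle-path exchange: one page each
 * for the request and response payloads plus a single fcoe_sge BD
 * describing each buffer. qedf_free_mp_resc() undoes any partial
 * allocation on failure.
 */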
1784 int qedf_init_mp_req(struct qedf_ioreq *io_req)
1785 {
1786 struct qedf_mp_req *mp_req;
1787 struct fcoe_sge *mp_req_bd;
1788 struct fcoe_sge *mp_resp_bd;
1789 struct qedf_ctx *qedf = io_req->fcport->qedf;
1790 dma_addr_t addr;
1791 uint64_t sz;
1792
1793 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
1794
1795 mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
1796 memset(mp_req, 0, sizeof(struct qedf_mp_req));
1797
1798 if (io_req->cmd_type != QEDF_ELS) {
1799 mp_req->req_len = sizeof(struct fcp_cmnd);
1800 io_req->data_xfer_len = mp_req->req_len;
1801 } else
1802 mp_req->req_len = io_req->data_xfer_len;
1803
1804 mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
1805 &mp_req->req_buf_dma, GFP_KERNEL);
1806 if (!mp_req->req_buf) {
1807 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
1808 qedf_free_mp_resc(io_req);
1809 return -ENOMEM;
1810 }
1811
1812 mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
1813 QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
1814 if (!mp_req->resp_buf) {
1815 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
1816 "buffer\n");
1817 qedf_free_mp_resc(io_req);
1818 return -ENOMEM;
1819 }
1820
1821 /* Allocate and map mp_req_bd and mp_resp_bd */
1822 sz = sizeof(struct fcoe_sge);
1823 mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1824 &mp_req->mp_req_bd_dma, GFP_KERNEL);
1825 if (!mp_req->mp_req_bd) {
1826 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
1827 qedf_free_mp_resc(io_req);
1828 return -ENOMEM;
1829 }
1830
1831 mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1832 &mp_req->mp_resp_bd_dma, GFP_KERNEL);
1833 if (!mp_req->mp_resp_bd) {
1834 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
1835 qedf_free_mp_resc(io_req);
1836 return -ENOMEM;
1837 }
1838
1839 /* Fill bd table */
1840 addr = mp_req->req_buf_dma;
1841 mp_req_bd = mp_req->mp_req_bd;
1842 mp_req_bd->sge_addr.lo = U64_LO(addr);
1843 mp_req_bd->sge_addr.hi = U64_HI(addr);
1844 mp_req_bd->size = QEDF_PAGE_SIZE;
1845
1846 /*
1847 * MP buffer is either a task mgmt command or an ELS.
1848 * So the assumption is that it consumes a single bd
1849 * entry in the bd table
1850 */
1851 mp_resp_bd = mp_req->mp_resp_bd;
1852 addr = mp_req->resp_buf_dma;
1853 mp_resp_bd->sge_addr.lo = U64_LO(addr);
1854 mp_resp_bd->sge_addr.hi = U64_HI(addr);
1855 mp_resp_bd->size = QEDF_PAGE_SIZE;
1856
1857 return 0;
1858 }
1859
1860 /*
1861 * Last ditch effort to clear the port if it's stuck. Used only after a
1862 * cleanup task times out.
1863 */
1864 static void qedf_drain_request(struct qedf_ctx *qedf)
1865 {
1866 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
1867 QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
1868 return;
1869 }
1870
1871 /* Set bit to return all queuecommand requests as busy */
1872 set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
1873
1874 /* Call qed drain request for function. Should be synchronous */
1875 qed_ops->common->drain(qedf->cdev);
1876
1877 /* Settle time for CQEs to be returned */
1878 msleep(100);
1879
1880 /* Unplug and continue */
1881 clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
1882 }
1883
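/*
 * A note on the timeout convention used below (and in qedf_execute_tmf()):
 * wait_for_completion_timeout() returns 0 on timeout and the number of
 * jiffies remaining otherwise, so the pattern is:
 *
 *	tmo = wait_for_completion_timeout(&io_req->tm_done,
 *	    QEDF_CLEANUP_TIMEOUT * HZ);
 *	if (!tmo)
 *		rc = FAILED;
 *
 * where a zero return means the firmware never posted the completion CQE.
 */
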
1884 /*
1885 * Returns SUCCESS if the cleanup task completes before timing out,
1886 * otherwise returns FAILED.
1887 */
1888 int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
1889 bool return_scsi_cmd_on_abts)
1890 {
1891 struct qedf_rport *fcport;
1892 struct qedf_ctx *qedf;
1893 uint16_t xid;
1894 struct fcoe_task_context *task;
1895 int tmo = 0;
1896 int rc = SUCCESS;
1897 unsigned long flags;
1898
1899 fcport = io_req->fcport;
1900 if (!fcport) {
1901 QEDF_ERR(NULL, "fcport is NULL.\n");
1902 return SUCCESS;
1903 }
1904
1905 qedf = fcport->qedf;
1906 if (!qedf) {
1907 QEDF_ERR(NULL, "qedf is NULL.\n");
1908 return SUCCESS;
1909 }
1910
1911 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1912 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
1913 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x is already in "
1914 "cleanup processing or has already completed.\n",
1915 io_req->xid);
1916 return SUCCESS;
1917 }
1918
1919 /* Ensure room on SQ */
1920 if (!atomic_read(&fcport->free_sqes)) {
1921 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1922 return FAILED;
1923 }
1924
1926 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
1927 io_req->xid);
1928
1929 /* Cleanup cmds re-use the same TID as the original I/O */
1930 xid = io_req->xid;
1931 io_req->cmd_type = QEDF_CLEANUP;
1932 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1933
1934 /* Set the return CPU to be the same as the request one */
1935 io_req->cpu = smp_processor_id();
1936
1937 set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1938
1939 task = qedf_get_task_mem(&qedf->tasks, xid);
1940
1941 init_completion(&io_req->tm_done);
1942
1943 /* Obtain free SQ entry */
1944 spin_lock_irqsave(&fcport->rport_lock, flags);
1945 qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0);
1946
1947 /* Ring doorbell */
1948 qedf_ring_doorbell(fcport);
1949 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1950
1951 tmo = wait_for_completion_timeout(&io_req->tm_done,
1952 QEDF_CLEANUP_TIMEOUT * HZ);
1953
1954 if (!tmo) {
1955 rc = FAILED;
1956 /* Timeout case */
1957 QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
1958 "xid=%x.\n", io_req->xid);
1959 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1960 /* Issue a drain request if cleanup task times out */
1961 QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
1962 qedf_drain_request(qedf);
1963 }
1964
1965 if (io_req->sc_cmd) {
1966 if (io_req->return_scsi_cmd_on_abts)
1967 qedf_scsi_done(qedf, io_req, DID_ERROR);
1968 }
1969
1970 if (rc == SUCCESS)
1971 io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
1972 else
1973 io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
1974
1975 return rc;
1976 }
1977
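/*
 * Invoked from CQE processing when the firmware acknowledges an exchange
 * cleanup: clear the in-cleanup flag and wake the waiter in
 * qedf_initiate_cleanup() above.
 */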
1978 void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1979 struct qedf_ioreq *io_req)
1980 {
1981 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
1982 io_req->xid);
1983
1984 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1985
1986 /* Complete so we can finish cleaning up the I/O */
1987 complete(&io_req->tm_done);
1988 }
1989
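/*
 * qedf_execute_tmf() - issue a task management function as a middle-path
 * request. The flow, as implemented below, is roughly:
 *
 *   1. Allocate a QEDF_TASK_MGMT_CMD io_req and its MP DMA resources.
 *   2. Build an FCP_CMND with tm_flags set and a zeroed CDB, plus an
 *      unsolicited-command FC header addressed to the remote port.
 *   3. Post a FCOE_TASK_TYPE_MIDPATH SQE, ring the doorbell, and wait up
 *      to QEDF_TM_TIMEOUT seconds on tm_done.
 *   4. Flush outstanding I/O for the affected LUN (or every LUN for a
 *      target reset) before dropping the command reference.
 */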
1990 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
1991 uint8_t tm_flags)
1992 {
1993 struct qedf_ioreq *io_req;
1994 struct qedf_mp_req *tm_req;
1995 struct fcoe_task_context *task;
1996 struct fc_frame_header *fc_hdr;
1997 struct fcp_cmnd *fcp_cmnd;
1998 struct qedf_ctx *qedf = fcport->qedf;
1999 int rc = 0;
2000 uint16_t xid;
2001 uint32_t sid, did;
2002 int tmo = 0;
2003 unsigned long flags;
2004
2005 if (!sc_cmd) {
2006 QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
2007 return FAILED;
2008 }
2009
2010 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2011 QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
2012 return FAILED;
2014 }
2015
2016 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
2017 "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);
2018
2019 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
2020 if (!io_req) {
2021 QEDF_ERR(&(qedf->dbg_ctx), "Failed to alloc TMF io_req\n");
2022 rc = -EAGAIN;
2023 goto reset_tmf_err;
2024 }
2025
2026 /* Initialize rest of io_req fields */
2027 io_req->sc_cmd = sc_cmd;
2028 io_req->fcport = fcport;
2029 io_req->cmd_type = QEDF_TASK_MGMT_CMD;
2030
2031 /* Set the return CPU to be the same as the request one */
2032 io_req->cpu = smp_processor_id();
2033
2034 tm_req = (struct qedf_mp_req *)&(io_req->mp_req);
2035
2036 rc = qedf_init_mp_req(io_req);
2037 if (rc) {
2038 QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init "
2039 "failed\n");
2040 kref_put(&io_req->refcount, qedf_release_cmd);
2041 goto reset_tmf_err;
2042 }
2043
2044 /* Set TM flags */
2045 io_req->io_req_flags = 0;
2046 tm_req->tm_flags = tm_flags;
2047
2048 /* Default is to return a SCSI command when an error occurs */
2049 io_req->return_scsi_cmd_on_abts = true;
2050
2051 /* Fill FCP_CMND */
2052 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
2053 fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
2054 memset(fcp_cmnd->fc_cdb, 0, sizeof(fcp_cmnd->fc_cdb));
2055 fcp_cmnd->fc_dl = 0;
2056
2057 /* Fill FC header */
2058 fc_hdr = &(tm_req->req_fc_hdr);
2059 sid = fcport->sid;
2060 did = fcport->rdata->ids.port_id;
2061 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did,
2062 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
2063 FC_FC_SEQ_INIT, 0);
2064 /* Obtain exchange id */
2065 xid = io_req->xid;
2066
2067 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
2068 "0x%x\n", xid);
2069
2070 /* Initialize task context for this IO request */
2071 task = qedf_get_task_mem(&qedf->tasks, xid);
2072 qedf_init_mp_task(io_req, task);
2073
2074 init_completion(&io_req->tm_done);
2075
2076 /* Obtain free SQ entry */
2077 spin_lock_irqsave(&fcport->rport_lock, flags);
2078 qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
2079
2080 /* Ring doorbell */
2081 qedf_ring_doorbell(fcport);
2082 spin_unlock_irqrestore(&fcport->rport_lock, flags);
2083
2084 tmo = wait_for_completion_timeout(&io_req->tm_done,
2085 QEDF_TM_TIMEOUT * HZ);
2086
2087 if (!tmo) {
2088 rc = FAILED;
2089 QEDF_ERR(&(qedf->dbg_ctx), "Timed out waiting for TM completion.\n");
2090 } else {
2091 /* Check TMF response code */
2092 if (io_req->fcp_rsp_code == 0)
2093 rc = SUCCESS;
2094 else
2095 rc = FAILED;
2096 }
2097
2098 if (tm_flags == FCP_TMF_LUN_RESET)
2099 qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
2100 else
2101 qedf_flush_active_ios(fcport, -1);
2102
2103 kref_put(&io_req->refcount, qedf_release_cmd);
2104
2105 if (rc != SUCCESS) {
2106 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
2107 rc = FAILED;
2108 } else {
2109 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "task mgmt command succeeded.\n");
2110 rc = SUCCESS;
2111 }
2112 reset_tmf_err:
2113 return rc;
2114 }
2115
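/*
 * Note: the fcp_rsp_code tested above is the RSP_CODE byte from the
 * FCP_RSP_INFO field; 0 is FCP_TMF_CMPL (include/scsi/fc/fc_fcp.h), which
 * is why it is the only value treated as SUCCESS.
 */
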
2116 int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
2117 {
2118 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
2119 struct fc_rport_libfc_priv *rp = rport->dd_data;
2120 struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
2121 struct qedf_ctx *qedf;
2122 struct fc_lport *lport;
2123 int rc = SUCCESS;
2124 int rval;
2125
2126 rval = fc_remote_port_chkready(rport);
2127
2128 if (rval) {
2129 QEDF_ERR(NULL, "device_reset rport not ready\n");
2130 rc = FAILED;
2131 goto tmf_err;
2132 }
2133
2134 if (!fcport) {
2135 QEDF_ERR(NULL, "device_reset: rport is NULL\n");
2136 rc = FAILED;
2137 goto tmf_err;
2138 }
2139
2140 qedf = fcport->qedf;
2141 lport = qedf->lport;
2142
2143 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
2144 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
2145 rc = SUCCESS;
2146 goto tmf_err;
2147 }
2148
2149 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
2150 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
2151 rc = FAILED;
2152 goto tmf_err;
2153 }
2154
2155 rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
2156
2157 tmf_err:
2158 return rc;
2159 }
2160
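/*
 * Example caller (a sketch only; the real SCSI error-handler entry points
 * live in qedf_main.c and may differ in detail):
 *
 *	static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
 *	{
 *		return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
 *	}
 *
 * FCP_TMF_LUN_RESET and FCP_TMF_TGT_RESET are defined in
 * include/scsi/fc/fc_fcp.h.
 */
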
2161 void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2162 struct qedf_ioreq *io_req)
2163 {
2164 struct fcoe_cqe_rsp_info *fcp_rsp;
2165 struct fcoe_cqe_midpath_info *mp_info;
2166
2168 /* Get TMF response length from CQE */
2169 mp_info = &cqe->cqe_info.midpath_info;
2170 io_req->mp_req.resp_len = mp_info->data_placement_size;
2171 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2172 "Response len is %d.\n", io_req->mp_req.resp_len);
2173
2174 fcp_rsp = &cqe->cqe_info.rsp_info;
2175 qedf_parse_fcp_rsp(io_req, fcp_rsp);
2176
2177 io_req->sc_cmd = NULL;
2178 complete(&io_req->tm_done);
2179 }
2180
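/*
 * Unsolicited frames (e.g. incoming ELS) arrive via the BDQ: the CQE's
 * opaque data indexes the pre-posted receive buffer, the frame is copied
 * into a freshly allocated fc_frame, and delivery to libfc is deferred to
 * qedf_io_wq because this path runs in atomic context. The BDQ producer
 * index is advanced even on error so the buffer is always recycled.
 */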
2181 void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
2182 struct fcoe_cqe *cqe)
2183 {
2184 unsigned long flags;
2185 uint16_t tmp;
2186 uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
2187 u32 payload_len, crc;
2188 struct fc_frame_header *fh;
2189 struct fc_frame *fp;
2190 struct qedf_io_work *io_work;
2191 u32 bdq_idx;
2192 void *bdq_addr;
2193
2194 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2195 "address.hi=%x address.lo=%x opaque_data.hi=%x "
2196 "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
2197 le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
2198 le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
2199 le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
2200 le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
2201 qedf->bdq_prod_idx, pktlen);
2202
2203 bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
2204 if (bdq_idx >= QEDF_BDQ_SIZE) {
2205 QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %u.\n",
2206 bdq_idx);
2207 goto increment_prod;
2208 }
2209
2210 bdq_addr = qedf->bdq[bdq_idx].buf_addr;
2211 if (!bdq_addr) {
2212 QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
2213 "unsolicited packet.\n");
2214 goto increment_prod;
2215 }
2216
2217 if (qedf_dump_frames) {
2218 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2219 "BDQ frame is at addr=%p.\n", bdq_addr);
2220 print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
2221 (void *)bdq_addr, pktlen, false);
2222 }
2223
2224 /* Allocate frame */
2225 payload_len = pktlen - sizeof(struct fc_frame_header);
2226 fp = fc_frame_alloc(qedf->lport, payload_len);
2227 if (!fp) {
2228 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
2229 goto increment_prod;
2230 }
2231
2232 /* Copy data from BDQ buffer into fc_frame struct */
2233 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
2234 memcpy(fh, (void *)bdq_addr, pktlen);
2235
2236 /* Initialize the frame so libfc sees it as a valid frame */
2237 crc = fcoe_fc_crc(fp);
2238 fc_frame_init(fp);
2239 fr_dev(fp) = qedf->lport;
2240 fr_sof(fp) = FC_SOF_I3;
2241 fr_eof(fp) = FC_EOF_T;
2242 fr_crc(fp) = cpu_to_le32(~crc);
2243
2244 /*
2245 * We need to return the frame back up to libfc in a non-atomic
2246 * context
2247 */
2248 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2249 if (!io_work) {
2250 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2251 "work for I/O completion.\n");
2252 fc_frame_free(fp);
2253 goto increment_prod;
2254 }
2255 memset(io_work, 0, sizeof(struct qedf_io_work));
2256
2257 INIT_WORK(&io_work->work, qedf_fp_io_handler);
2258
2259 /* Copy contents of CQE for deferred processing */
2260 memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2261
2262 io_work->qedf = qedf;
2263 io_work->fp = fp;
2264
2265 queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
2266 increment_prod:
2267 spin_lock_irqsave(&qedf->hba_lock, flags);
2268
2269 /* Increment producer to let f/w know we've handled the frame */
2270 qedf->bdq_prod_idx++;
2271
2272 /* Producer index wraps at uint16_t boundary */
2273 if (qedf->bdq_prod_idx == 0xffff)
2274 qedf->bdq_prod_idx = 0;
2275
2276 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2277 tmp = readw(qedf->bdq_primary_prod);
2278 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2279 tmp = readw(qedf->bdq_secondary_prod);
2280
2281 spin_unlock_irqrestore(&qedf->hba_lock, flags);
2282 }
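
/*
 * A note on the producer update above: each readw() following a writew()
 * serves as a read-back to flush the posted write to the BDQ producer
 * register before hba_lock is dropped; the value stored in tmp is
 * intentionally unused.
 */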