/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include <scsi/scsi_tcq.h>
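
/*
 * Set (or re-arm) the per-command timeout. The handler, qedf_cmd_timeout()
 * below, runs from qedf->timer_work_queue once timer_msec milliseconds
 * elapse, unless the delayed work is cancelled first.
 */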
void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	unsigned int timer_msec)
{
	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
	    msecs_to_jiffies(timer_msec));
}
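
/*
 * Timeout handler for outstanding commands. Dispatches on the command type:
 * a timed-out ABTS is cleaned up at the firmware and its waiter completed,
 * a timed-out ELS is completed through its callback without a cleanup, and
 * a timed-out sequence cleanup is forced to completion.
 */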
static void qedf_cmd_timeout(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, timeout_work.work);
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	struct qedf_rport *fcport = io_req->fcport;
	u8 op = 0;

	switch (io_req->cmd_type) {
	case QEDF_ABTS:
		QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
		    io_req->xid);
		/* Cleanup timed out ABTS */
		qedf_initiate_cleanup(io_req, true);
		complete(&io_req->abts_done);

		/*
		 * Need to call kref_put for reference taken when initiate_abts
		 * was called since abts_compl won't be called now that we've
		 * cleaned up the task.
		 */
		kref_put(&io_req->refcount, qedf_release_cmd);

		/*
		 * Now that the original I/O and the ABTS are complete see
		 * if we need to reconnect to the target.
		 */
		qedf_restart_rport(fcport);
		break;
	case QEDF_ELS:
		kref_get(&io_req->refcount);
		/*
		 * Don't attempt to clean an ELS timeout as any subsequent
		 * ABTS or cleanup requests just hang. For now just free
		 * the resources of the original I/O and the RRQ.
		 */
		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
		    io_req->xid);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		/* Call callback function to complete command */
		if (io_req->cb_func && io_req->cb_arg) {
			op = io_req->cb_arg->op;
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}
		qedf_initiate_cleanup(io_req, true);
		kref_put(&io_req->refcount, qedf_release_cmd);
		break;
	case QEDF_SEQ_CLEANUP:
		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
		    "xid=0x%x.\n", io_req->xid);
		qedf_initiate_cleanup(io_req, true);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
		break;
	default:
		break;
	}
}
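
/*
 * Tear down the command manager: free each command's DMA'd BD table and
 * sense buffer, the io_bdt pool itself, and finally the manager structure.
 * Presumably called only at teardown; rrq_work is cancelled synchronously
 * for every command first.
 */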
void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct qedf_ctx *qedf = cmgr->qedf;
	size_t bd_tbl_sz;
	u16 min_xid = QEDF_MIN_XID;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
	int num_ios;
	int i;
	struct qedf_ioreq *io_req;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		/* Make sure we free per command sense buffer */
		if (io_req->sense_buffer)
			dma_free_coherent(&qedf->pdev->dev,
			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
			    io_req->sense_buffer_dma);
		cancel_delayed_work_sync(&io_req->rrq_work);
	}

	/* Free command manager itself */
	vfree(cmgr);
}
static void qedf_handle_rrq(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, rrq_work.work);

	qedf_send_rrq(io_req);
}
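
/*
 * Allocate the global command manager. One qedf_ioreq is created per task
 * ID in [QEDF_MIN_XID, FCOE_PARAMS_NUM_TASKS - 1], each with its own
 * delayed work items, a DMA-coherent sense buffer, and an io_bdt whose BD
 * table holds up to QEDF_MAX_BDS_PER_CMD fcoe_sge entries.
 */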
struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
{
	struct qedf_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct qedf_ioreq *io_req;
	u16 xid;
	int i;
	int num_ios;
	u16 min_xid = QEDF_MIN_XID;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);

	/* Make sure num_queues is already set before calling this function */
	if (!qedf->num_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
		return NULL;
	}

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
		    "max_xid 0x%x.\n", min_xid, max_xid);
		return NULL;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
	    "0x%x.\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;

	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
	if (!cmgr) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
		return NULL;
	}

	cmgr->qedf = qedf;
	spin_lock_init(&cmgr->lock);

	/*
	 * Initialize list of qedf_ioreq.
	 */
	xid = QEDF_MIN_XID;

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);

		io_req->xid = xid++;

		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);

		/* Allocate DMA memory to hold sense buffer */
		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
		    GFP_KERNEL);
		if (!io_req->sense_buffer)
			goto mem_err;
	}

	/* Allocate pool of io_bdts - one for each qedf_ioreq */
	cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
	    GFP_KERNEL);

	if (!cmgr->io_bdt_pool) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
		goto mem_err;
	}

	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
		    GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
			    "io_bdt_pool[%d].\n", i);
			goto mem_err;
		}
	}

	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge),
		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
			    "bdt_tbl[%d].\n", i);
			goto mem_err;
		}
	}
	atomic_set(&cmgr->free_list_cnt, num_ios);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "cmgr->free_list_cnt=%d.\n",
	    atomic_read(&cmgr->free_list_cnt));

	return cmgr;

mem_err:
	qedf_cmd_mgr_free(cmgr);
	return NULL;
}
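
/*
 * Allocate a free command from the manager for an offloaded rport.
 * Allocation is throttled three ways: the rport must have free SQEs, fewer
 * than NUM_RW_TASKS_PER_CONNECTION active I/Os, and the global free list
 * must keep GBL_RSVD_TASKS commands in reserve. The search for a free XID
 * is a round-robin scan under cmd_mgr->lock.
 */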
struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
	struct qedf_ioreq *io_req = NULL;
	struct io_bdt *bd_tbl;
	u16 xid;
	uint32_t free_sqes;
	int i;
	unsigned long flags;

	free_sqes = atomic_read(&fcport->free_sqes);

	if (!free_sqes) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_sqes=%d.\n ",
		    free_sqes);
		goto out_failed;
	}

	/* Limit the number of outstanding R/W tasks */
	if ((atomic_read(&fcport->num_active_ios) >=
	    NUM_RW_TASKS_PER_CONNECTION)) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, num_active_ios=%d.\n",
		    atomic_read(&fcport->num_active_ios));
		goto out_failed;
	}

	/* Limit global TIDs for certain tasks */
	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_list_cnt=%d.\n",
		    atomic_read(&cmd_mgr->free_list_cnt));
		goto out_failed;
	}

	spin_lock_irqsave(&cmd_mgr->lock, flags);
	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[cmd_mgr->idx];
		cmd_mgr->idx++;
		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
			cmd_mgr->idx = 0;

		/* Check to make sure command was previously freed */
		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
			break;
	}

	if (i == FCOE_PARAMS_NUM_TASKS) {
		spin_unlock_irqrestore(&cmd_mgr->lock, flags);
		goto out_failed;
	}

	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);

	atomic_inc(&fcport->num_active_ios);
	atomic_dec(&fcport->free_sqes);
	xid = io_req->xid;
	atomic_dec(&cmd_mgr->free_list_cnt);

	io_req->cmd_mgr = cmd_mgr;
	io_req->fcport = fcport;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	if (bd_tbl == NULL) {
		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
		kref_put(&io_req->refcount, qedf_release_cmd);
		goto out_failed;
	}
	bd_tbl->io_req = io_req;
	io_req->cmd_type = cmd_type;

	/* Reset sequence offset data */
	io_req->rx_buf_off = 0;
	io_req->tx_buf_off = 0;
	io_req->rx_id = 0xffff; /* No OX_ID */

	return io_req;

out_failed:
	/* Record failure for stats and return NULL to caller */
	qedf->alloc_failures++;

	return NULL;
}
static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	uint64_t sz = sizeof(struct fcoe_sge);

	/* clear tm flags */
	mp_req->tm_flags = 0;
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->req_buf, mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->resp_buf, mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}
void qedf_release_cmd(struct kref *ref)
{
	struct qedf_ioreq *io_req =
	    container_of(ref, struct qedf_ioreq, refcount);
	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	struct qedf_rport *fcport = io_req->fcport;

	if (io_req->cmd_type == QEDF_ELS ||
	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
		qedf_free_mp_resc(io_req);

	atomic_inc(&cmd_mgr->free_list_cnt);
	atomic_dec(&fcport->num_active_ios);
	if (atomic_read(&fcport->num_active_ios) < 0)
		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");

	/* Increment task retry identifier now that the request is released */
	io_req->task_retry_identifier++;

	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
}
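
/*
 * Split one large S/G element into multiple BDs of at most QEDF_BD_SPLIT_SZ
 * bytes each, starting at bd_index in the command's BD table. Returns the
 * number of BD entries written.
 */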
static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
	int bd_index)
{
	struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len > QEDF_BD_SPLIT_SZ)
			frag_size = QEDF_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
		bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
		bd[bd_index + sg_frags].size = (uint16_t)frag_size;

		addr += (u64)frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;
}
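
/*
 * Map the SCSI S/G list into the command's BD table. A single small SGE is
 * sent as a cached SGL; otherwise any element that is not page aligned at
 * the required boundary forces the slow-path SGL mode. Returns the number
 * of valid BDs.
 */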
static int qedf_map_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct Scsi_Host *host = sc->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr, end_addr;
	int i;

	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
	    scsi_sg_count(sc), sc->sc_data_direction);

	sg = scsi_sglist(sc);

	/*
	 * New condition to send single SGE as cached-SGL with length less
	 * than 64k.
	 */
	if ((sg_count == 1) && (sg_dma_len(sg) <=
	    QEDF_MAX_SGLEN_FOR_CACHESGL)) {
		sg_len = sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);

		bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
		bd[bd_count].sge_addr.hi = (addr >> 32);
		bd[bd_count].size = (u16)sg_len;

		return ++bd_count;
	}

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);
		end_addr = (u64)(addr + sg_len);

		/*
		 * First s/g element in the list so check if the end_addr
		 * is page aligned. Also check to make sure the length is
		 * at least page size.
		 */
		if ((i == 0) && (sg_count > 1) &&
		    ((end_addr % QEDF_PAGE_SIZE) ||
		    sg_len < QEDF_PAGE_SIZE))
			io_req->use_slowpath = true;
		/*
		 * Last s/g element so check if the start address is page
		 * aligned.
		 */
		else if ((i == (sg_count - 1)) && (sg_count > 1) &&
		    (addr % QEDF_PAGE_SIZE))
			io_req->use_slowpath = true;
		/*
		 * Intermediate s/g element so check if start and end address
		 * are page aligned.
		 */
		else if ((i != 0) && (i != (sg_count - 1)) &&
		    ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
			io_req->use_slowpath = true;

		if (sg_len > QEDF_MAX_BD_LEN) {
			sg_frags = qedf_split_bd(io_req, addr, sg_len,
			    bd_count);
		} else {
			sg_frags = 1;
			bd[bd_count].sge_addr.lo = U64_LO(addr);
			bd[bd_count].sge_addr.hi = U64_HI(addr);
			bd[bd_count].size = (uint16_t)sg_len;
		}

		bd_count += sg_frags;
		byte_count += sg_len;
	}

	if (byte_count != scsi_bufflen(sc))
		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
		    "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
		    scsi_bufflen(sc), io_req->xid);

	return bd_count;
}
static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = qedf_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
		bd[0].size = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}
static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
	struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/* fcp_cmnd is 32 bytes */
	memset(fcp_cmnd, 0, FCP_CMND_LEN);

	/* 8 bytes: SCSI LUN info */
	int_to_scsilun(sc_cmd->device->lun,
	    (struct scsi_lun *)&fcp_cmnd->fc_lun);

	/* 4 bytes: flag info */
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;
	fcp_cmnd->fc_cmdref = 0;

	/* Populate data direction */
	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
	else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;

	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;

	/* 16 bytes: CDB information */
	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	/* 4 bytes: FCP data length */
	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
}
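
/*
 * Populate the firmware task context for a regular read or write command:
 * the Y/T/M/U-storm sections, the SGL mode (single, fast, or slow), and the
 * byte-swapped FCP_CMND payload. The completion queue index is derived from
 * the submitting CPU so the response comes back on the same queue.
 */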
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
	struct qedf_ioreq *io_req, u32 *ptu_invalidate,
	struct fcoe_task_context *task_ctx)
{
	enum fcoe_task_type task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	union fcoe_data_desc_ctx *data_desc;
	u32 *fcp_cmnd;
	u32 tmp_fcp_cmnd[8];
	int cnt, i;
	int bd_count;
	struct qedf_ctx *qedf = fcport->qedf;
	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
	u8 tmp_sgl_mode = 0;
	u8 mst_sgl_mode = 0;

	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
	io_req->task = task_ctx;

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
	else
		task_type = FCOE_TASK_TYPE_READ_INITIATOR;

	/* Y Storm context */
	task_ctx->ystorm_st_context.expect_first_xfer = 1;
	task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len;
	/* Check if this is required */
	task_ctx->ystorm_st_context.ox_id = io_req->xid;
	task_ctx->ystorm_st_context.task_rety_identifier =
	    io_req->task_retry_identifier;

	/* T Storm ag context */
	SET_FIELD(task_ctx->tstorm_ag_context.flags0,
	    TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE);
	task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;

	/* T Storm st context */
	SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
	    FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
	    1);
	task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;

	task_ctx->tstorm_st_context.read_only.dev_type =
	    FCOE_TASK_DEV_TYPE_DISK;
	task_ctx->tstorm_st_context.read_only.conf_supported = 0;
	task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;

	/* Completion queue for response. */
	task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx;
	task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size =
	    io_req->data_xfer_len;
	task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val =
	    lport->e_d_tov;

	task_ctx->ustorm_ag_context.global_cq_num = cq_idx;
	io_req->fp_idx = cq_idx;

	bd_count = bd_tbl->bd_valid;
	if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
		/* Setup WRITE task */
		struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;

		task_ctx->ystorm_st_context.task_type =
		    FCOE_TASK_TYPE_WRITE_INITIATOR;
		data_desc = &task_ctx->ystorm_st_context.data_desc;

		if (io_req->use_slowpath) {
			SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
			    YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
			    FCOE_SLOW_SGL);
			data_desc->slow.base_sgl_addr.lo =
			    U64_LO(bd_tbl->bd_tbl_dma);
			data_desc->slow.base_sgl_addr.hi =
			    U64_HI(bd_tbl->bd_tbl_dma);
			data_desc->slow.remainder_num_sges = bd_count;
			data_desc->slow.curr_sge_off = 0;
			data_desc->slow.curr_sgl_index = 0;
			qedf->slow_sge_ios++;
			io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
		} else {
			SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
			    YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
			    (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
			    FCOE_MUL_FAST_SGES);

			if (bd_count == 1) {
				data_desc->single_sge.sge_addr.lo =
				    fcoe_bd_tbl->sge_addr.lo;
				data_desc->single_sge.sge_addr.hi =
				    fcoe_bd_tbl->sge_addr.hi;
				data_desc->single_sge.size =
				    fcoe_bd_tbl->size;
				data_desc->single_sge.is_valid_sge = 0;
				qedf->single_sge_ios++;
				io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
			} else {
				data_desc->fast.sgl_start_addr.lo =
				    U64_LO(bd_tbl->bd_tbl_dma);
				data_desc->fast.sgl_start_addr.hi =
				    U64_HI(bd_tbl->bd_tbl_dma);
				data_desc->fast.sgl_byte_offset =
				    data_desc->fast.sgl_start_addr.lo &
				    (QEDF_PAGE_SIZE - 1);
				if (data_desc->fast.sgl_byte_offset > 0)
					QEDF_ERR(&(qedf->dbg_ctx),
					    "byte_offset=%u for xid=0x%x.\n",
					    io_req->xid,
					    data_desc->fast.sgl_byte_offset);
				data_desc->fast.task_reuse_cnt =
				    io_req->reuse_count;
				io_req->reuse_count++;
				if (io_req->reuse_count == QEDF_MAX_REUSE) {
					*ptu_invalidate = 1;
					io_req->reuse_count = 0;
				}
				qedf->fast_sge_ios++;
				io_req->sge_type = QEDF_IOREQ_FAST_SGE;
			}
		}

		/* T Storm context */
		task_ctx->tstorm_st_context.read_only.task_type =
		    FCOE_TASK_TYPE_WRITE_INITIATOR;

		/* M Storm context */
		tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
		    YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE);
		SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
		    FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE,
		    tmp_sgl_mode);
	} else {
		/* Setup READ task */

		/* M Storm context */
		struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;

		data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
		task_ctx->mstorm_st_context.fp.data_2_trns_rem =
		    io_req->data_xfer_len;

		if (io_req->use_slowpath) {
			SET_FIELD(
			    task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
			    FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
			    FCOE_SLOW_SGL);
			data_desc->slow.base_sgl_addr.lo =
			    U64_LO(bd_tbl->bd_tbl_dma);
			data_desc->slow.base_sgl_addr.hi =
			    U64_HI(bd_tbl->bd_tbl_dma);
			data_desc->slow.remainder_num_sges =
			    bd_count;
			data_desc->slow.curr_sge_off = 0;
			data_desc->slow.curr_sgl_index = 0;
			qedf->slow_sge_ios++;
			io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
		} else {
			SET_FIELD(
			    task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
			    FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
			    (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
			    FCOE_MUL_FAST_SGES);

			if (bd_count == 1) {
				data_desc->single_sge.sge_addr.lo =
				    fcoe_bd_tbl->sge_addr.lo;
				data_desc->single_sge.sge_addr.hi =
				    fcoe_bd_tbl->sge_addr.hi;
				data_desc->single_sge.size =
				    fcoe_bd_tbl->size;
				data_desc->single_sge.is_valid_sge = 0;
				qedf->single_sge_ios++;
				io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
			} else {
				data_desc->fast.sgl_start_addr.lo =
				    U64_LO(bd_tbl->bd_tbl_dma);
				data_desc->fast.sgl_start_addr.hi =
				    U64_HI(bd_tbl->bd_tbl_dma);
				data_desc->fast.sgl_byte_offset = 0;
				data_desc->fast.task_reuse_cnt =
				    io_req->reuse_count;
				io_req->reuse_count++;
				if (io_req->reuse_count == QEDF_MAX_REUSE) {
					*ptu_invalidate = 1;
					io_req->reuse_count = 0;
				}
				qedf->fast_sge_ios++;
				io_req->sge_type = QEDF_IOREQ_FAST_SGE;
			}
		}

		/* Y Storm context */
		task_ctx->ystorm_st_context.expect_first_xfer = 0;
		task_ctx->ystorm_st_context.task_type =
		    FCOE_TASK_TYPE_READ_INITIATOR;

		/* T Storm context */
		task_ctx->tstorm_st_context.read_only.task_type =
		    FCOE_TASK_TYPE_READ_INITIATOR;
		mst_sgl_mode = GET_FIELD(
		    task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
		    FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE);
		SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
		    FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
		    mst_sgl_mode);
	}

	/* fill FCP_CMND IU */
	fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque;
	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);

	/* Swap fcp_cmnd since FC is big endian */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);

	for (i = 0; i < cnt; i++) {
		*fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]);
		fcp_cmnd++;
	}

	/* M Storm context - Sense buffer */
	task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
	    U64_LO(io_req->sense_buffer_dma);
	task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
	    U64_HI(io_req->sense_buffer_dma);
}
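
/*
 * Populate the firmware task context for a middle-path request (ELS, task
 * management, or ABTS). Middle-path exchanges use the single-entry
 * request/response BDs set up by qedf_init_mp_req() and always complete on
 * CQ #0.
 */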
void qedf_init_mp_task(struct qedf_ioreq *io_req,
	struct fcoe_task_context *task_ctx)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_rport *fcport = io_req->fcport;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	struct fc_frame_header *fc_hdr;
	enum fcoe_task_type task_type = 0;
	union fcoe_data_desc_ctx *data_desc;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task "
	    "for cmd_type = %d\n", io_req->cmd_type);

	qedf->control_requests++;

	/* Obtain task_type */
	if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) ||
	    (io_req->cmd_type == QEDF_ELS)) {
		task_type = FCOE_TASK_TYPE_MIDPATH;
	} else if (io_req->cmd_type == QEDF_ABTS) {
		task_type = FCOE_TASK_TYPE_ABTS;
	}

	memset(task_ctx, 0, sizeof(struct fcoe_task_context));

	/* Setup the task from io_req for easy reference */
	io_req->task = task_ctx;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n",
	    task_type);

	/* YSTORM only */
	{
		/* Initialize YSTORM task context */
		struct fcoe_tx_mid_path_params *task_fc_hdr =
		    &task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path;
		memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
		task_ctx->ystorm_st_context.task_rety_identifier =
		    io_req->task_retry_identifier;

		/* Init SGL parameters */
		if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
		    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
			data_desc = &task_ctx->ystorm_st_context.data_desc;
			data_desc->slow.base_sgl_addr.lo =
			    U64_LO(mp_req->mp_req_bd_dma);
			data_desc->slow.base_sgl_addr.hi =
			    U64_HI(mp_req->mp_req_bd_dma);
			data_desc->slow.remainder_num_sges = 1;
			data_desc->slow.curr_sge_off = 0;
			data_desc->slow.curr_sgl_index = 0;
		}

		fc_hdr = &(mp_req->req_fc_hdr);
		if (task_type == FCOE_TASK_TYPE_MIDPATH) {
			fc_hdr->fh_ox_id = io_req->xid;
			fc_hdr->fh_rx_id = htons(0xffff);
		} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
			fc_hdr->fh_rx_id = io_req->xid;
		}

		/* Fill FC Header into middle path buffer */
		task_fc_hdr->parameter = fc_hdr->fh_parm_offset;
		task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl;
		task_fc_hdr->type = fc_hdr->fh_type;
		task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl;
		task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl;
		task_fc_hdr->rx_id = fc_hdr->fh_rx_id;
		task_fc_hdr->ox_id = fc_hdr->fh_ox_id;

		task_ctx->ystorm_st_context.data_2_trns_rem =
		    io_req->data_xfer_len;
		task_ctx->ystorm_st_context.task_type = task_type;
	}

	/* TSTORM ag context */
	task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;

	/* TSTORM st context */
	task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
	/* Always send middle-path responses on CQ #0 */
	task_ctx->tstorm_st_context.read_only.glbl_q_num = 0;
	io_req->fp_idx = 0;
	SET_FIELD(task_ctx->tstorm_ag_context.flags0,
	    TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE,
	    PROTOCOLID_FCOE);
	task_ctx->tstorm_st_context.read_only.task_type = task_type;
	SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
	    FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
	    1);
	task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;

	/* MSTORM st context */
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		/* Initialize task context */
		data_desc = &task_ctx->mstorm_st_context.fp.data_desc;

		/* Set cache sges address and length */
		data_desc->slow.base_sgl_addr.lo =
		    U64_LO(mp_req->mp_resp_bd_dma);
		data_desc->slow.base_sgl_addr.hi =
		    U64_HI(mp_req->mp_resp_bd_dma);
		data_desc->slow.remainder_num_sges = 1;
		data_desc->slow.curr_sge_off = 0;
		data_desc->slow.curr_sgl_index = 0;

		/*
		 * Also need to fill in non-fastpath response address
		 * for middle path commands.
		 */
		task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
		    U64_LO(mp_req->mp_resp_bd_dma);
		task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
		    U64_HI(mp_req->mp_resp_bd_dma);
	}

	/* USTORM context */
	task_ctx->ustorm_ag_context.global_cq_num = 0;

	/* I/O stats. Middle path commands always use slow SGEs */
	qedf->slow_sge_ios++;
	io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
}
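
/*
 * Write one work-queue entry to the rport's send queue. The producer index
 * wraps at the SQ size; the WQE request type is derived from the firmware
 * task type. The doorbell is rung separately by qedf_ring_doorbell().
 */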
void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate,
	enum fcoe_task_type req_type, u32 offset)
{
	struct fcoe_wqe *sqe;
	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));

	sqe = &fcport->sq[fcport->sq_prod_idx];

	fcport->sq_prod_idx++;
	fcport->fw_sq_prod_idx++;
	if (fcport->sq_prod_idx == total_sqe)
		fcport->sq_prod_idx = 0;

	switch (req_type) {
	case FCOE_TASK_TYPE_WRITE_INITIATOR:
	case FCOE_TASK_TYPE_READ_INITIATOR:
		SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD);
		if (ptu_invalidate)
			SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1);
		break;
	case FCOE_TASK_TYPE_MIDPATH:
		SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH);
		break;
	case FCOE_TASK_TYPE_ABTS:
		SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
		    SEND_FCOE_ABTS_REQUEST);
		break;
	case FCOE_TASK_TYPE_EXCHANGE_CLEANUP:
		SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
		    FCOE_EXCHANGE_CLEANUP);
		break;
	case FCOE_TASK_TYPE_SEQUENCE_CLEANUP:
		SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
		    FCOE_SEQUENCE_RECOVERY);
		/* NOTE: offset param only used for sequence recovery */
		sqe->additional_info_union.seq_rec_updated_offset = offset;
		break;
	case FCOE_TASK_TYPE_UNSOLICITED:
		break;
	default:
		break;
	}

	sqe->task_id = xid;

	/* Make sure SQ data is coherent */
	wmb();
}
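
/*
 * Notify the hardware of new SQ entries by writing the doorbell with the
 * firmware producer index.
 */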
void qedf_ring_doorbell(struct qedf_rport *fcport)
{
	struct fcoe_db_data dbell = { 0 };

	dbell.agg_flags = 0;

	dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
	dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
	dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;

	dbell.sq_prod = fcport->fw_sq_prod_idx;
	writel(*(u32 *)&dbell, fcport->p_doorbell);
	/* Make sure SQ index is updated so f/w processes requests in order */
	wmb();
	mmiowb();
}
static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
	int direction)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_io_log *io_log;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	unsigned long flags;
	uint8_t op;

	spin_lock_irqsave(&qedf->io_trace_lock, flags);

	io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
	io_log->direction = direction;
	io_log->task_id = io_req->xid;
	io_log->port_id = fcport->rdata->ids.port_id;
	io_log->lun = sc_cmd->device->lun;
	io_log->op = op = sc_cmd->cmnd[0];
	io_log->lba[0] = sc_cmd->cmnd[2];
	io_log->lba[1] = sc_cmd->cmnd[3];
	io_log->lba[2] = sc_cmd->cmnd[4];
	io_log->lba[3] = sc_cmd->cmnd[5];
	io_log->bufflen = scsi_bufflen(sc_cmd);
	io_log->sg_count = scsi_sg_count(sc_cmd);
	io_log->result = sc_cmd->result;
	io_log->jiffies = jiffies;
	io_log->refcount = kref_read(&io_req->refcount);

	if (direction == QEDF_IO_TRACE_REQ) {
		/* For requests we only care about the submission CPU */
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = 0;
		io_log->rsp_cpu = 0;
	} else if (direction == QEDF_IO_TRACE_RSP) {
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = io_req->int_cpu;
		io_log->rsp_cpu = smp_processor_id();
	}

	io_log->sge_type = io_req->sge_type;

	qedf->io_trace_idx++;
	if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
		qedf->io_trace_idx = 0;

	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
}
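
/*
 * Post an allocated SCSI command to the firmware: build the BD list,
 * initialize the task context, queue an SQE, and ring the doorbell. Called
 * with fcport->rport_lock held by qedf_queuecommand().
 */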
int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fcoe_task_context *task_ctx;
	u16 xid;
	enum fcoe_task_type req_type = 0;
	u32 ptu_invalidate = 0;

	/* Initialize rest of io_req fields */
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;
	io_req->use_slowpath = false; /* Assume fast SGL by default */

	/* Record which cpu this request is associated with */
	io_req->cpu = smp_processor_id();

	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		req_type = FCOE_TASK_TYPE_READ_INITIATOR;
		io_req->io_req_flags = QEDF_READ;
		qedf->input_requests++;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
		io_req->io_req_flags = QEDF_WRITE;
		qedf->output_requests++;
	} else {
		io_req->io_req_flags = 0;
		qedf->control_requests++;
	}

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (qedf_build_bd_list_from_sg(io_req)) {
		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EAGAIN;
	}

	/* Get the task context */
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	if (!task_ctx) {
		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
		    xid);
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx);

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	/* Obtain free SQ entry */
	qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0);

	/* Ring doorbell */
	qedf_ring_doorbell(fcport);

	if (qedf_io_tracing && io_req->sc_cmd)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);

	return 0;
}
int
qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = rport->dd_data;
	struct qedf_ioreq *io_req;
	int rc = 0;
	int rval;
	unsigned long flags = 0;

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		sc_cmd->result = DID_NO_CONNECT << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	/* Retry command if we are doing a qed drain operation */
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	if (lport->state != LPORT_ST_READY ||
	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	fcport = (struct qedf_rport *)&rp[1];

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}
	if (fcport->retry_delay_timestamp) {
		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
			fcport->retry_delay_timestamp = 0;
		} else {
			/* If retry_delay timer is active, flow off the ML */
			rc = SCSI_MLQUEUE_TARGET_BUSY;
			goto exit_qcmd;
		}
	}

	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	io_req->sc_cmd = sc_cmd;

	/* Take fcport->rport_lock for posting to fcport send queue */
	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (qedf_post_io_req(fcport, io_req)) {
		QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
		rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

exit_qcmd:
	return rc;
}
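
/*
 * Unpack an FCP_RSP IU from the completion: residual counts, SCSI status,
 * the response code for task management functions, and any sense data
 * (truncated to SCSI_SENSE_BUFFERSIZE).
 */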
static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
	struct fcoe_cqe_rsp_info *fcp_rsp)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	u8 rsp_flags = fcp_rsp->rsp_flags.flags;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;
	uint8_t *rsp_info, *sense_data;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = 0;
	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
		io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
	    fcp_rsp->scsi_status_code;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
		fcp_rsp_len = fcp_rsp->fcp_rsp_len;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
		fcp_sns_len = fcp_rsp->fcp_sns_len;

	io_req->fcp_rsp_len = fcp_rsp_len;
	io_req->fcp_sns_len = fcp_sns_len;
	rsp_info = sense_data = io_req->sense_buffer;

	/* fetch fcp_rsp_code */
	if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
		/* Only for task management function */
		io_req->fcp_rsp_code = rsp_info[3];
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
		/* Adjust sense-data location. */
		sense_data += fcp_rsp_len;
	}

	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Truncating sense buffer\n");
		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
	}

	memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (fcp_sns_len)
		memcpy(sc_cmd->sense_buffer, sense_data,
		    fcp_sns_len);
}
static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
		dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
		    scsi_sg_count(sc), sc->sc_data_direction);
		io_req->bd_tbl->bd_valid = 0;
	}
}
void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	u16 xid, rval;
	struct fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	struct fcoe_cqe_rsp_info *fcp_rsp;
	struct qedf_rport *fcport;
	int refcount;
	u16 scope, qualifier = 0;
	u8 fw_residual_flag = 0;

	if (!io_req)
		return;
	if (!cqe)
		return;

	xid = io_req->xid;
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = io_req->sc_cmd;
	fcp_rsp = &cqe->cqe_info.rsp_info;

	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	if (!sc_cmd->request) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
		    "sc_cmd=%p.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request->special) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
		    "request not valid, sc_cmd=%p.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request->q) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
		    "is not valid, sc_cmd=%p.\n", sc_cmd);
		return;
	}

	fcport = io_req->fcport;

	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	qedf_unmap_sg_list(qedf, io_req);

	/* Check for FCP transport error */
	if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
		    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
		    io_req->fcp_rsp_code);
		sc_cmd->result = DID_BUS_BUSY << 16;
		goto out;
	}

	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
	if (fw_residual_flag) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
		    "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
		    fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
		    cqe->cqe_info.rsp_info.fw_residual);

		if (io_req->cdb_status == 0)
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
		else
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

		/* Abort the command since we did not get all the data */
		init_completion(&io_req->abts_done);
		rval = qedf_initiate_abts(io_req, true);
		if (rval) {
			QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
		}

		/*
		 * Set resid to the whole buffer length so we won't try to
		 * reuse any previously read data.
		 */
		scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
		goto out;
	}

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good I/O completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			refcount = kref_read(&io_req->refcount);
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			    "%d:0:%d:%d xid=0x%0x op=0x%02x "
			    "lba=%02x%02x%02x%02x cdb_status=%d "
			    "fcp_resid=0x%x refcount=%d.\n",
			    qedf->lport->host->host_no, sc_cmd->device->id,
			    sc_cmd->device->lun, io_req->xid,
			    sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
			    sc_cmd->cmnd[4], sc_cmd->cmnd[5],
			    io_req->cdb_status, io_req->fcp_resid,
			    refcount);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
			    io_req->cdb_status == SAM_STAT_BUSY) {
				/*
				 * Check whether we need to set retry_delay at
				 * all based on retry_delay module parameter
				 * and the status qualifier.
				 */

				/* Upper 2 bits */
				scope = fcp_rsp->retry_delay_timer & 0xC000;
				/* Lower 14 bits */
				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;

				if (qedf_retry_delay &&
				    scope > 0 && qualifier > 0 &&
				    qualifier <= 0x3FEF) {
					/* Check we don't go over the max */
					if (qualifier > QEDF_RETRY_DELAY_MAX)
						qualifier =
						    QEDF_RETRY_DELAY_MAX;
					fcport->retry_delay_timestamp =
					    jiffies + (qualifier * HZ / 10);
				}
			}
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
		    io_req->fcp_status);
		break;
	}

out:
	if (qedf_io_tracing)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
}
/* Return a SCSI command in some other context besides a normal completion */
void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	int result)
{
	u16 xid;
	struct scsi_cmnd *sc_cmd;
	int refcount;

	if (!io_req)
		return;

	xid = io_req->xid;
	sc_cmd = io_req->sc_cmd;

	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	qedf_unmap_sg_list(qedf, io_req);

	sc_cmd->result = result << 16;
	refcount = kref_read(&io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%d: Completing "
	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
	    "allowed=%d retries=%d refcount=%d.\n",
	    qedf->lport->host->host_no, sc_cmd->device->id,
	    sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
	    sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
	    sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
	    refcount);

	/*
	 * Set resid to the whole buffer length so we won't try to reuse any
	 * previously read data.
	 */
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));

	if (qedf_io_tracing)
		qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
}
/*
 * Handle warning type CQE completions. This is mainly used for REC timer
 * popping.
 */
void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval, i;
	struct qedf_rport *fcport = io_req->fcport;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;

	if (!cqe)
		return;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
	    "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
	    "err_warn_bitmap=%08x:%08x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
	    "rx_buff_off=%08x, rx_id=%04x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	/* Normalize the error bitmap value to just an unsigned int */
	err_warn_bit_map = (u64)
	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
	for (i = 0; i < 64; i++) {
		if (err_warn_bit_map & (u64)((u64)1 << i)) {
			err_warn = i;
			break;
		}
	}

	/* Check if REC TOV expired if this is a tape device */
	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
		if (err_warn ==
		    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
			QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
			if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
				io_req->rx_buf_off =
				    cqe->cqe_info.err_info.rx_buf_off;
				io_req->tx_buf_off =
				    cqe->cqe_info.err_info.tx_buf_off;
				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
				rval = qedf_send_rec(io_req);
				/*
				 * We only want to abort the io_req if we
				 * can't queue the REC command as we want to
				 * keep the exchange open for recovery.
				 */
				if (rval)
					goto send_abort;
			}
			return;
		}
	}

send_abort:
	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}
/* Cleanup a command when we receive an error detection completion */
void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval;

	if (!cqe)
		return;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
	    "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
	    "err_warn_bitmap=%08x:%08x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
	    "rx_buff_off=%08x, rx_id=%04x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	if (qedf->stop_io_on_error) {
		qedf_stop_all_io(qedf);
		return;
	}

	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}
static void qedf_flush_els_req(struct qedf_ctx *qedf,
	struct qedf_ioreq *els_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
	    kref_read(&els_req->refcount));

	/*
	 * Need to distinguish this from a timeout when calling the
	 * els_req's callback.
	 */
	els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;

	/* Cancel the timer */
	cancel_delayed_work_sync(&els_req->timeout_work);

	/* Call callback function to complete command */
	if (els_req->cb_func && els_req->cb_arg) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	/* Release kref for original initiate_els */
	kref_put(&els_req->refcount, qedf_release_cmd);
}
/* A value of -1 for lun is a wild card that means flush all
 * active SCSI I/Os for the target.
 */
void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
{
	struct qedf_ioreq *io_req;
	struct qedf_ctx *qedf;
	struct qedf_cmd_mgr *cmd_mgr;
	int i, rc;

	if (!fcport)
		return;

	qedf = fcport->qedf;
	cmd_mgr = qedf->cmd_mgr;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");

	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[i];

		if (!io_req)
			continue;
		if (io_req->fcport != fcport)
			continue;
		if (io_req->cmd_type == QEDF_ELS) {
			rc = kref_get_unless_zero(&io_req->refcount);
			if (!rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
				    "Could not get kref for io_req=0x%p.\n",
				    io_req);
				continue;
			}
			qedf_flush_els_req(qedf, io_req);
			/*
			 * Release the kref and go back to the top of the
			 * loop.
			 */
			goto free_cmd;
		}

		if (!io_req->sc_cmd)
			continue;
		if (lun > 0) {
			if (io_req->sc_cmd->device->lun !=
			    (u64)lun)
				continue;
		}

		/*
		 * Use kref_get_unless_zero in the unlikely case the command
		 * we're about to flush was completed in the normal SCSI path.
		 */
		rc = kref_get_unless_zero(&io_req->refcount);
		if (!rc) {
			QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
			    "io_req=0x%p\n", io_req);
			continue;
		}
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Cleanup xid=0x%x.\n", io_req->xid);

		/* Cleanup task and return I/O mid-layer */
		qedf_initiate_cleanup(io_req, true);

free_cmd:
		kref_put(&io_req->refcount, qedf_release_cmd);
	}
}
/*
 * Initiate an ABTS middle path command. Note that we don't have to
 * initialize the task context for an ABTS task.
 */
int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
{
	struct fc_lport *lport;
	struct qedf_rport *fcport = io_req->fcport;
	struct fc_rport_priv *rdata = fcport->rdata;
	struct qedf_ctx *qedf = fcport->qedf;
	u16 xid;
	u32 r_a_tov = 0;
	int rc = 0;
	unsigned long flags;

	r_a_tov = rdata->r_a_tov;
	lport = qedf->lport;

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "tgt not offloaded\n");
		rc = 1;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = 1;
		goto abts_err;
	}

	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
		rc = 1;
		goto abts_err;
	}

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		rc = 1;
		goto abts_err;
	}

	kref_get(&io_req->refcount);

	xid = io_req->xid;
	qedf->control_requests++;
	qedf->packet_aborts++;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	/* Set the command type to abort */
	io_req->cmd_type = QEDF_ABTS;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
	    "0x%x\n", xid);

	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* Add ABTS to send queue */
	qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0);

	/* Ring doorbell */
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	return rc;
abts_err:
	/*
	 * If the ABTS task fails to queue then we need to cleanup the
	 * task at the firmware.
	 */
	qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
	return rc;
}
void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	uint32_t r_ctl;
	uint16_t xid;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
	    "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);

	cancel_delayed_work(&io_req->timeout_work);

	xid = io_req->xid;
	r_ctl = cqe->cqe_info.abts_info.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - ACC Send RRQ after R_A_TOV\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response.
		 */
		kref_get(&io_req->refcount);
		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
		    msecs_to_jiffies(qedf->lport->r_a_tov));
		break;
	/* For error cases let the cleanup return the command */
	case FC_RCTL_BA_RJT:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - RJT\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
		break;
	default:
		QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
		break;
	}

	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	/* Notify eh_abort handler that ABTS is complete */
	complete(&io_req->abts_done);

	kref_put(&io_req->refcount, qedf_release_cmd);
}
int qedf_init_mp_req(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req;
	struct fcoe_sge *mp_req_bd;
	struct fcoe_sge *mp_resp_bd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	dma_addr_t addr;
	uint64_t sz;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");

	mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct qedf_mp_req));

	if (io_req->cmd_type != QEDF_ELS) {
		mp_req->req_len = sizeof(struct fcp_cmnd);
		io_req->data_xfer_len = mp_req->req_len;
	} else
		mp_req->req_len = io_req->data_xfer_len;

	mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
	    &mp_req->req_buf_dma, GFP_KERNEL);
	if (!mp_req->req_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
	    QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
	if (!mp_req->resp_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
		    "buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct fcoe_sge);
	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_req_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_resp_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_resp_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->sge_addr.lo = U64_LO(addr);
	mp_req_bd->sge_addr.hi = U64_HI(addr);
	mp_req_bd->size = QEDF_PAGE_SIZE;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table.
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->sge_addr.lo = U64_LO(addr);
	mp_resp_bd->sge_addr.hi = U64_HI(addr);
	mp_resp_bd->size = QEDF_PAGE_SIZE;

	return 0;
}
/*
 * Last ditch effort to clear the port if it's stuck. Used only after a
 * cleanup task times out.
 */
static void qedf_drain_request(struct qedf_ctx *qedf)
{
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
		return;
	}

	/* Set bit to return all queuecommand requests as busy */
	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);

	/* Call qed drain request for function. Should be synchronous */
	qed_ops->common->drain(qedf->cdev);

	/* Settle time for CQEs to be returned */
	msleep(100);

	/* Unplug and continue */
	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
}
/*
 * Returns SUCCESS if the cleanup task does not timeout, otherwise return
 * FAILED.
 */
int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts)
{
	struct qedf_rport *fcport;
	struct qedf_ctx *qedf;
	uint16_t xid;
	struct fcoe_task_context *task;
	int tmo = 0;
	int rc = SUCCESS;
	unsigned long flags;

	fcport = io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return SUCCESS;
	}

	qedf = fcport->qedf;
	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		return SUCCESS;
	}

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
		    "cleanup processing or already completed.\n",
		    io_req->xid);
		return SUCCESS;
	}

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		return FAILED;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
	    io_req->xid);

	/* Cleanup cmds re-use the same TID as the original I/O */
	xid = io_req->xid;
	io_req->cmd_type = QEDF_CLEANUP;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	/* Obtain free SQ entry */
	spin_lock_irqsave(&fcport->rport_lock, flags);
	qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0);

	/* Ring doorbell */
	qedf_ring_doorbell(fcport);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		/* Timeout case */
		QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
		    "xid=%x.\n", io_req->xid);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		/* Issue a drain request if cleanup task times out */
		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
		qedf_drain_request(qedf);
	}

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	if (rc == SUCCESS)
		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
	else
		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;

	return rc;
}
void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
	    io_req->xid);

	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Complete so we can finish cleaning up the I/O */
	complete(&io_req->tm_done);
}
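
/*
 * Issue a task management function as a middle-path request and wait up to
 * QEDF_TM_TIMEOUT seconds for the response. On completion the affected
 * I/Os are flushed: a single LUN for FCP_TMF_LUN_RESET, otherwise every
 * I/O on the target.
 */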
static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
	uint8_t tm_flags)
{
	struct qedf_ioreq *io_req;
	struct qedf_mp_req *tm_req;
	struct fcoe_task_context *task;
	struct fc_frame_header *fc_hdr;
	struct fcp_cmnd *fcp_cmnd;
	struct qedf_ctx *qedf = fcport->qedf;
	int rc = 0;
	uint16_t xid;
	uint32_t sid, did;
	int tmo = 0;
	unsigned long flags;

	if (!sc_cmd) {
		QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
		return FAILED;
	}

	if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
		rc = FAILED;
		return FAILED;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
	    "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);

	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
	if (!io_req) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
		rc = -EAGAIN;
		goto reset_tmf_err;
	}

	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->fcport = fcport;
	io_req->cmd_type = QEDF_TASK_MGMT_CMD;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	tm_req = (struct qedf_mp_req *)&(io_req->mp_req);

	rc = qedf_init_mp_req(io_req);
	if (rc == FAILED) {
		QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init "
		    "failed\n");
		kref_put(&io_req->refcount, qedf_release_cmd);
		goto reset_tmf_err;
	}

	/* Set TM flags */
	io_req->io_req_flags = 0;
	tm_req->tm_flags = tm_flags;

	/* Default is to return a SCSI command when an error occurs */
	io_req->return_scsi_cmd_on_abts = true;

	/* Fill FCP_CMND */
	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
	memset(fcp_cmnd->fc_cdb, 0, FCP_CMND_LEN);
	fcp_cmnd->fc_dl = 0;

	/* Fill FC header */
	fc_hdr = &(tm_req->req_fc_hdr);
	sid = fcport->sid;
	did = fcport->rdata->ids.port_id;
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did,
	    FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
	    FC_FC_SEQ_INIT, 0);
	/* Obtain exchange id */
	xid = io_req->xid;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
	    "0x%x\n", xid);

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(io_req, task);

	init_completion(&io_req->tm_done);

	/* Obtain free SQ entry */
	spin_lock_irqsave(&fcport->rport_lock, flags);
	qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);

	/* Ring doorbell */
	qedf_ring_doorbell(fcport);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_TM_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
	} else {
		/* Check TMF response code */
		if (io_req->fcp_rsp_code == 0)
			rc = SUCCESS;
		else
			rc = FAILED;
	}

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
	else
		qedf_flush_active_ios(fcport, -1);

	kref_put(&io_req->refcount, qedf_release_cmd);

	if (rc != SUCCESS) {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
		rc = SUCCESS;
	}
reset_tmf_err:
	return rc;
}
int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	int rc = SUCCESS;
	int rval;

	rval = fc_remote_port_chkready(rport);

	if (rval) {
		QEDF_ERR(NULL, "device_reset rport not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	if (fcport == NULL) {
		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		rc = SUCCESS;
		goto tmf_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);

tmf_err:
	return rc;
}
void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	struct fcoe_cqe_rsp_info *fcp_rsp;
	struct fcoe_cqe_midpath_info *mp_info;

	/* Get TMF response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	io_req->mp_req.resp_len = mp_info->data_placement_size;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
	    "Response len is %d.\n", io_req->mp_req.resp_len);

	fcp_rsp = &cqe->cqe_info.rsp_info;
	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	io_req->sc_cmd = NULL;
	complete(&io_req->tm_done);
}
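
/*
 * Handle an unsolicited frame delivered through the BDQ: copy it into a
 * libfc fc_frame and defer processing to qedf_io_wq, then advance the BDQ
 * producer indices so the firmware can reuse the buffer.
 */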
void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe)
{
	unsigned long flags;
	uint16_t tmp;
	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
	u32 payload_len, crc;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct qedf_io_work *io_work;
	u32 bdq_idx;
	void *bdq_addr;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
	    "address.hi=%x address.lo=%x opaque_data.hi=%x "
	    "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
	    qedf->bdq_prod_idx, pktlen);

	bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
	if (bdq_idx >= QEDF_BDQ_SIZE) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
		    bdq_idx);
		goto increment_prod;
	}

	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
	if (!bdq_addr) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
		    "unsolicited packet.\n");
		goto increment_prod;
	}

	if (qedf_dump_frames) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		    "BDQ frame is at addr=%p.\n", bdq_addr);
		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
		    (void *)bdq_addr, pktlen, false);
	}

	/* Allocate frame */
	payload_len = pktlen - sizeof(struct fc_frame_header);
	fp = fc_frame_alloc(qedf->lport, payload_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
		goto increment_prod;
	}

	/* Copy data from BDQ buffer into fc_frame struct */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, (void *)bdq_addr, pktlen);

	/* Initialize the frame so libfc sees it as a valid frame */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = qedf->lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/*
	 * We need to return the frame back up to libfc in a non-atomic
	 * context.
	 */
	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
	if (!io_work) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
		    "work for I/O completion.\n");
		fc_frame_free(fp);
		goto increment_prod;
	}
	memset(io_work, 0, sizeof(struct qedf_io_work));

	INIT_WORK(&io_work->work, qedf_fp_io_handler);

	/* Copy contents of CQE for deferred processing */
	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

	io_work->qedf = qedf;
	io_work->fp = fp;

	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
increment_prod:
	spin_lock_irqsave(&qedf->hba_lock, flags);

	/* Increment producer to let f/w know we've handled the frame */
	qedf->bdq_prod_idx++;

	/* Producer index wraps at uint16_t boundary */
	if (qedf->bdq_prod_idx == 0xffff)
		qedf->bdq_prod_idx = 0;

	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
	tmp = readw(qedf->bdq_primary_prod);
	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
	tmp = readw(qedf->bdq_secondary_prod);

	spin_unlock_irqrestore(&qedf->hba_lock, flags);
}