/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_nvmet_rcv_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_nvmet_rcv_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_nvmet_rcv_ctx *,
					    uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
					   struct lpfc_nvmet_rcv_ctx *,
					   uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_nvmet_rcv_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;

/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	wqe = &lpfc_tsend_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is zero */

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - wqe_ar is variable */
	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

	/* Word 11 - sup, irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is zero */
	/* TRECEIVE template */
	wqe = &lpfc_treceive_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 */
	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is variable */
	/* TRSP template */
	wqe = &lpfc_trsp_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - response_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 wqes, xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

	/* Word 11 irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}

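/**
 * lpfc_nvmet_defer_release - Queue a receive context for deferred release
 * @phba: Pointer to HBA context object.
 * @ctxp: Receive context to defer.
 *
 * Marks the context with LPFC_NVMET_CTX_RLS and places it on the
 * lpfc_abts_nvmet_ctx_list so the abort / XRI-aborted path performs the
 * actual release. Caller must hold ctxp->ctxlock.
 **/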
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
	lockdep_assert_held(&ctxp->ctxlock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	if (ctxp->flag & LPFC_NVMET_CTX_RLS)
		return;

	ctxp->flag |= LPFC_NVMET_CTX_RLS;
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_ls_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;
	ctxp = cmdwqe->context2;

	if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6410 NVMET LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (!phba->targetport)
		goto out;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (tgtp) {
		if (status) {
			atomic_inc(&tgtp->xmt_ls_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
		} else {
			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
		}
	}

out:
	rsp = &ctxp->ctx.ls_req;

	lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
			 ctxp->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
			status, result, ctxp->oxid);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	rsp->done(rsp);
	kfree(ctxp);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA buffer is associated with
 * @ctxp: context to clean up
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer in the appropriate way given by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t *payload;
	uint32_t size, oxid, sid;
	int cpu;
	unsigned long iflag;

	if (ctxp->txrdy) {
		dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
			      ctxp->txrdy_phys);
		ctxp->txrdy = NULL;
		ctxp->txrdy_phys = 0;
	}

	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (ctxp->rqb_buffer) {
		nvmebuf = ctxp->rqb_buffer;
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->rqb_buffer = NULL;
		if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
			ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
		} else {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		}
	}
	ctxp->state = LPFC_NVMET_STE_FREE;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		payload = (uint32_t *)(nvmebuf->dbuf.virt);
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->txrdy = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVMET_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		ctxp->rqb_buffer = (void *)nvmebuf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			ctxp->ts_cmd_nvme = ktime_get_ns();
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);

		/* flag new work queued, replacement buffer has already
		 * been reposted
		 */
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6181 Unable to queue deferred work "
					"for oxid x%x. "
					"FCP Drop IO [x%x x%x x%x]\n",
					ctxp->oxid,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));

			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		}
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save context structure.
	 */
	cpu = smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_nvmet_rcv_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;
	uint64_t segsum;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
		return;
	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 * FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 * TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to command is completed on wire.
	 * (Segments 1 thru 8) for READDATA / WRITEDATA
	 * (Segments 1 thru 4) for READDATA_RSP
	 */
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	segsum = seg1;

	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
	if (segsum > seg2)
		return;
	seg2 -= segsum;
	segsum += seg2;

	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
	if (segsum > seg4)
		return;
	seg4 -= segsum;
	segsum += seg4;

	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
	if (segsum > seg5)
		return;
	seg5 -= segsum;
	segsum += seg5;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
		if (segsum > seg6)
			return;
		seg6 -= segsum;
		segsum += seg6;

		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
		if (segsum > seg7)
			return;
		seg7 -= segsum;
		segsum += seg7;

		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
		if (segsum > seg8)
			return;
		seg8 -= segsum;
		segsum += seg8;

		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
		if (segsum > seg9)
			return;
		seg9 -= segsum;
		segsum += seg9;

		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
			return;
		seg10 = (ctxp->ts_isr_status -
			 ctxp->ts_isr_cmd);
	} else {
		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
			return;
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;

	if (!seg6)
		return;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;

	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVMET_IO_INP;

	rsp = &ctxp->ctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp) {
			atomic_inc(&tgtp->xmt_fcp_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
		}

		logerr = LOG_NVME_IOERR;

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVMET_XBUSY;
			logerr |= LOG_NVME_ABTS;
			if (tgtp)
				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
		} else {
			ctxp->flag &= ~LPFC_NVMET_XBUSY;
		}

		lpfc_printf_log(phba, KERN_INFO, logerr,
				"6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
				ctxp->oxid, status, result, ctxp->flag);

	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVMET_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
#endif
		rsp->done(rsp);
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
		id = smp_processor_id();
		if (id < LPFC_CHECK_CPU_CNT) {
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
						"6704 CPU Check cmdcmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
		}
	}
#endif
}

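/**
 * lpfc_nvmet_xmt_ls_rsp - Transmit an NVME LS response
 * @tgtport: Pointer to the nvmet-fc target port.
 * @rsp: Pointer to the transport LS response request.
 *
 * nvmet-fc entry point (lpfc_tgttemplate.xmt_ls_rsp). Builds an LS
 * response WQE for the exchange and issues it; on failure the receive
 * buffer is freed and an abort is issued for the exchange.
 **/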
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_ls_req *rsp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct hbq_dmabuf *nvmebuf =
		(struct hbq_dmabuf *)ctxp->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

	if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
	    (ctxp->entry_cnt != 1)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6412 NVMET LS rsp state mismatch "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}
	ctxp->state = LPFC_NVMET_STE_LS_RSP;
	ctxp->entry_cnt++;

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
					  rsp->rsplen);
	if (nvmewqeq == NULL) {
		atomic_inc(&nvmep->xmt_ls_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6150 LS Drop IO x%x: Prep\n",
				ctxp->oxid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		atomic_inc(&nvmep->xmt_ls_abort);
		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
						ctxp->sid, ctxp->oxid);
		return -ENOMEM;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;

	lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		ctxp->rqb_buffer = 0;
		atomic_inc(&nvmep->xmt_ls_rsp);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&nvmep->xmt_ls_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6151 LS Drop IO x%x: Issue %d\n",
			ctxp->oxid, rc);

	lpfc_nlp_put(nvmewqeq->context1);

	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
	atomic_inc(&nvmep->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
	return -ENXIO;
}

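/**
 * lpfc_nvmet_xmt_fcp_op - Issue an NVME FCP target operation
 * @tgtport: Pointer to the nvmet-fc target port.
 * @rsp: Pointer to the transport FCP request describing the op.
 *
 * nvmet-fc entry point (lpfc_tgttemplate.fcp_op). Prepares a TSEND,
 * TRECEIVE or TRSP WQE for the exchange and posts it to the hardware
 * queue. If the WQ is full the WQE is queued on the wqfull_list to be
 * re-issued later.
 **/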
static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_cmd_nvme) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}

	/* Setup the hdw queue if not already set */
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
		int id = smp_processor_id();
		if (id < LPFC_CHECK_CPU_CNT) {
			if (rsp->hwqid != id)
				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
						"6705 CPU Check OP: "
						"cpu %d expect %d\n",
						id, rsp->hwqid);
			phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
		}
		ctxp->cpu = id; /* Setup cpu for cmpl check */
	}
#endif

	/* Sanity check */
	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6102 IO xri x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

	ctxp->flag |= LPFC_NVMET_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!ctxp->ts_cmd_nvme)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	if (rc == -EBUSY) {
		/*
		 * WQ was full, so queue nvmewqeq to be sent after
		 * WQE release CQE
		 */
		ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
		wq = ctxp->hdwq->nvme_wq;
		pring = wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
		wq->q_flag |= HBA_NVMET_WQFULL;
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		atomic_inc(&lpfc_nvmep->defer_wqfull);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}

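/**
 * lpfc_nvmet_targetport_delete - nvmet-fc targetport delete callback
 * @targetport: Pointer to the target port being deleted.
 *
 * Completes tport_unreg_cmp so the thread waiting in
 * lpfc_nvmet_destroy_targetport() can continue.
 **/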
static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	if (tport->phba->targetport)
		complete(tport->tport_unreg_cmp);
}

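/**
 * lpfc_nvmet_xmt_fcp_abort - Abort an outstanding NVME FCP operation
 * @tgtport: Pointer to the nvmet-fc target port.
 * @req: Pointer to the transport FCP request to abort.
 *
 * nvmet-fc entry point (lpfc_tgttemplate.fcp_abort). Issues an
 * unsolicited or solicited abort for the exchange depending on whether
 * any WQEs have been issued for it yet, and flushes a deferred WQE if
 * the request is still sitting on the wqfull_list.
 **/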
static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	unsigned long flags;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in process of aborting IO
	 */
	if (ctxp->flag & LPFC_NVMET_XBUSY) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVMET_ABORT_OP;

	if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		wq = ctxp->hdwq->nvme_wq;
		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
		return;
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* An state of LPFC_NVMET_STE_RCV means we have just received
	 * the NVME command and have not started processing it.
	 * (by issuing any IO WQEs on this exchange yet)
	 */
	if (ctxp->state == LPFC_NVMET_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
}

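/**
 * lpfc_nvmet_xmt_fcp_release - Release an NVME FCP exchange context
 * @tgtport: Pointer to the nvmet-fc target port.
 * @rsp: Pointer to the transport FCP request being released.
 *
 * nvmet-fc entry point (lpfc_tgttemplate.fcp_req_release). If an abort
 * is still in progress the release is deferred to the abort path;
 * otherwise the context buffer is reposted via lpfc_nvmet_ctxbuf_post().
 **/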
static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_XBUSY)
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6027 NVMET release with XBUSY flag x%x"
				" oxid x%x\n",
				ctxp->flag, ctxp->oxid);
	else if (ctxp->state != LPFC_NVMET_STE_DONE &&
		 ctxp->state != LPFC_NVMET_STE_ABORT)
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6413 NVMET release bad state %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);

	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
			 ctxp->state, aborting);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}

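/**
 * lpfc_nvmet_defer_rcv - Deferred receive processing has completed
 * @tgtport: Pointer to the nvmet-fc target port.
 * @rsp: Pointer to the transport FCP request that was deferred.
 *
 * nvmet-fc entry point (lpfc_tgttemplate.defer_rcv). Frees the RQ
 * buffer that was held while the command was deferred; a replacement
 * buffer has already been reposted.
 **/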
static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
		     struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long iflag;

	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
			 ctxp->oxid, ctxp->size, smp_processor_id());

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6425 Defer rcv: no buffer xri x%x: "
				"flg %x ste %x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		return;
	}

	tgtp = phba->targetport->private;
	if (tgtp)
		atomic_inc(&tgtp->rcv_fcp_cmd_defer);

	/* Free the nvmebuf since a new buffer already replaced it */
	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	ctxp->rqb_buffer = NULL;
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}

static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
	.defer_rcv	= lpfc_nvmet_defer_rcv,

	.max_hw_queues  = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};

static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
			      struct lpfc_nvmet_ctx_info *infop)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
				 &infop->nvmet_ctx_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

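/**
 * lpfc_nvmet_cleanup_io_context - Free all NVMET receive contexts
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks every per-CPU / per-MRQ context list and releases the resources
 * held by each context buffer, then frees the context info array.
 **/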
static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	int i, j;

	/* The first context list, MRQ 0 CPU 0 */
	infop = phba->sli4_hba.nvmet_ctx_info;
	if (!infop)
		return;

	/* Cycle through the entire CPU context list for every MRQ */
	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
		for_each_present_cpu(j) {
			infop = lpfc_get_ctx_list(phba, j, i);
			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
		}
	}
	kfree(phba->sli4_hba.nvmet_ctx_info);
	phba->sli4_hba.nvmet_ctx_info = NULL;
}

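/**
 * lpfc_nvmet_setup_io_context - Allocate NVMET receive contexts
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates one lpfc_nvmet_ctxbuf (context, iocbq and SGL/XRI) per
 * NVMET XRI and distributes them across the per-CPU / per-MRQ context
 * lists.
 *
 * Returns 0 on success, -ENOMEM otherwise.
 **/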
static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	struct lpfc_nvmet_ctx_info *last_infop;
	struct lpfc_nvmet_ctx_info *infop;
	int i, j, idx, cpu;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);

	phba->sli4_hba.nvmet_ctx_info = kcalloc(
		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
	if (!phba->sli4_hba.nvmet_ctx_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6419 Failed allocate memory for "
				"nvmet context lists\n");
		return -ENOMEM;
	}

	/*
	 * Assuming X CPUs in the system, and Y MRQs, allocate some
	 * lpfc_nvmet_ctx_info structures as follows:
	 *
	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
	 * ...
	 * cpuX/mrqY cpuX/mrqY ... cpuX/mrqY
	 *
	 * Each line represents a MRQ "silo" containing an entry for
	 * every CPU.
	 *
	 * MRQ X is initially assumed to be associated with CPU X, thus
	 * contexts are initially distributed across all MRQs using
	 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
	 * freed, they are freed to the MRQ silo based on the CPU number
	 * of the IO completion. Thus a context that was allocated for MRQ A
	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
	 */
	for_each_possible_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
			spin_lock_init(&infop->nvmet_ctx_list_lock);
			infop->nvmet_ctx_list_cnt = 0;
		}
	}

	/*
	 * Setup the next CPU context info ptr for each MRQ.
	 * MRQ 0 will cycle thru CPUs 0 - X separately from
	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
	 */
	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
		last_infop = lpfc_get_ctx_list(phba,
					       cpumask_first(cpu_present_mask),
					       j);
		for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
			infop = lpfc_get_ctx_list(phba, i, j);
			infop->nvmet_ctx_next_cpu = last_infop;
			last_infop = infop;
		}
	}

	/* For all nvmet xris, allocate resources needed to process a
	 * received command on a per xri basis.
	 */
	idx = 0;
	cpu = cpumask_first(cpu_present_mask);
	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
		if (!ctx_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6404 Ran out of memory for NVMET\n");
			return -ENOMEM;
		}

		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
					   GFP_KERNEL);
		if (!ctx_buf->context) {
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6405 Ran out of NVMET "
					"context memory\n");
			return -ENOMEM;
		}
		ctx_buf->context->ctxbuf = ctx_buf;
		ctx_buf->context->state = LPFC_NVMET_STE_FREE;

		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
		if (!ctx_buf->iocbq) {
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6406 Ran out of NVMET iocb/WQEs\n");
			return -ENOMEM;
		}
		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
		nvmewqe = ctx_buf->iocbq;
		wqe = &nvmewqe->wqe;

		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		ctx_buf->iocbq->context1 = NULL;
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		if (!ctx_buf->sglq) {
			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6407 Ran out of NVMET XRIs\n");
			return -ENOMEM;
		}
		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);

		/*
		 * Add ctx to MRQidx context list. Our initial assumption
		 * is MRQidx will be associated with CPUidx. This association
		 * can change on the fly.
		 */
		infop = lpfc_get_ctx_list(phba, cpu, idx);
		spin_lock(&infop->nvmet_ctx_list_lock);
		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
		infop->nvmet_ctx_list_cnt++;
		spin_unlock(&infop->nvmet_ctx_list_lock);

		/* Spread ctx structures evenly across all MRQs */
		idx++;
		if (idx >= phba->cfg_nvmet_mrq) {
			idx = 0;
			cpu = cpumask_first(cpu_present_mask);
			continue;
		}
		cpu = cpumask_next(cpu, cpu_present_mask);
		if (cpu == nr_cpu_ids)
			cpu = cpumask_first(cpu_present_mask);
	}

	for_each_present_cpu(i) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6408 TOTAL NVMET ctx for CPU %d "
					"MRQ %d: cnt %d nextcpu %p\n",
					i, j, infop->nvmet_ctx_list_cnt,
					infop->nvmet_ctx_next_cpu);
		}
	}
	return 0;
}

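/**
 * lpfc_nvmet_create_targetport - Register the HBA as an nvmet-fc target port
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets up the NVMET receive contexts, fills in the port info and the
 * template limits, and registers the port with the nvmet-fc transport.
 *
 * Returns 0 on success or if a targetport already exists, otherwise
 * an error code from the setup or registration step.
 **/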
int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error;

	if (phba->targetport)
		return 0;

	error = lpfc_nvmet_setup_io_context(phba);
	if (error)
		return error;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
#else
	error = -ENOENT;
#endif
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6025 Cannot register NVME targetport x%x: "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				error,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);
		phba->targetport = NULL;
		phba->nvmet_support = 0;

		lpfc_nvmet_cleanup_io_context(phba);

	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: %p, private %p "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);

		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_release, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_abort_unsol, 0);
		atomic_set(&tgtp->xmt_abort_sol, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
		atomic_set(&tgtp->defer_ctx, 0);
		atomic_set(&tgtp->defer_fod, 0);
		atomic_set(&tgtp->defer_wqfull, 0);
	}
	return error;
}

int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port %p did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
	return 0;
}

/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
			    struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
	bool released = false;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;

	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
	}

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		spin_lock(&ctxp->ctxlock);
		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
			list_del(&ctxp->list);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVMET_XBUSY;
		spin_unlock(&ctxp->ctxlock);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		rrq_empty = list_empty(&phba->active_rrq_list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			lpfc_set_rrq_active(phba, ndlp,
					    ctxp->ctxbuf->sglq->sli4_lxritag,
					    rxid, 1);
			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted oxid %x flg x%x (%x)\n",
				ctxp->oxid, ctxp->flag, released);
		if (released)
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

		if (rrq_empty)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
			   struct fc_frame_header *fc_hdr)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct nvmefc_tgt_fcp_req *rsp;
	uint16_t xri;
	unsigned long iflag = 0;

	xri = be16_to_cpu(fc_hdr->fh_ox_id);

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
			"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
			xri, smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);

		rsp = &ctxp->ctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);

		/* Respond with BA_ACC accordingly */
		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
			 xri, smp_processor_id(), 1);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);

	/* Respond with BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
#endif
	return 0;
}

static void
lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
			struct lpfc_nvmet_rcv_ctx *ctxp)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_iocbq *next_nvmewqeq;
	unsigned long iflags;
	struct lpfc_wcqe_complete wcqe;
	struct lpfc_wcqe_complete *wcqep;

	pring = wq->pring;
	wcqep = &wcqe;

	/* Fake an ABORT error code back to cmpl routine */
	memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
	bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
	wcqep->parameter = IOERR_ABORT_REQUESTED;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
				 &wq->wqfull_list, list) {
		if (ctxp) {
			/* Checking for a specific IO to flush */
			if (nvmewqeq->context2 == ctxp) {
				list_del(&nvmewqeq->list);
				spin_unlock_irqrestore(&pring->ring_lock,
						       iflags);
				lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
							  wcqep);
				return;
			}
			continue;
		}
		/* Flush all IOs */
		list_del(&nvmewqeq->list);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
		spin_lock_irqsave(&pring->ring_lock, iflags);
	}
	if (!ctxp)
		wq->q_flag &= ~HBA_NVMET_WQFULL;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
}

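/**
 * lpfc_nvmet_wqfull_process - Re-issue WQEs queued while the WQ was full
 * @phba: pointer to lpfc hba data structure.
 * @wq: pointer to the work queue whose wqfull_list is processed.
 *
 * Called when WQE slots become available again; drains the wqfull_list
 * and re-issues each WQE, stopping early if the WQ fills up again.
 **/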
void
lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
			  struct lpfc_queue *wq)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	unsigned long iflags;
	int rc;

	/*
	 * Some WQE slots are available, so try to re-issue anything
	 * on the WQ wqfull_list.
	 */
	pring = wq->pring;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	while (!list_empty(&wq->wqfull_list)) {
		list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
				 list);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
		rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
		spin_lock_irqsave(&pring->ring_lock, iflags);
		if (rc == -EBUSY) {
			/* WQ was full again, so put it back on the list */
			list_add(&nvmewqeq->list, &wq->wqfull_list);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return;
		}
	}
	wq->q_flag &= ~HBA_NVMET_WQFULL;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
#endif
}

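/**
 * lpfc_nvmet_destroy_targetport - Unregister the nvmet-fc target port
 * @phba: pointer to lpfc hba data structure.
 *
 * Flushes any WQEs still sitting on the wqfull_lists, unregisters the
 * port with the nvmet-fc transport, waits briefly for the unregister to
 * complete, and frees the NVMET receive contexts.
 **/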
void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_queue *wq;
	uint32_t qidx;
	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);

	if (phba->nvmet_support == 0)
		return;
	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
		}
		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
		nvmet_fc_unregister_targetport(phba->targetport);
		wait_for_completion_timeout(&tport_unreg_cmp, 5);
		lpfc_nvmet_cleanup_io_context(phba);
	}
	phba->targetport = NULL;
#endif
}

/**
 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with a unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct hbq_dmabuf *nvmebuf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;

	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);

	if (!phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6154 LS Drop IO x%x\n", oxid);
		oxid = 0;
		size = 0;
		sid = 0;
		ctxp = NULL;
		goto dropit;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
	if (ctxp == NULL) {
		atomic_inc(&tgtp->rcv_ls_req_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6155 LS Drop IO x%x: Alloc\n",
				oxid);
dropit:
		lpfc_nvmeio_data(phba, "NVMET LS DROP: "
				 "xri x%x sz %d from %06x\n",
				 oxid, size, sid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->state = LPFC_NVMET_STE_LS_RCV;
	ctxp->entry_cnt = 1;
	ctxp->rqb_buffer = (void *)nvmebuf;
	ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
	 */
	atomic_inc(&tgtp->rcv_ls_req_in);
	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
				 payload, size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n", size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (rc == 0) {
		atomic_inc(&tgtp->rcv_ls_req_out);
		return;
	}

	lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);

	atomic_inc(&tgtp->rcv_ls_req_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
			ctxp->oxid, rc);

	/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
	lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	atomic_inc(&tgtp->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
#endif
}

static void
lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
	struct lpfc_hba *phba = ctxp->phba;
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t *payload;
	uint32_t rc;
	unsigned long iflags;

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6159 process_rcv_fcp_req, nvmebuf is NULL, "
				"oxid: x%x flg: x%x state: x%x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		return;
	}

	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
	 * the NVME command / FC header is stored.
	 * A buffer has already been reposted for this IO, so just free
	 * the nvmebuf.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
				  payload, ctxp->size);
	/* Process FCP command */
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		return;
	}

	/* Processing of FCP command is deferred */
	if (rc == -EOVERFLOW) {
		lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
				 "from %06x\n",
				 ctxp->oxid, ctxp->size, ctxp->sid);
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		atomic_inc(&tgtp->defer_fod);
		return;
	}
	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
			ctxp->oxid, rc,
			atomic_read(&tgtp->rcv_fcp_cmd_in),
			atomic_read(&tgtp->rcv_fcp_cmd_out),
			atomic_read(&tgtp->xmt_fcp_release));
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 ctxp->oxid, ctxp->size, ctxp->sid);
	spin_lock_irqsave(&ctxp->ctxlock, iflags);
	lpfc_nvmet_defer_release(phba, ctxp);
	spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
#endif
}

static void
lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf =
		container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);

	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
#endif
}

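/**
 * lpfc_nvmet_replenish_context - Refill an empty MRQ context list
 * @phba: pointer to lpfc hba data structure.
 * @current_infop: context list for the MRQ/CPU that ran dry.
 *
 * Steals the entire context list from another CPU's list for the same
 * MRQ and returns one context buffer from it, or NULL if every context
 * for this MRQ is currently in flight.
 **/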
static struct lpfc_nvmet_ctxbuf *
lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
			     struct lpfc_nvmet_ctx_info *current_infop)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
	struct lpfc_nvmet_ctx_info *get_infop;
	int i;

	/*
	 * The current_infop for the MRQ a NVME command IU was received
	 * on is empty. Our goal is to replenish this MRQ's context
	 * list from another CPU's list.
	 *
	 * First we need to pick a context list to start looking on.
	 * nvmet_ctx_start_cpu is the CPU that had contexts available
	 * the last time we needed to replenish this CPU, while
	 * nvmet_ctx_next_cpu is just the next sequential CPU for this MRQ.
	 */
	if (current_infop->nvmet_ctx_start_cpu)
		get_infop = current_infop->nvmet_ctx_start_cpu;
	else
		get_infop = current_infop->nvmet_ctx_next_cpu;

	for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
		if (get_infop == current_infop) {
			get_infop = get_infop->nvmet_ctx_next_cpu;
			continue;
		}
		spin_lock(&get_infop->nvmet_ctx_list_lock);

		/* Just take the entire context list, if there are any */
		if (get_infop->nvmet_ctx_list_cnt) {
			list_splice_init(&get_infop->nvmet_ctx_list,
					 &current_infop->nvmet_ctx_list);
			current_infop->nvmet_ctx_list_cnt =
				get_infop->nvmet_ctx_list_cnt - 1;
			get_infop->nvmet_ctx_list_cnt = 0;
			spin_unlock(&get_infop->nvmet_ctx_list_lock);

			current_infop->nvmet_ctx_start_cpu = get_infop;
			list_remove_head(&current_infop->nvmet_ctx_list,
					 ctx_buf, struct lpfc_nvmet_ctxbuf,
					 list);
			return ctx_buf;
		}

		/* Otherwise, move on to the next CPU for this MRQ */
		spin_unlock(&get_infop->nvmet_ctx_list_lock);
		get_infop = get_infop->nvmet_ctx_next_cpu;
	}

#endif
	/* Nothing found, all contexts for the MRQ are in-flight */
	return NULL;
}
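/* Context-list topology assumed by the replenish logic above: each CPU has
 * one lpfc_nvmet_ctx_info per MRQ, and the nvmet_ctx_next_cpu pointers link
 * the per-CPU entries for a given MRQ into a ring.  Replenishing therefore
 * visits at most num_possible_cpu entries, steals the first non-empty list
 * it finds, and remembers that CPU in nvmet_ctx_start_cpu for next time.
 */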
/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 * @isr_timestamp: timestamp taken at interrupt time, used for the optional
 *                 latency statistics.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It handles an unsolicited FCP command IU received on one of the
 * NVMET MRQs: a receive context is pulled from the per-CPU context list for
 * this MRQ (replenished from another CPU's list if necessary), the FC header
 * is parsed for the OXID and SID, and the command is then handed to the
 * NVMe target transport via lpfc_nvmet_process_rcv_fcp_req().
 **/
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
			    uint32_t idx,
			    struct rqb_dmabuf *nvmebuf,
			    uint64_t isr_timestamp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_nvmet_ctx_info *current_infop;
	uint32_t *payload;
	uint32_t size, oxid, sid, qno;
	unsigned long iflag;
	int current_cpu;

	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
		return;

	ctx_buf = NULL;
	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6157 NVMET FCP Drop IO\n");
		if (nvmebuf)
			lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}

	/*
	 * Get a pointer to the context list for this MRQ based on
	 * the CPU this MRQ IRQ is associated with. If the CPU association
	 * changes from our initial assumption, the context list could
	 * be empty, thus it would need to be replenished with the
	 * context list from another CPU for this MRQ.
	 */
	current_cpu = smp_processor_id();
	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
	if (current_infop->nvmet_ctx_list_cnt) {
		list_remove_head(&current_infop->nvmet_ctx_list,
				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
		current_infop->nvmet_ctx_list_cnt--;
	} else {
		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
	}
	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);

	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	size = nvmebuf->bytes_recv;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
		if (current_cpu < LPFC_CHECK_CPU_CNT) {
			if (idx != current_cpu)
				lpfc_printf_log(phba, KERN_INFO,
						LOG_NVME_IOERR,
						"6703 CPU Check rcv: "
						"cpu %d expect %d\n",
						current_cpu, idx);
			phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
		}
	}
#endif

	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
			 oxid, size, smp_processor_id());

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (!ctx_buf) {
		/* Queue this NVME IO to process later */
		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
		list_add_tail(&nvmebuf->hbuf.list,
			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
		phba->sli4_hba.nvmet_io_wait_cnt++;
		phba->sli4_hba.nvmet_io_wait_total++;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* Post a brand new DMA buffer to RQ */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);

		atomic_inc(&tgtp->defer_ctx);
		return;
	}

	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
	if (ctxp->state != LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6414 NVMET Context corrupt %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
	}
	ctxp->wqeq = NULL;
	ctxp->txrdy = NULL;
	ctxp->offset = 0;
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->state = LPFC_NVMET_STE_RCV;
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	ctxp->rqb_buffer = (void *)nvmebuf;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (isr_timestamp) {
		ctxp->ts_isr_cmd = isr_timestamp;
		ctxp->ts_cmd_nvme = ktime_get_ns();
		ctxp->ts_nvme_data = 0;
		ctxp->ts_data_wqput = 0;
		ctxp->ts_isr_data = 0;
		ctxp->ts_data_nvme = 0;
		ctxp->ts_nvme_status = 0;
		ctxp->ts_status_wqput = 0;
		ctxp->ts_isr_status = 0;
		ctxp->ts_status_nvme = 0;
	} else {
		ctxp->ts_cmd_nvme = 0;
	}
#endif

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
}
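/* Note on the no-context path above: the command is not dropped when the
 * context lists are exhausted.  The nvmebuf is parked on
 * lpfc_nvmet_io_wait_list, a fresh DMA buffer is posted back to the MRQ so
 * the hardware can keep receiving, and defer_ctx counts the event; the
 * parked command is expected to be picked up once a context is freed.
 */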
/**
 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @piocb: pointer to the iocbq carrying the received nvme data buffer.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *piocb)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *nvmebuf;

	d_buf = piocb->context2;
	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

	if (phba->nvmet_support == 0) {
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
}
/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: timestamp taken at interrupt entry, for latency statistics.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
			   uint32_t idx,
			   struct rqb_dmabuf *nvmebuf,
			   uint64_t isr_timestamp)
{
	if (phba->nvmet_support == 0) {
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}
	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
				    isr_timestamp);
}
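/* Both unsolicited event entry points above only gate on nvmet_support and
 * then hand the buffer to the corresponding _buffer() routine.  They are
 * presumably invoked from the SLI4 receive-queue completion handling, with
 * isr_timestamp carried through for the optional latency instrumentation.
 */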
/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * This routine is used for allocating a lpfc-WQE data structure from
 * the driver lpfc-WQE free-list and preparing the WQE with the parameters
 * passed into the routine so that an NVME Link Service response can be
 * transmitted to the remote NPORT.  It fills in the Buffer Descriptor
 * Entry (BDE) for the response payload.  The reference count on the ndlp
 * is incremented by 1 and the reference to the ndlp is put into context1
 * of the WQE data structure for this WQE to hold the ndlp reference for
 * the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared nvme wqe data structure
 *   NULL - when nvme wqe data structure allocation/preparation failed
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_nvmet_rcv_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6104 NVMET prep LS wqe: link err: "
				"NPORT x%x oxid:x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6105 NVMET prep LS wqe: No WQE: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6106 NVMET prep LS wqe: No ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;

	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 */
	wqe->xmit_sequence.xmit_len = rspsize;

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVMET response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVMET LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

nvme_wqe_free_wqeq_exit:
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}
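/* The WQE prepared above is an XMIT_SEQUENCE64 that carries the NVME LS
 * response payload as a single 64-bit BDE (rspbuf/rspsize) and echoes the
 * originator's OXID from the receive context.  Every error path releases
 * the iocbq and returns NULL, so callers only ever see a fully prepared
 * WQE or NULL.
 */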
static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
			struct lpfc_nvmet_rcv_ctx *ctxp)
{
	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
	struct lpfc_nvmet_tgtport *tgtp;
	struct sli4_sge *sgl;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	struct scatterlist *sgel;
	union lpfc_wqe128 *wqe;
	struct ulp_bde64 *bde;
	uint32_t *txrdy;
	dma_addr_t physaddr;
	int i, cnt;
	int do_pbde;
	int xc = 1;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6107 NVMET prep FCP wqe: link err:"
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6108 NVMET prep FCP wqe: no ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6109 NVMET prep FCP wqe: seg cnt err: "
				"NPORT x%x oxid x%x ste %d cnt %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state,
				phba->cfg_nvme_seg_cnt);
		return NULL;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	nvmewqe = ctxp->wqeq;
	if (nvmewqe == NULL) {
		/* Allocate buffer for command wqe */
		nvmewqe = ctxp->ctxbuf->iocbq;
		if (nvmewqe == NULL) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6110 NVMET prep FCP wqe: No "
					"WQE: NPORT x%x oxid x%x ste %d\n",
					ctxp->sid, ctxp->oxid, ctxp->state);
			return NULL;
		}
		ctxp->wqeq = nvmewqe;
		xc = 0; /* create new XRI */
		nvmewqe->sli4_lxritag = NO_XRI;
		nvmewqe->sli4_xritag = NO_XRI;
	}

	/* Sanity check */
	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
	    (ctxp->entry_cnt == 1)) ||
	    (ctxp->state == LPFC_NVMET_STE_DATA)) {
		wqe = &nvmewqe->wqe;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6111 Wrong state NVMET FCP: %d cnt %d\n",
				ctxp->state, ctxp->entry_cnt);
		return NULL;
	}

	sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
	switch (rsp->op) {
	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		/* From the tsend template, initialize words 7 - 11 */
		memcpy(&wqe->words[7],
		       &lpfc_tsend_cmd_template.words[7],
		       sizeof(uint32_t) * 5);

		/* Words 0 - 2 : The first sg segment */
		sgel = &rsp->sg[0];
		physaddr = sg_dma_address(sgel);
		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_tsend.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_tsend.payload_offset_len = 0;

		/* Word 4 */
		wqe->fcp_tsend.relative_offset = ctxp->offset;

		/* Word 5 */
		wqe->fcp_tsend.reserved = 0;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 - set ar later */

		/* Word 8 */
		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);

		/* Word 10 - set wqes later, in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);

		/* Word 11 - set sup, irsp, irsplen later */
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
			atomic_inc(&tgtp->xmt_fcp_read_rsp);

			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */

			if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
				if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
					bf_set(wqe_sup,
					       &wqe->fcp_tsend.wqe_com, 1);
			} else {
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
				       ((rsp->rsplen >> 2) - 1));
				memcpy(&wqe->words[16], rsp->rspaddr,
				       rsp->rsplen);
			}
		} else {
			atomic_inc(&tgtp->xmt_fcp_read);

			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
		}
		break;

	case NVMET_FCOP_WRITEDATA:
		/* From the treceive template, initialize words 3 - 11 */
		memcpy(&wqe->words[3],
		       &lpfc_treceive_cmd_template.words[3],
		       sizeof(uint32_t) * 9);

		/* Words 0 - 2 : The first sg segment */
		txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
				       GFP_KERNEL, &physaddr);
		if (!txrdy) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6041 Bad txrdy buffer: oxid x%x\n",
					ctxp->oxid);
			return NULL;
		}
		ctxp->txrdy = txrdy;
		ctxp->txrdy_phys = physaddr;
		wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
		wqe->fcp_treceive.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_treceive.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 4 */
		wqe->fcp_treceive.relative_offset = ctxp->offset;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */

		/* Word 8 */
		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);

		/* Word 10 - in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);

		/* Word 11 - set pbde later */
		if (phba->cfg_enable_pbde) {
			do_pbde = 1;
		} else {
			bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
			do_pbde = 0;
		}

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 1 TXRDY and 1 SKIP SGE */
		txrdy[0] = 0;
		txrdy[1] = cpu_to_be32(rsp->transfer_length);
		txrdy[2] = 0;

		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		atomic_inc(&tgtp->xmt_fcp_write);
		break;

	case NVMET_FCOP_RSP:
		/* From the trsp template, initialize words 4 - 11 */
		memcpy(&wqe->words[4],
		       &lpfc_trsp_cmd_template.words[4],
		       sizeof(uint32_t) * 8);

		/* Words 0 - 2 */
		physaddr = rsp->rspdma;
		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
		wqe->fcp_trsp.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_trsp.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_trsp.response_len = rsp->rsplen;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */

		/* Word 8 */
		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		if (xc)
			bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		/* In template wqes=0 irsp=0 irsplen=0 - good response */
		if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
			/* Bad response - embed it */
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
			       ((rsp->rsplen >> 2) - 1));
			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
		}
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_trsp.rsvd_12_15[0] = 0;

		/* Use rspbuf, NOT sg list */
		rsp->sg_cnt = 0;
		sgl->word2 = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp);
		break;

	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;

	for (i = 0; i < rsp->sg_cnt; i++) {
		sgel = &rsp->sg[i];
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i + 1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		if (i == 0) {
			bde = (struct ulp_bde64 *)&wqe->words[13];
			if (do_pbde) {
				/* Words 13-15 (PBDE) */
				bde->addrLow = sgl->addr_lo;
				bde->addrHigh = sgl->addr_hi;
				bde->tus.f.bdeSize =
					le32_to_cpu(sgl->sge_len);
				bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bde->tus.w = cpu_to_le32(bde->tus.w);
			} else {
				memset(bde, 0, sizeof(struct ulp_bde64));
			}
		}
		sgl++;
		ctxp->offset += cnt;
	}
	ctxp->state = LPFC_NVMET_STE_DATA;
	ctxp->entry_cnt++;
	return nvmewqe;
}
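/* Mapping used above between transport ops and SLI-4 WQE types:
 *   NVMET_FCOP_READDATA / READDATA_RSP -> FCP_TSEND64    (tsend template)
 *   NVMET_FCOP_WRITEDATA               -> FCP_TRECEIVE64 (treceive template,
 *                                         with a driver-built TXRDY payload)
 *   NVMET_FCOP_RSP                     -> FCP_TRSP64     (trsp template)
 * The trailing scatter/gather loop converts rsp->sg into data SGEs and,
 * when PBDE is enabled, mirrors the first SGE into words 13-15 of the WQE.
 */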
/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP
 * cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;
	unsigned long flags;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->state = LPFC_NVMET_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP
 * cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t status, result;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (!ctxp) {
		/* if context is clear, related io already complete */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	/* Sanity check */
	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVMET_STE_DONE;
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl xri x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for LS
 * cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_ls_abort_cmpl);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (!ctxp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6415 NVMET LS Abort No ctx: WCQE: "
				"%08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);

		lpfc_sli_release_iocbq(phba, cmdwqe);
		return;
	}

	if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6416 NVMET LS abort cmpl state mismatch: "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	kfree(ctxp);
}
static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_nvmet_rcv_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that were initialized in lpfc_sli4_nvmet_alloc.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_nvmet_rcv_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *abts_wqe;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (!ctxp->abort_wqeq) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	abts_wqe = &abts_wqeq->wqe;
	ctxp->state = LPFC_NVMET_STE_ABORT;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set. It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* WQEs are reused. Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));

	/* word 3 */
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_wqeq->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = 0;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->vport = phba->pport;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x\n",
			rc, ctxp->oxid);
	return 1;
}
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_nvmet_rcv_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	bool released = false;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVMET_STE_ABORT;
	ctxp->entry_cnt++;
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		return 0;
	}

aerr:
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
			ctxp->oxid, rc);
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
	return 1;
}
static int
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
				struct lpfc_nvmet_rcv_ctx *ctxp,
				uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	unsigned long flags;
	int rc;

	if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = 0;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	kfree(ctxp);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 0;
}