/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
                                                 struct lpfc_nvmet_rcv_ctx *,
                                                 dma_addr_t rspbuf,
                                                 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
                                                  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
                                          struct lpfc_nvmet_rcv_ctx *,
                                          uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
                                            struct lpfc_nvmet_rcv_ctx *,
                                            uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
                                           struct lpfc_nvmet_rcv_ctx *,
                                           uint32_t, uint16_t);
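/*
 * Note: lpfc_nvmet_defer_release() below parks a receive context on
 * lpfc_abts_nvmet_ctx_list so that the abort/XRI-aborted path, not the
 * normal release path, performs the final lpfc_nvmet_ctxbuf_post().
 * The LPFC_NVMET_CTX_RLS flag makes the hand-off idempotent: a second
 * caller sees the flag already set and backs out.
 */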
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
        unsigned long iflag;

        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
                        "6313 NVMET Defer ctx release xri x%x flg x%x\n",
                        ctxp->oxid, ctxp->flag);

        spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
        if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
                spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
                                       iflag);
                return;
        }
        ctxp->flag |= LPFC_NVMET_CTX_RLS;
        list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
        spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
}
/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_ls_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;
        ctxp = cmdwqe->context2;

        if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6410 NVMET LS cmpl state mismatch IO x%x: "
                                "%d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }

        if (!phba->targetport)
                goto out;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

        if (status)
                atomic_inc(&tgtp->xmt_ls_rsp_error);
        else
                atomic_inc(&tgtp->xmt_ls_rsp_cmpl);

out:
        rsp = &ctxp->ctx.ls_req;

        lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
                         ctxp->oxid, status, result);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
                        status, result, ctxp->oxid);

        lpfc_nlp_put(cmdwqe->context1);
        cmdwqe->context2 = NULL;
        cmdwqe->context3 = NULL;
        lpfc_sli_release_iocbq(phba, cmdwqe);
        rsp->done(rsp);
        kfree(ctxp);
}
/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with
 * @ctx_buf: NVMET context buffer to clean up and repost
 *
 * Description: Frees the given DMA buffer in the appropriate way, by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        struct rqb_dmabuf *nvmebuf;
        uint32_t *payload;
        uint32_t size, oxid, sid, rc;
        unsigned long iflag;

        if (ctxp->txrdy) {
                pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
                              ctxp->txrdy_phys);
                ctxp->txrdy = NULL;
                ctxp->txrdy_phys = 0;
        }

        if (ctxp->state == LPFC_NVMET_STE_FREE) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6411 NVMET free, already free IO x%x: %d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }
        ctxp->state = LPFC_NVMET_STE_FREE;

        spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
        if (phba->sli4_hba.nvmet_io_wait_cnt) {
                list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
                                 nvmebuf, struct rqb_dmabuf,
                                 hbuf.list);
                phba->sli4_hba.nvmet_io_wait_cnt--;
                spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
                                       iflag);

                fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
                oxid = be16_to_cpu(fc_hdr->fh_ox_id);
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
                payload = (uint32_t *)(nvmebuf->dbuf.virt);
                size = nvmebuf->bytes_recv;
                sid = sli4_sid_from_fc_hdr(fc_hdr);

                ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
                ctxp->wqeq = NULL;
                ctxp->txrdy = NULL;
                ctxp->offset = 0;
                ctxp->phba = phba;
                ctxp->size = size;
                ctxp->oxid = oxid;
                ctxp->sid = sid;
                ctxp->state = LPFC_NVMET_STE_RCV;
                ctxp->entry_cnt = 1;
                ctxp->flag = 0;
                ctxp->ctxbuf = ctx_buf;
                spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (phba->ktime_on) {
                        ctxp->ts_cmd_nvme = ktime_get_ns();
                        ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
                        ctxp->ts_nvme_data = 0;
                        ctxp->ts_data_wqput = 0;
                        ctxp->ts_isr_data = 0;
                        ctxp->ts_data_nvme = 0;
                        ctxp->ts_nvme_status = 0;
                        ctxp->ts_status_wqput = 0;
                        ctxp->ts_isr_status = 0;
                        ctxp->ts_status_nvme = 0;
                }
#endif
                atomic_inc(&tgtp->rcv_fcp_cmd_in);
                /*
                 * The calling sequence should be:
                 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
                 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
                 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
                 * in the NVME command / FC header is stored.
                 * A buffer has already been reposted for this IO, so just free
                 * the nvmebuf.
                 */
                rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
                                          payload, size);

                /* Process FCP command */
                if (rc == 0) {
                        atomic_inc(&tgtp->rcv_fcp_cmd_out);
                        nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
                        return;
                }

                atomic_inc(&tgtp->rcv_fcp_cmd_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
                                ctxp->oxid, rc,
                                atomic_read(&tgtp->rcv_fcp_cmd_in),
                                atomic_read(&tgtp->rcv_fcp_cmd_out),
                                atomic_read(&tgtp->xmt_fcp_release));

                lpfc_nvmet_defer_release(phba, ctxp);
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
                nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
                return;
        }
        spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

        spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
        list_add_tail(&ctx_buf->list,
                      &phba->sli4_hba.lpfc_nvmet_ctx_put_list);
        phba->sli4_hba.nvmet_ctx_put_cnt++;
        spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
#endif
}
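/*
 * Context buffers move in a cycle: lpfc_nvmet_ctx_get_list feeds newly
 * received commands, completed IOs are returned to lpfc_nvmet_ctx_put_list
 * above, and lpfc_nvmet_unsol_fcp_buffer() splices the put list back into
 * the get list when the get list runs dry. Two lists are presumably used
 * so the hot receive path and the release path do not contend on a single
 * lock.
 */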
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
                 struct lpfc_nvmet_rcv_ctx *ctxp)
{
        uint64_t seg1, seg2, seg3, seg4, seg5;
        uint64_t seg6, seg7, seg8, seg9, seg10;

        if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
            !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
            !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
            !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
            !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
                return;

        if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
                return;
        if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
                return;
        if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
                return;
        if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
                return;
        if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
                return;
        if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
                return;
        if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
                return;
        if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
                return;
        if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
                return;
        /*
         * Segment 1 - Time from FCP command received by MSI-X ISR
         * to FCP command is passed to NVME Layer.
         * Segment 2 - Time from FCP command payload handed
         * off to NVME Layer to Driver receives a Command op
         * from NVME Layer.
         * Segment 3 - Time from Driver receives a Command op
         * from NVME Layer to Command is put on WQ.
         * Segment 4 - Time from Driver WQ put is done
         * to MSI-X ISR for Command cmpl.
         * Segment 5 - Time from MSI-X ISR for Command cmpl to
         * Command cmpl is passed to NVME Layer.
         * Segment 6 - Time from Command cmpl is passed to NVME
         * Layer to Driver receives a RSP op from NVME Layer.
         * Segment 7 - Time from Driver receives a RSP op from
         * NVME Layer to WQ put is done on TRSP FCP Status.
         * Segment 8 - Time from Driver WQ put is done on TRSP
         * FCP Status to MSI-X ISR for TRSP cmpl.
         * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
         * TRSP cmpl is passed to NVME Layer.
         * Segment 10 - Time from FCP command received by
         * MSI-X ISR to command is completed on wire.
         * (Segments 1 thru 8) for READDATA / WRITEDATA
         * (Segments 1 thru 4) for READDATA_RSP
         */
        seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
        seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
        seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
                seg1 - seg2;
        seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
                seg1 - seg2 - seg3;
        seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
                seg1 - seg2 - seg3 - seg4;

        /* For auto rsp commands seg6 thru seg10 will be 0 */
        if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
                seg6 = (ctxp->ts_nvme_status -
                        ctxp->ts_isr_cmd) -
                        seg1 - seg2 - seg3 - seg4 - seg5;
                seg7 = (ctxp->ts_status_wqput -
                        ctxp->ts_isr_cmd) -
                        seg1 - seg2 - seg3 -
                        seg4 - seg5 - seg6;
                seg8 = (ctxp->ts_isr_status -
                        ctxp->ts_isr_cmd) -
                        seg1 - seg2 - seg3 - seg4 -
                        seg5 - seg6 - seg7;
                seg9 = (ctxp->ts_status_nvme -
                        ctxp->ts_isr_cmd) -
                        seg1 - seg2 - seg3 - seg4 -
                        seg5 - seg6 - seg7 - seg8;
                seg10 = (ctxp->ts_isr_status -
                        ctxp->ts_isr_cmd);
        } else {
                seg6 = 0;
                seg7 = 0;
                seg8 = 0;
                seg9 = 0;
                seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
        }

        phba->ktime_seg1_total += seg1;
        if (seg1 < phba->ktime_seg1_min)
                phba->ktime_seg1_min = seg1;
        else if (seg1 > phba->ktime_seg1_max)
                phba->ktime_seg1_max = seg1;

        phba->ktime_seg2_total += seg2;
        if (seg2 < phba->ktime_seg2_min)
                phba->ktime_seg2_min = seg2;
        else if (seg2 > phba->ktime_seg2_max)
                phba->ktime_seg2_max = seg2;

        phba->ktime_seg3_total += seg3;
        if (seg3 < phba->ktime_seg3_min)
                phba->ktime_seg3_min = seg3;
        else if (seg3 > phba->ktime_seg3_max)
                phba->ktime_seg3_max = seg3;

        phba->ktime_seg4_total += seg4;
        if (seg4 < phba->ktime_seg4_min)
                phba->ktime_seg4_min = seg4;
        else if (seg4 > phba->ktime_seg4_max)
                phba->ktime_seg4_max = seg4;

        phba->ktime_seg5_total += seg5;
        if (seg5 < phba->ktime_seg5_min)
                phba->ktime_seg5_min = seg5;
        else if (seg5 > phba->ktime_seg5_max)
                phba->ktime_seg5_max = seg5;

        phba->ktime_data_samples++;
        if (!seg6)
                goto out;

        phba->ktime_seg6_total += seg6;
        if (seg6 < phba->ktime_seg6_min)
                phba->ktime_seg6_min = seg6;
        else if (seg6 > phba->ktime_seg6_max)
                phba->ktime_seg6_max = seg6;

        phba->ktime_seg7_total += seg7;
        if (seg7 < phba->ktime_seg7_min)
                phba->ktime_seg7_min = seg7;
        else if (seg7 > phba->ktime_seg7_max)
                phba->ktime_seg7_max = seg7;

        phba->ktime_seg8_total += seg8;
        if (seg8 < phba->ktime_seg8_min)
                phba->ktime_seg8_min = seg8;
        else if (seg8 > phba->ktime_seg8_max)
                phba->ktime_seg8_max = seg8;

        phba->ktime_seg9_total += seg9;
        if (seg9 < phba->ktime_seg9_min)
                phba->ktime_seg9_min = seg9;
        else if (seg9 > phba->ktime_seg9_max)
                phba->ktime_seg9_max = seg9;

        phba->ktime_seg10_total += seg10;
        if (seg10 < phba->ktime_seg10_min)
                phba->ktime_seg10_min = seg10;
        else if (seg10 > phba->ktime_seg10_max)
                phba->ktime_seg10_max = seg10;
        phba->ktime_status_samples++;
out:
        return;
}
#endif
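/*
 * Rough timeline of the segments measured above (a sketch; the stamps are
 * MSI-X ISR timestamps or ktime_get_ns() values):
 *
 *   isr_cmd -1-> cmd_nvme -2-> nvme_data -3-> data_wqput -4-> isr_data
 *   -5-> data_nvme -6-> nvme_status -7-> status_wqput -8-> isr_status
 *   -9-> status_nvme; seg10 spans isr_cmd to the final ISR (done on wire).
 */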
/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_fcp_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result, op, start_clean;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t id;
#endif

        ctxp = cmdwqe->context2;
        ctxp->flag &= ~LPFC_NVMET_IO_INP;

        rsp = &ctxp->ctx.fcp_req;
        op = rsp->op;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;

        if (phba->targetport)
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        else
                tgtp = NULL;

        lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
                         ctxp->oxid, op, status);

        if (status) {
                rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
                rsp->transferred_length = 0;
                if (tgtp)
                        atomic_inc(&tgtp->xmt_fcp_rsp_error);

                /* pick up SLI4 exchange busy condition */
                if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
                        ctxp->flag |= LPFC_NVMET_XBUSY;

                        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                                        "6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
                                        ctxp->oxid, status, result);
                } else {
                        ctxp->flag &= ~LPFC_NVMET_XBUSY;
                }

        } else {
                rsp->fcp_error = NVME_SC_SUCCESS;
                if (op == NVMET_FCOP_RSP)
                        rsp->transferred_length = rsp->rsplen;
                else
                        rsp->transferred_length = rsp->transfer_length;
                if (tgtp)
                        atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
        }

        if ((op == NVMET_FCOP_READDATA_RSP) ||
            (op == NVMET_FCOP_RSP)) {
                /* Sanity check */
                ctxp->state = LPFC_NVMET_STE_DONE;
                ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (phba->ktime_on) {
                        if (rsp->op == NVMET_FCOP_READDATA_RSP) {
                                ctxp->ts_isr_data =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_data_nvme =
                                        ktime_get_ns();
                                ctxp->ts_nvme_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_wqput =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_isr_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_nvme =
                                        ctxp->ts_data_nvme;
                        } else {
                                ctxp->ts_isr_status =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_status_nvme =
                                        ktime_get_ns();
                        }
                }
                if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                        id = smp_processor_id();
                        if (ctxp->cpu != id)
                                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                                "6703 CPU Check cmpl: "
                                                "cpu %d expect %d\n",
                                                id, ctxp->cpu);
                        if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
                                phba->cpucheck_cmpl_io[id]++;
                }
#endif
                rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (phba->ktime_on)
                        lpfc_nvmet_ktime(phba, ctxp);
#endif
                /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
        } else {
                ctxp->entry_cnt++;
                start_clean = offsetof(struct lpfc_iocbq, wqe);
                memset(((char *)cmdwqe) + start_clean, 0,
                       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (phba->ktime_on) {
                        ctxp->ts_isr_data = cmdwqe->isr_timestamp;
                        ctxp->ts_data_nvme = ktime_get_ns();
                }
                if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                        id = smp_processor_id();
                        if (ctxp->cpu != id)
                                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                                "6704 CPU Check cmdcmpl: "
                                                "cpu %d expect %d\n",
                                                id, ctxp->cpu);
                        if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
                                phba->cpucheck_ccmpl_io[id]++;
                }
#endif
                rsp->done(rsp);
        }
}
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_ls_req *rsp)
{
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct hbq_dmabuf *nvmebuf =
                (struct hbq_dmabuf *)ctxp->rqb_buffer;
        struct lpfc_iocbq *nvmewqeq;
        struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
        struct lpfc_dmabuf dmabuf;
        struct ulp_bde64 bpl;
        int rc;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

        if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
            (ctxp->entry_cnt != 1)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6412 NVMET LS rsp state mismatch "
                                "oxid x%x: %d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }
        ctxp->state = LPFC_NVMET_STE_LS_RSP;
        ctxp->entry_cnt++;

        nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
                                          rsp->rsplen);
        if (nvmewqeq == NULL) {
                atomic_inc(&nvmep->xmt_ls_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6150 LS Drop IO x%x: Prep\n",
                                ctxp->oxid);
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                atomic_inc(&nvmep->xmt_ls_abort);
                lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
                                                ctxp->sid, ctxp->oxid);
                return -ENOMEM;
        }

        /* Save numBdes for bpl2sgl */
        nvmewqeq->rsvd2 = 1;
        nvmewqeq->hba_wqidx = 0;
        nvmewqeq->context3 = &dmabuf;
        dmabuf.virt = &bpl;
        bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
        bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
        bpl.tus.f.bdeSize = rsp->rsplen;
        bpl.tus.f.bdeFlags = 0;
        bpl.tus.w = le32_to_cpu(bpl.tus.w);

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;

        lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
                         ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

        rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
        if (rc == WQE_SUCCESS) {
                /*
                 * Okay to repost buffer here, but wait till cmpl
                 * before freeing ctxp and iocbq.
                 */
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                ctxp->rqb_buffer = 0;
                atomic_inc(&nvmep->xmt_ls_rsp);
                return 0;
        }

        /* Give back resources */
        atomic_inc(&nvmep->xmt_ls_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6151 LS Drop IO x%x: Issue %d\n",
                        ctxp->oxid, rc);

        lpfc_nlp_put(nvmewqeq->context1);

        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
        atomic_inc(&nvmep->xmt_ls_abort);
        lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
        return -ENXIO;
}
static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_iocbq *nvmewqeq;
        int rc;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->ktime_on) {
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_nvme_status = ktime_get_ns();
                else
                        ctxp->ts_nvme_data = ktime_get_ns();
        }
        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                int id = smp_processor_id();
                ctxp->cpu = id;
                if (id < LPFC_CHECK_CPU_CNT)
                        phba->cpucheck_xmt_io[id]++;
                if (rsp->hwqid != id) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                        "6705 CPU Check OP: "
                                        "cpu %d expect %d\n",
                                        id, rsp->hwqid);
                        ctxp->cpu = rsp->hwqid;
                }
        }
#endif

        /* Sanity check */
        if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
            (ctxp->state == LPFC_NVMET_STE_ABORT)) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6102 IO xri x%x aborted\n",
                                ctxp->oxid);
                return -ENXIO;
        }

        nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
        if (nvmewqeq == NULL) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6152 FCP Drop IO x%x: Prep\n",
                                ctxp->oxid);
                return -ENXIO;
        }

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;
        nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
        ctxp->wqeq->hba_wqidx = rsp->hwqid;

        lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
                         ctxp->oxid, rsp->op, rsp->rsplen);

        ctxp->flag |= LPFC_NVMET_IO_INP;
        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
        if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (!phba->ktime_on)
                        return 0;
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_status_wqput = ktime_get_ns();
                else
                        ctxp->ts_data_wqput = ktime_get_ns();
#endif
                return 0;
        }

        /* Give back resources */
        atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6153 FCP Drop IO x%x: Issue: %d\n",
                        ctxp->oxid, rc);

        ctxp->wqeq->hba_wqidx = 0;
        nvmewqeq->context2 = NULL;
        nvmewqeq->context3 = NULL;
        return -ENXIO;
}
static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
        struct lpfc_nvmet_tgtport *tport = targetport->private;

        /* release any threads waiting for the unreg to complete */
        complete(&tport->tport_unreg_done);
}
static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
                         struct nvmefc_tgt_fcp_req *req)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        unsigned long flags;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
                        ctxp->oxid, ctxp->flag, ctxp->state);

        lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
                         ctxp->oxid, ctxp->flag, ctxp->state);

        atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

        spin_lock_irqsave(&ctxp->ctxlock, flags);

        /* Since iaab/iaar are NOT set, we need to check
         * if the firmware is in process of aborting IO
         */
        if (ctxp->flag & LPFC_NVMET_XBUSY) {
                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
                return;
        }
        ctxp->flag |= LPFC_NVMET_ABORT_OP;

        /* A state of LPFC_NVMET_STE_RCV means we have just received
         * the NVME command and have not started processing it
         * (by issuing any IO WQEs on this exchange yet).
         */
        if (ctxp->state == LPFC_NVMET_STE_RCV)
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                 ctxp->oxid);
        else
                lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                               ctxp->oxid);
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);
}
static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
                           struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        unsigned long flags;
        bool aborting = false;

        if (ctxp->state != LPFC_NVMET_STE_DONE &&
            ctxp->state != LPFC_NVMET_STE_ABORT) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6413 NVMET release bad state %d %d oxid x%x\n",
                                ctxp->state, ctxp->entry_cnt, ctxp->oxid);
        }

        spin_lock_irqsave(&ctxp->ctxlock, flags);
        if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
            (ctxp->flag & LPFC_NVMET_XBUSY)) {
                aborting = true;
                /* let the abort path do the real release */
                lpfc_nvmet_defer_release(phba, ctxp);
        }
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);

        lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
                         ctxp->state, aborting);

        atomic_inc(&lpfc_nvmep->xmt_fcp_release);

        if (aborting)
                return;

        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
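/*
 * Entry points handed to the nvmet-fc transport. The transport invokes
 * xmt_ls_rsp/fcp_op/fcp_abort/fcp_req_release for commands the driver
 * delivered via nvmet_fc_rcv_ls_req()/nvmet_fc_rcv_fcp_req() in the
 * unsolicited receive paths below.
 */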
static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
        .fcp_op         = lpfc_nvmet_xmt_fcp_op,
        .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
        .fcp_req_release = lpfc_nvmet_xmt_fcp_release,

        .max_hw_queues  = 1,
        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .dma_boundary = 0xFFFFFFFF,

        /* optional features */
        .target_features = 0,
        /* sizes of additional private data for data structures */
        .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};
void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
        struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
        unsigned long flags;

        spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
        spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
        list_for_each_entry_safe(ctx_buf, next_ctx_buf,
                        &phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) {
                spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
                list_del_init(&ctx_buf->list);
                spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
                __lpfc_clear_active_sglq(phba,
                                         ctx_buf->sglq->sli4_lxritag);
                ctx_buf->sglq->state = SGL_FREED;
                ctx_buf->sglq->ndlp = NULL;

                spin_lock(&phba->sli4_hba.sgl_list_lock);
                list_add_tail(&ctx_buf->sglq->list,
                              &phba->sli4_hba.lpfc_nvmet_sgl_list);
                spin_unlock(&phba->sli4_hba.sgl_list_lock);

                lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
                kfree(ctx_buf->context);
        }
        list_for_each_entry_safe(ctx_buf, next_ctx_buf,
                        &phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) {
                spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
                list_del_init(&ctx_buf->list);
                spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
                __lpfc_clear_active_sglq(phba,
                                         ctx_buf->sglq->sli4_lxritag);
                ctx_buf->sglq->state = SGL_FREED;
                ctx_buf->sglq->ndlp = NULL;

                spin_lock(&phba->sli4_hba.sgl_list_lock);
                list_add_tail(&ctx_buf->sglq->list,
                              &phba->sli4_hba.lpfc_nvmet_sgl_list);
                spin_unlock(&phba->sli4_hba.sgl_list_lock);

                lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
                kfree(ctx_buf->context);
        }
        spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
        spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
}
static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
        struct lpfc_nvmet_ctxbuf *ctx_buf;
        struct lpfc_iocbq *nvmewqe;
        union lpfc_wqe128 *wqe;
        int i;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
                        "6403 Allocate NVMET resources for %d XRIs\n",
                        phba->sli4_hba.nvmet_xri_cnt);

        /* For all nvmet xris, allocate resources needed to process a
         * received command on a per xri basis.
         */
        for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
                ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
                if (!ctx_buf) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6404 Ran out of memory for NVMET\n");
                        return -ENOMEM;
                }

                ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
                                           GFP_KERNEL);
                if (!ctx_buf->context) {
                        kfree(ctx_buf);
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6405 Ran out of NVMET "
                                        "context memory\n");
                        return -ENOMEM;
                }
                ctx_buf->context->ctxbuf = ctx_buf;
                ctx_buf->context->state = LPFC_NVMET_STE_FREE;

                ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
                if (!ctx_buf->iocbq) {
                        kfree(ctx_buf->context);
                        kfree(ctx_buf);
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6406 Ran out of NVMET iocb/WQEs\n");
                        return -ENOMEM;
                }
                ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
                nvmewqe = ctx_buf->iocbq;
                wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
                /* Initialize WQE */
                memset(wqe, 0, sizeof(union lpfc_wqe));
                /* Word 7 */
                bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
                bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
                bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
                /* Word 10 */
                bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
                bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
                bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);

                ctx_buf->iocbq->context1 = NULL;
                spin_lock(&phba->sli4_hba.sgl_list_lock);
                ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
                spin_unlock(&phba->sli4_hba.sgl_list_lock);
                if (!ctx_buf->sglq) {
                        lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
                        kfree(ctx_buf->context);
                        kfree(ctx_buf);
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6407 Ran out of NVMET XRIs\n");
                        return -ENOMEM;
                }
                spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
                list_add_tail(&ctx_buf->list,
                              &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
                spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
        }
        phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt;
        return 0;
}
int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmet_fc_port_info pinfo;
        int error;

        if (phba->targetport)
                return 0;

        error = lpfc_nvmet_setup_io_context(phba);
        if (error)
                return error;

        memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
        pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
        pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
        pinfo.port_id = vport->fc_myDID;

        /* Limit to LPFC_MAX_NVME_SEG_CNT.
         * For now need + 1 to get around NVME transport logic.
         */
        if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
                                "6400 Reducing sg segment cnt to %d\n",
                                LPFC_MAX_NVME_SEG_CNT);
                phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
        } else {
                phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
        }
        lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
        lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
        lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
                                           NVMET_FCTGTFEAT_CMD_IN_ISR |
                                           NVMET_FCTGTFEAT_OPDONE_IN_ISR;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
                                             &phba->pcidev->dev,
                                             &phba->targetport);
#else
        error = -ENOENT;
#endif
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6025 Cannot register NVME targetport "
                                "x%x\n", error);
                phba->targetport = NULL;

                lpfc_nvmet_cleanup_io_context(phba);

        } else {
                tgtp = (struct lpfc_nvmet_tgtport *)
                        phba->targetport->private;
                tgtp->phba = phba;

                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                                "6026 Registered NVME "
                                "targetport: %p, private %p "
                                "portnm %llx nodenm %llx\n",
                                phba->targetport, tgtp,
                                pinfo.port_name, pinfo.node_name);

                atomic_set(&tgtp->rcv_ls_req_in, 0);
                atomic_set(&tgtp->rcv_ls_req_out, 0);
                atomic_set(&tgtp->rcv_ls_req_drop, 0);
                atomic_set(&tgtp->xmt_ls_abort, 0);
                atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
                atomic_set(&tgtp->xmt_ls_rsp, 0);
                atomic_set(&tgtp->xmt_ls_drop, 0);
                atomic_set(&tgtp->xmt_ls_rsp_error, 0);
                atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
                atomic_set(&tgtp->xmt_fcp_drop, 0);
                atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
                atomic_set(&tgtp->xmt_fcp_read, 0);
                atomic_set(&tgtp->xmt_fcp_write, 0);
                atomic_set(&tgtp->xmt_fcp_rsp, 0);
                atomic_set(&tgtp->xmt_fcp_release, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
                atomic_set(&tgtp->xmt_fcp_abort, 0);
                atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
                atomic_set(&tgtp->xmt_abort_unsol, 0);
                atomic_set(&tgtp->xmt_abort_sol, 0);
                atomic_set(&tgtp->xmt_abort_rsp, 0);
                atomic_set(&tgtp->xmt_abort_rsp_error, 0);
        }
        return error;
}
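/*
 * The atomic counters initialized in lpfc_nvmet_create_targetport() above
 * are statistics only: they are incremented on the IO paths and, given the
 * lpfc_debugfs.h include, are presumably exported through debugfs rather
 * than driving any driver logic.
 */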
int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;

        if (!phba->targetport)
                return 0;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                         "6007 Update NVMET port %p did x%x\n",
                         phba->targetport, vport->fc_myDID);

        phba->targetport->port_id = vport->fc_myDID;
        return 0;
}
/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
                            struct sli4_wcqe_xri_aborted *axri)
{
        uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
        uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
        struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
        struct lpfc_nodelist *ndlp;
        unsigned long iflag = 0;
        int rrq_empty = 0;
        bool released = false;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
                return;
        spin_lock_irqsave(&phba->hbalock, iflag);
        spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
        list_for_each_entry_safe(ctxp, next_ctxp,
                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 list) {
                if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
                        continue;

                /* Check if we already received a free context call
                 * and we have completed processing an abort situation.
                 */
                if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
                    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
                        list_del(&ctxp->list);
                        released = true;
                }
                ctxp->flag &= ~LPFC_NVMET_XBUSY;
                spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);

                rrq_empty = list_empty(&phba->active_rrq_list);
                spin_unlock_irqrestore(&phba->hbalock, iflag);
                ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
                if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
                    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
                     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
                        lpfc_set_rrq_active(phba, ndlp,
                                ctxp->ctxbuf->sglq->sli4_lxritag,
                                rxid, 1);
                        lpfc_sli4_abts_err_handler(phba, ndlp, axri);
                }

                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                                "6318 XB aborted oxid %x flg x%x (%x)\n",
                                ctxp->oxid, ctxp->flag, released);
                if (released)
                        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

                if (rrq_empty)
                        lpfc_worker_wake_up(phba);
                return;
        }
        spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
        spin_unlock_irqrestore(&phba->hbalock, iflag);
}
int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
                           struct fc_frame_header *fc_hdr)

{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
        struct nvmefc_tgt_fcp_req *rsp;
        uint16_t xri;
        unsigned long iflag = 0;

        xri = be16_to_cpu(fc_hdr->fh_ox_id);

        spin_lock_irqsave(&phba->hbalock, iflag);
        spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
        list_for_each_entry_safe(ctxp, next_ctxp,
                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 list) {
                if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
                        continue;

                spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
                spin_unlock_irqrestore(&phba->hbalock, iflag);

                spin_lock_irqsave(&ctxp->ctxlock, iflag);
                ctxp->flag |= LPFC_NVMET_ABTS_RCV;
                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

                lpfc_nvmeio_data(phba,
                        "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
                        xri, smp_processor_id(), 0);

                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                                "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);

                rsp = &ctxp->ctx.fcp_req;
                nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);

                /* Respond with BA_ACC accordingly */
                lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
                return 0;
        }
        spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
        spin_unlock_irqrestore(&phba->hbalock, iflag);

        lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
                         xri, smp_processor_id(), 1);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);

        /* Respond with BA_RJT accordingly */
        lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
#endif
        return 0;
}
void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_tgtport *tgtp;

        if (phba->nvmet_support == 0)
                return;
        if (phba->targetport) {
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
                init_completion(&tgtp->tport_unreg_done);
                nvmet_fc_unregister_targetport(phba->targetport);
                wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
                lpfc_nvmet_cleanup_io_context(phba);
        }
        phba->targetport = NULL;
#endif
}
/**
 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                           struct hbq_dmabuf *nvmebuf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t *payload;
        uint32_t size, oxid, sid, rc;

        if (!nvmebuf || !phba->targetport) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6154 LS Drop IO\n");
                oxid = 0;
                size = 0;
                sid = 0;
                ctxp = NULL;
                goto dropit;
        }

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        payload = (uint32_t *)(nvmebuf->dbuf.virt);
        fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
        size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
        sid = sli4_sid_from_fc_hdr(fc_hdr);

        ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
        if (ctxp == NULL) {
                atomic_inc(&tgtp->rcv_ls_req_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6155 LS Drop IO x%x: Alloc\n",
                                oxid);
dropit:
                lpfc_nvmeio_data(phba, "NVMET LS DROP: "
                                 "xri x%x sz %d from %06x\n",
                                 oxid, size, sid);
                if (nvmebuf)
                        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                return;
        }
        ctxp->phba = phba;
        ctxp->size = size;
        ctxp->oxid = oxid;
        ctxp->sid = sid;
        ctxp->wqeq = NULL;
        ctxp->state = LPFC_NVMET_STE_LS_RCV;
        ctxp->entry_cnt = 1;
        ctxp->rqb_buffer = (void *)nvmebuf;

        lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
                         oxid, size, sid);
        /*
         * The calling sequence should be:
         * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
         * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
         */
        atomic_inc(&tgtp->rcv_ls_req_in);
        rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
                                 payload, size);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
                        "%08x %08x %08x\n", size, rc,
                        *payload, *(payload+1), *(payload+2),
                        *(payload+3), *(payload+4), *(payload+5));

        if (rc == 0) {
                atomic_inc(&tgtp->rcv_ls_req_out);
                return;
        }

        lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
                         oxid, size, sid);

        atomic_inc(&tgtp->rcv_ls_req_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
                        ctxp->oxid, rc);

        /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
        lpfc_in_buf_free(phba, &nvmebuf->dbuf);

        atomic_inc(&tgtp->xmt_ls_abort);
        lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
#endif
}
/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                            struct lpfc_sli_ring *pring,
                            struct rqb_dmabuf *nvmebuf,
                            uint64_t isr_timestamp)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        struct lpfc_nvmet_ctxbuf *ctx_buf;
        uint32_t *payload;
        uint32_t size, oxid, sid, rc, qno;
        unsigned long iflag;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t id;
#endif

        ctx_buf = NULL;
        if (!nvmebuf || !phba->targetport) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6157 NVMET FCP Drop IO\n");
                oxid = 0;
                size = 0;
                sid = 0;
                ctxp = NULL;
                goto dropit;
        }

        spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
        if (phba->sli4_hba.nvmet_ctx_get_cnt) {
                list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
                                 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
                phba->sli4_hba.nvmet_ctx_get_cnt--;
        } else {
                spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
                if (phba->sli4_hba.nvmet_ctx_put_cnt) {
                        list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list,
                                    &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
                        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
                        phba->sli4_hba.nvmet_ctx_get_cnt =
                                phba->sli4_hba.nvmet_ctx_put_cnt;
                        phba->sli4_hba.nvmet_ctx_put_cnt = 0;
                        spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);

                        list_remove_head(
                                &phba->sli4_hba.lpfc_nvmet_ctx_get_list,
                                ctx_buf, struct lpfc_nvmet_ctxbuf, list);
                        phba->sli4_hba.nvmet_ctx_get_cnt--;
                } else {
                        spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
                }
        }
        spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);

        fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
        size = nvmebuf->bytes_recv;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
                id = smp_processor_id();
                if (id < LPFC_CHECK_CPU_CNT)
                        phba->cpucheck_rcv_io[id]++;
        }
#endif

        lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
                         oxid, size, smp_processor_id());

        if (!ctx_buf) {
                /* Queue this NVME IO to process later */
                spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
                list_add_tail(&nvmebuf->hbuf.list,
                              &phba->sli4_hba.lpfc_nvmet_io_wait_list);
                phba->sli4_hba.nvmet_io_wait_cnt++;
                phba->sli4_hba.nvmet_io_wait_total++;
                spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
                                       iflag);

                /* Post a brand new DMA buffer to RQ */
                qno = nvmebuf->idx;
                lpfc_post_rq_buffer(
                        phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
                        phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
                return;
        }

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        payload = (uint32_t *)(nvmebuf->dbuf.virt);
        sid = sli4_sid_from_fc_hdr(fc_hdr);

        ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
        if (ctxp->state != LPFC_NVMET_STE_FREE) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6414 NVMET Context corrupt %d %d oxid x%x\n",
                                ctxp->state, ctxp->entry_cnt, ctxp->oxid);
        }
        ctxp->wqeq = NULL;
        ctxp->txrdy = NULL;
        ctxp->offset = 0;
        ctxp->phba = phba;
        ctxp->size = size;
        ctxp->oxid = oxid;
        ctxp->sid = sid;
        ctxp->state = LPFC_NVMET_STE_RCV;
        ctxp->entry_cnt = 1;
        ctxp->flag = 0;
        ctxp->ctxbuf = ctx_buf;
        spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->ktime_on) {
                ctxp->ts_isr_cmd = isr_timestamp;
                ctxp->ts_cmd_nvme = ktime_get_ns();
                ctxp->ts_nvme_data = 0;
                ctxp->ts_data_wqput = 0;
                ctxp->ts_isr_data = 0;
                ctxp->ts_data_nvme = 0;
                ctxp->ts_nvme_status = 0;
                ctxp->ts_status_wqput = 0;
                ctxp->ts_isr_status = 0;
                ctxp->ts_status_nvme = 0;
        }
#endif

        atomic_inc(&tgtp->rcv_fcp_cmd_in);
        /*
         * The calling sequence should be:
         * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
         * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
         * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
         * the NVME command / FC header is stored, so we are free to repost
         * the buffer.
         */
        rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
                                  payload, size);

        /* Process FCP command */
        if (rc == 0) {
                atomic_inc(&tgtp->rcv_fcp_cmd_out);
                lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
                return;
        }

        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
                        ctxp->oxid, rc,
                        atomic_read(&tgtp->rcv_fcp_cmd_in),
                        atomic_read(&tgtp->rcv_fcp_cmd_out),
                        atomic_read(&tgtp->xmt_fcp_release));
dropit:
        lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
                         oxid, size, sid);
        if (oxid) {
                lpfc_nvmet_defer_release(phba, ctxp);
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
                lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
                return;
        }

        if (ctx_buf)
                lpfc_nvmet_ctxbuf_post(phba, ctx_buf);

        if (nvmebuf)
                lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
#endif
}
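/*
 * When no context buffer is available, the command is parked on
 * lpfc_nvmet_io_wait_list above. lpfc_nvmet_ctxbuf_post() pops that list
 * when a context is freed and replays the command through
 * nvmet_fc_rcv_fcp_req(), so bursts deeper than nvmet_xri_cnt are deferred
 * rather than dropped.
 */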
/**
 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @piocb: pointer to the iocbq carrying the received nvme data structure.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                          struct lpfc_iocbq *piocb)
{
        struct lpfc_dmabuf *d_buf;
        struct hbq_dmabuf *nvmebuf;

        d_buf = piocb->context2;
        nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

        if (phba->nvmet_support == 0) {
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                return;
        }
        lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
}
/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: ISR timestamp for latency measurement.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
                           struct lpfc_sli_ring *pring,
                           struct rqb_dmabuf *nvmebuf,
                           uint64_t isr_timestamp)
{
        if (phba->nvmet_support == 0) {
                lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
                return;
        }
        lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
                                    isr_timestamp);
}
/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * This routine is used for allocating a lpfc-WQE data structure from
 * the driver lpfc-WQE free-list and preparing the WQE with the parameters
 * passed into the routine for the discovery state machine to issue an
 * Extended Link Service (NVME) command. It is a generic lpfc-WQE allocation
 * and preparation routine that is used by all the discovery state machine
 * routines; the NVME command-specific fields will be set up later by the
 * individual discovery machine routines after calling this routine to
 * allocate and prepare a generic WQE data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), and allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the WQE data structure for this WQE to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared nvme wqe data structure
 *   NULL - when nvme wqe data structure allocation/preparation failed
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
                       struct lpfc_nvmet_rcv_ctx *ctxp,
                       dma_addr_t rspbuf, uint16_t rspsize)
{
        struct lpfc_nodelist *ndlp;
        struct lpfc_iocbq *nvmewqe;
        union lpfc_wqe *wqe;

        if (!lpfc_is_link_up(phba)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6104 NVMET prep LS wqe: link err: "
                                "NPORT x%x oxid:x%x ste %d\n",
                                ctxp->sid, ctxp->oxid, ctxp->state);
                return NULL;
        }

        /* Allocate buffer for command wqe */
        nvmewqe = lpfc_sli_get_iocbq(phba);
        if (nvmewqe == NULL) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6105 NVMET prep LS wqe: No WQE: "
                                "NPORT x%x oxid x%x ste %d\n",
                                ctxp->sid, ctxp->oxid, ctxp->state);
                return NULL;
        }

        ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6106 NVMET prep LS wqe: No ndlp: "
                                "NPORT x%x oxid x%x ste %d\n",
                                ctxp->sid, ctxp->oxid, ctxp->state);
                goto nvme_wqe_free_wqeq_exit;
        }
        ctxp->wqeq = nvmewqe;

        /* prevent preparing wqe with NULL ndlp reference */
        nvmewqe->context1 = lpfc_nlp_get(ndlp);
        if (nvmewqe->context1 == NULL)
                goto nvme_wqe_free_wqeq_exit;
        nvmewqe->context2 = ctxp;

        wqe = &nvmewqe->wqe;
        memset(wqe, 0, sizeof(union lpfc_wqe));

        /* Words 0 - 2 */
        wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
        wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
        wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
        wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

        /* Word 5 */
        bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
        bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
        bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
        bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
        bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

        /* Word 6 */
        bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
        bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
               CMD_XMIT_SEQUENCE64_WQE);
        bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
        bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

        /* Word 8 */
        wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

        /* Word 9 */
        bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
        /* Needs to be set by caller */
        bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

        /* Word 10 */
        bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
        bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
               LPFC_WQE_LENLOC_WORD12);
        bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

        /* Word 11 */
        bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
               LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
               OTHER_COMMAND);

        /* Word 12 */
        wqe->xmit_sequence.xmit_len = rspsize;

        nvmewqe->retry = 1;
        nvmewqe->vport = phba->pport;
        nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
        nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

        /* Xmit NVMET response to remote NPORT <did> */
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6039 Xmit NVMET LS response to remote "
                        "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
                        ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
                        rspsize);
        return nvmewqe;

nvme_wqe_free_wqeq_exit:
        nvmewqe->context2 = NULL;
        nvmewqe->context3 = NULL;
        lpfc_sli_release_iocbq(phba, nvmewqe);
        return NULL;
}
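/*
 * lpfc_nvmet_prep_fcp_wqe() below builds one of three WQE flavors based on
 * the nvmefc op: FCP_TSEND64 for READDATA/READDATA_RSP, FCP_TRECEIVE64
 * (with a driver-built transfer-ready payload) for WRITEDATA, and FCP_TRSP64
 * for RSP. The "Word n" comments track which SLI-4 WQE word is being filled.
 */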
1734 static struct lpfc_iocbq
*
1735 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba
*phba
,
1736 struct lpfc_nvmet_rcv_ctx
*ctxp
)
1738 struct nvmefc_tgt_fcp_req
*rsp
= &ctxp
->ctx
.fcp_req
;
1739 struct lpfc_nvmet_tgtport
*tgtp
;
1740 struct sli4_sge
*sgl
;
1741 struct lpfc_nodelist
*ndlp
;
1742 struct lpfc_iocbq
*nvmewqe
;
1743 struct scatterlist
*sgel
;
1744 union lpfc_wqe128
*wqe
;
1746 dma_addr_t physaddr
;
1750 if (!lpfc_is_link_up(phba
)) {
1751 lpfc_printf_log(phba
, KERN_ERR
, LOG_NVME_IOERR
,
1752 "6107 NVMET prep FCP wqe: link err:"
1753 "NPORT x%x oxid x%x ste %d\n",
1754 ctxp
->sid
, ctxp
->oxid
, ctxp
->state
);
1758 ndlp
= lpfc_findnode_did(phba
->pport
, ctxp
->sid
);
1759 if (!ndlp
|| !NLP_CHK_NODE_ACT(ndlp
) ||
1760 ((ndlp
->nlp_state
!= NLP_STE_UNMAPPED_NODE
) &&
1761 (ndlp
->nlp_state
!= NLP_STE_MAPPED_NODE
))) {
1762 lpfc_printf_log(phba
, KERN_ERR
, LOG_NVME_IOERR
,
1763 "6108 NVMET prep FCP wqe: no ndlp: "
1764 "NPORT x%x oxid x%x ste %d\n",
1765 ctxp
->sid
, ctxp
->oxid
, ctxp
->state
);
1769 if (rsp
->sg_cnt
> phba
->cfg_nvme_seg_cnt
) {
1770 lpfc_printf_log(phba
, KERN_ERR
, LOG_NVME_IOERR
,
1771 "6109 NVMET prep FCP wqe: seg cnt err: "
1772 "NPORT x%x oxid x%x ste %d cnt %d\n",
1773 ctxp
->sid
, ctxp
->oxid
, ctxp
->state
,
1774 phba
->cfg_nvme_seg_cnt
);
1778 tgtp
= (struct lpfc_nvmet_tgtport
*)phba
->targetport
->private;
1779 nvmewqe
= ctxp
->wqeq
;
1780 if (nvmewqe
== NULL
) {
1781 /* Allocate buffer for command wqe */
1782 nvmewqe
= ctxp
->ctxbuf
->iocbq
;
1783 if (nvmewqe
== NULL
) {
1784 lpfc_printf_log(phba
, KERN_ERR
, LOG_NVME_IOERR
,
1785 "6110 NVMET prep FCP wqe: No "
1786 "WQE: NPORT x%x oxid x%x ste %d\n",
1787 ctxp
->sid
, ctxp
->oxid
, ctxp
->state
);
1790 ctxp
->wqeq
= nvmewqe
;
1791 xc
= 0; /* create new XRI */
1792 nvmewqe
->sli4_lxritag
= NO_XRI
;
1793 nvmewqe
->sli4_xritag
= NO_XRI
;
1797 if (((ctxp
->state
== LPFC_NVMET_STE_RCV
) &&
1798 (ctxp
->entry_cnt
== 1)) ||
1799 (ctxp
->state
== LPFC_NVMET_STE_DATA
)) {
1800 wqe
= (union lpfc_wqe128
*)&nvmewqe
->wqe
;
1802 lpfc_printf_log(phba
, KERN_ERR
, LOG_NVME_IOERR
,
1803 "6111 Wrong state NVMET FCP: %d cnt %d\n",
1804 ctxp
->state
, ctxp
->entry_cnt
);
1808 sgl
= (struct sli4_sge
*)ctxp
->ctxbuf
->sglq
->sgl
;
1810 case NVMET_FCOP_READDATA
:
1811 case NVMET_FCOP_READDATA_RSP
:
1812 /* Words 0 - 2 : The first sg segment */
1814 physaddr
= sg_dma_address(sgel
);
1815 wqe
->fcp_tsend
.bde
.tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
1816 wqe
->fcp_tsend
.bde
.tus
.f
.bdeSize
= sg_dma_len(sgel
);
1817 wqe
->fcp_tsend
.bde
.addrLow
= cpu_to_le32(putPaddrLow(physaddr
));
1818 wqe
->fcp_tsend
.bde
.addrHigh
=
1819 cpu_to_le32(putPaddrHigh(physaddr
));
1822 wqe
->fcp_tsend
.payload_offset_len
= 0;
1825 wqe
->fcp_tsend
.relative_offset
= ctxp
->offset
;
1830 bf_set(wqe_ctxt_tag
, &wqe
->fcp_tsend
.wqe_com
,
1831 phba
->sli4_hba
.rpi_ids
[ndlp
->nlp_rpi
]);
1832 bf_set(wqe_xri_tag
, &wqe
->fcp_tsend
.wqe_com
,
1833 nvmewqe
->sli4_xritag
);
1836 bf_set(wqe_cmnd
, &wqe
->fcp_tsend
.wqe_com
, CMD_FCP_TSEND64_WQE
);
1839 wqe
->fcp_tsend
.wqe_com
.abort_tag
= nvmewqe
->iotag
;
1842 bf_set(wqe_reqtag
, &wqe
->fcp_tsend
.wqe_com
, nvmewqe
->iotag
);
1843 bf_set(wqe_rcvoxid
, &wqe
->fcp_tsend
.wqe_com
, ctxp
->oxid
);
1846 bf_set(wqe_nvme
, &wqe
->fcp_tsend
.wqe_com
, 1);
1847 bf_set(wqe_dbde
, &wqe
->fcp_tsend
.wqe_com
, 1);
1848 bf_set(wqe_iod
, &wqe
->fcp_tsend
.wqe_com
, LPFC_WQE_IOD_WRITE
);
1849 bf_set(wqe_lenloc
, &wqe
->fcp_tsend
.wqe_com
,
1850 LPFC_WQE_LENLOC_WORD12
);
1851 bf_set(wqe_ebde_cnt
, &wqe
->fcp_tsend
.wqe_com
, 0);
1852 bf_set(wqe_xc
, &wqe
->fcp_tsend
.wqe_com
, xc
);
1853 bf_set(wqe_nvme
, &wqe
->fcp_tsend
.wqe_com
, 1);
1854 if (phba
->cfg_nvme_oas
)
1855 bf_set(wqe_oas
, &wqe
->fcp_tsend
.wqe_com
, 1);
1858 bf_set(wqe_cqid
, &wqe
->fcp_tsend
.wqe_com
,
1859 LPFC_WQE_CQ_ID_DEFAULT
);
1860 bf_set(wqe_cmd_type
, &wqe
->fcp_tsend
.wqe_com
,
1864 wqe
->fcp_tsend
.fcp_data_len
= rsp
->transfer_length
;
1866 /* Setup 2 SKIP SGEs */
1870 bf_set(lpfc_sli4_sge_type
, sgl
, LPFC_SGE_TYPE_SKIP
);
1871 sgl
->word2
= cpu_to_le32(sgl
->word2
);
1877 bf_set(lpfc_sli4_sge_type
, sgl
, LPFC_SGE_TYPE_SKIP
);
1878 sgl
->word2
= cpu_to_le32(sgl
->word2
);
1881 if (rsp
->op
== NVMET_FCOP_READDATA_RSP
) {
1882 atomic_inc(&tgtp
->xmt_fcp_read_rsp
);
1883 bf_set(wqe_ar
, &wqe
->fcp_tsend
.wqe_com
, 1);
			if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
			    (rsp->rsplen == 12)) {
				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
			} else {
				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
				       ((rsp->rsplen >> 2) - 1));
				memcpy(&wqe->words[16], rsp->rspaddr,
				       rsp->rsplen);
			}
		} else {
			atomic_inc(&tgtp->xmt_fcp_read);

			bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
		}
		break;
	case NVMET_FCOP_WRITEDATA:
		/* Words 0 - 2 : The first sg segment */
		txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
				       GFP_KERNEL, &physaddr);
		if (!txrdy) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6041 Bad txrdy buffer: oxid x%x\n",
					ctxp->oxid);
			return NULL;
		}
		ctxp->txrdy = txrdy;
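		/* The txrdy DMA buffer carries the Transfer Ready payload
		 * the adapter sends before accepting the initiator's write
		 * data; word 1 is filled below with the expected length.
		 */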
		ctxp->txrdy_phys = physaddr;
		wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
		wqe->fcp_treceive.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_treceive.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

		/* Word 4 */
		wqe->fcp_treceive.relative_offset = ctxp->offset;
		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */
		bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
		       CMD_FCP_TRECEIVE64_WQE);

		/* Word 8 */
		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
		bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
		       FCP_COMMAND_TRECEIVE);
		bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
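		/* fcp_tsend and fcp_treceive are overlapping views of the
		 * same WQE union, so the SUP bit and the word-12 data
		 * length can be written through the tsend fields above.
		 */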
		/* Setup 1 TXRDY and 1 SKIP SGE */
		txrdy[0] = 0;
		txrdy[1] = cpu_to_be32(rsp->transfer_length);
		txrdy[2] = 0;

		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		atomic_inc(&tgtp->xmt_fcp_write);
		break;
	case NVMET_FCOP_RSP:
		/* Words 0 - 2 */
		physaddr = rsp->rspdma;
		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
		wqe->fcp_trsp.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_trsp.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_trsp.response_len = rsp->rsplen;

		/* Word 4 */
		wqe->fcp_trsp.rsvd_4_5[0] = 0;
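		/* TRSP only sends the FCP response frame; data movement for
		 * the exchange has already been done by TSEND/TRECEIVE.
		 */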
		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */
		bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
		bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);

		/* Word 8 */
		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
		bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
		bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
		       FCP_COMMAND_TRSP);
		bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
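		/* LPFC_NVMET_SUCCESS_LEN is the standard all-zero response
		 * and needs no payload on the wire; longer responses are
		 * copied inline into WQE words 16 and up below.
		 */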
		if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
			/* Good response - all zero's on wire */
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
		} else {
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
			       ((rsp->rsplen >> 2) - 1));
			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
		}

		/* Use rspbuf, NOT sg list */
		rsp->sg_cnt = 0;
		sgl->word2 = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;
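	/* Build one DATA SGE per scatter-gather element, advancing the
	 * relative offset so the adapter places each fragment correctly;
	 * the final SGE is marked LAST.
	 */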
	for (i = 0; i < rsp->sg_cnt; i++) {
		sgel = &rsp->sg[i];
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i + 1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		sgl++;
		ctxp->offset += cnt;
	}
	ctxp->state = LPFC_NVMET_STE_DATA;
	return nvmewqe;
}
/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP cmds
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;
	unsigned long flags;
	bool released = false;
	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	ctxp->state = LPFC_NVMET_STE_DONE;
	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		list_del(&ctxp->list);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP cmds
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t status, result;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;
	if (!ctxp) {
		/* if context is clear, related io already complete */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
	/* Sanity check */
	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVMET_STE_DONE;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		list_del(&ctxp->list);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl xri x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for LS cmds
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_ls_abort_cmpl);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);
	if (!ctxp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6415 NVMET LS Abort No ctx: WCQE: "
				"%08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);

		lpfc_sli_release_iocbq(phba, cmdwqe);
		return;
	}
	if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6416 NVMET LS abort cmpl state mismatch: "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	kfree(ctxp);
}
static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_nvmet_rcv_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *wqe_abts;
	struct lpfc_nodelist *ndlp;
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}
	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that were initialized in lpfc_sli4_nvmet_alloc.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));
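	/* This unsolicited abort is sent as an XMIT_SEQUENCE WQE carrying
	 * a hand-built BLS ABTS frame (R_CTL = BA_ABTS, TYPE = BLS),
	 * rather than as a firmware ABORT_XRI command.
	 */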
	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);
	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_nvmet_rcv_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *abts_wqe;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc;
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}
	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}
	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	if (!ctxp->abort_wqeq) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	abts_wqe = &abts_wqeq->wqe;
	ctxp->state = LPFC_NVMET_STE_ABORT;
	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}
	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}
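	/* Unlike the unsolicited path, this abort is expressed as an
	 * ABORT_XRI_CX command WQE that references the outstanding
	 * exchange by XRI, built below.
	 */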
	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* WQEs are reused.  Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));

	/* word 3 */
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_wqeq->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = 0;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->vport = phba->pport;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_nvmet_rcv_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}
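	/* A context already back in the FREE state raced with release;
	 * issuing an ABTS against it would touch a recycled buffer, so
	 * the abort is failed back instead.
	 */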
	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVMET_STE_ABORT;
	ctxp->entry_cnt++;
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;
	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		return 0;
	}
aerr:
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
			ctxp->oxid, rc);
	return 1;
}
static int
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
				struct lpfc_nvmet_rcv_ctx *ctxp,
				uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *wqe_abts;
	unsigned long flags;
	int rc;
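	/* entry_cnt tracks LS progress: 1 after the request was received,
	 * 2 after the response was queued. An abort is expected at either
	 * point; any other combination is logged but still forced to
	 * LS_ABORT so teardown can proceed.
	 */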
	if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
	}
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;
	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}
	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = 0;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	kfree(ctxp);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);