/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_nvmet_rcv_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_nvmet_rcv_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_nvmet_rcv_ctx *,
					    uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
					   struct lpfc_nvmet_rcv_ctx *,
					   uint32_t, uint16_t);
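
/*
 * The helpers declared above build the SLI4 Work Queue Entries (WQEs) for LS
 * and FCP responses and issue ABTS sequences.  They are driven by the
 * nvmet_fc_target_template ops and the WQE completion handlers defined below.
 */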
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
	unsigned long iflag;

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
				       iflag);
		return;
	}
	ctxp->flag |= LPFC_NVMET_CTX_RLS;
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
}
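
/* Note: a context parked on lpfc_abts_nvmet_ctx_list above is recycled later
 * by whichever path observes the last of LPFC_NVMET_ABORT_OP/LPFC_NVMET_XBUSY
 * clear; that path deletes it from the list and calls lpfc_nvmet_rq_post().
 */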
/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_ls_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;
	if (!phba->targetport)
		goto out;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (status)
		atomic_inc(&tgtp->xmt_ls_rsp_error);
	else
		atomic_inc(&tgtp->xmt_ls_rsp_cmpl);

out:
	ctxp = cmdwqe->context2;
	rsp = &ctxp->ctx.ls_req;

	lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
			 ctxp->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
			ctxp, status, result);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	rsp->done(rsp);
	kfree(ctxp);
}
/**
 * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA buffer is associated with
 * @ctxp: context to clean up
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer in the appropriate way given by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
		   struct lpfc_dmabuf *mp)
{
	if (ctxp) {
		if (ctxp->flag)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6314 rq_post ctx xri x%x flag x%x\n",
					ctxp->oxid, ctxp->flag);

		if (ctxp->txrdy) {
			pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
				      ctxp->txrdy_phys);
			ctxp->txrdy = NULL;
			ctxp->txrdy_phys = 0;
		}
		ctxp->state = LPFC_NVMET_STE_FREE;
	}
	lpfc_rq_buf_free(phba, mp);
}
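
/* lpfc_rq_buf_free() reposts the returned buffer to its SLI4 receive queue,
 * so the same DMA buffer can immediately carry a new unsolicited command.
 */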
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_nvmet_rcv_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 * FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 * TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to command is completed on wire.
	 * (Segments 1 thru 8) for READDATA / WRITEDATA
	 * (Segments 1 thru 4) for READDATA_RSP
	 */
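	/*
	 * Each segment below is a telescoping difference from ts_isr_cmd:
	 * e.g. seg3 = (ts_data_wqput - ts_isr_cmd) - seg1 - seg2, so
	 * seg1..seg5 always sum to (ts_data_nvme - ts_isr_cmd).
	 */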
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
	seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
		seg1 - seg2;
	seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
		seg1 - seg2 - seg3;
	seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
		seg1 - seg2 - seg3 - seg4;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = (ctxp->ts_nvme_status -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 - seg4 - seg5;
		seg7 = (ctxp->ts_status_wqput -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 -
			seg4 - seg5 - seg6;
		seg8 = (ctxp->ts_isr_status -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 - seg4 -
			seg5 - seg6 - seg7;
		seg9 = (ctxp->ts_status_nvme -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 - seg4 -
			seg5 - seg6 - seg7 - seg8;
		seg10 = (ctxp->ts_isr_status -
			ctxp->ts_isr_cmd);
	} else {
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}
	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;

	if (!seg6)
		goto out;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;
out:
	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif
/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result, op, start_clean;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVMET_IO_INP;

	rsp = &ctxp->ctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_error);

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVMET_XBUSY;

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
					ctxp->oxid, status, result);
		} else {
			ctxp->flag &= ~LPFC_NVMET_XBUSY;
		}
	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVMET_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
			id = smp_processor_id();
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6703 CPU Check cmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
				phba->cpucheck_cmpl_io[id]++;
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
		start_clean = offsetof(struct lpfc_iocbq, wqe);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
			id = smp_processor_id();
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6704 CPU Check cmdcmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
				phba->cpucheck_ccmpl_io[id]++;
		}
#endif
		rsp->done(rsp);
	}
}
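
/* The next five routines are the nvmet_fc_target_template entry points (see
 * lpfc_tgttemplate below).  The NVME target transport calls them to send LS
 * responses, start FCP data/response ops, abort, release IO contexts, and
 * tear down the targetport.
 */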
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_ls_req *rsp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct hbq_dmabuf *nvmebuf =
		(struct hbq_dmabuf *)ctxp->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 %s: Entrypoint ctx %p %p\n", __func__,
			ctxp, tgtport);

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
					  rsp->rsplen);
	if (nvmewqeq == NULL) {
		atomic_inc(&nvmep->xmt_ls_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6150 LS Drop IO x%x: Prep\n",
				ctxp->oxid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
						ctxp->sid, ctxp->oxid);
		return -ENOMEM;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;

	lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		ctxp->rqb_buffer = 0;
		atomic_inc(&nvmep->xmt_ls_rsp);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&nvmep->xmt_ls_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6151 LS Drop IO x%x: Issue %d\n",
			ctxp->oxid, rc);

	lpfc_nlp_put(nvmewqeq->context1);

	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
	return -ENXIO;
}
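
/* Unlike the LS path, which allocates a fresh iocbq, the FCP op path reuses
 * the iocbq embedded in the command's RQ buffer (ctxp->rqb_buffer->iocbq),
 * so no per-IO iocbq allocation is needed for data movement.
 */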
static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_iocbq *nvmewqeq;
	int rc;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
		int id = smp_processor_id();

		if (id < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_xmt_io[id]++;
		if (rsp->hwqid != id) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6705 CPU Check OP: "
					"cpu %d expect %d\n",
					id, rsp->hwqid);
			ctxp->cpu = rsp->hwqid;
		}
	}
#endif

	/* Sanity check */
	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6102 IO xri x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
	if (rc == WQE_SUCCESS) {
		ctxp->flag |= LPFC_NVMET_IO_INP;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!phba->ktime_on)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}
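
/* Transport callback: invoked once the final targetport reference is
 * dropped, releasing the wait in lpfc_nvmet_destroy_targetport().
 */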
static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&tport->tport_unreg_done);
}
static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 Abort op: oxri x%x flg x%x cnt %d\n",
			ctxp->oxid, ctxp->flag, ctxp->entry_cnt);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
			 "xri x%x flg x%x cnt x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->entry_cnt);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in process of aborting IO
	 */
	if (ctxp->flag & LPFC_NVMET_XBUSY) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVMET_ABORT_OP;
	if (ctxp->flag & LPFC_NVMET_IO_INP)
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
	else
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
}
static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
			 ctxp->state);

	if (aborting)
		return;

	lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
}
static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,

	.max_hw_queues  = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};
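
/* max_hw_queues, max_sgl_segments and target_features are finalized per
 * adapter in lpfc_nvmet_create_targetport() below before registration.
 */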
int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport  *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error = 0;

	if (phba->targetport)
		return 0;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;

	/* Limit to LPFC_MAX_NVME_SEG_CNT.
	 * For now need + 1 to get around NVME transport logic.
	 */
	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
				"6400 Reducing sg segment cnt to %d\n",
				LPFC_MAX_NVME_SEG_CNT);
		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
	} else {
		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
	}
	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
					   NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED |
					   NVMET_FCTGTFEAT_CMD_IN_ISR |
					   NVMET_FCTGTFEAT_OPDONE_IN_ISR;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
#else
	error = -ENOENT;
#endif
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6025 Cannot register NVME targetport "
				"x%x\n", error);
		phba->targetport = NULL;
	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: %p, private %p "
				"portnm %llx nodenm %llx\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name);

		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
		atomic_set(&tgtp->xmt_abort_cmpl, 0);
	}
	return error;
}
int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port %p did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
	return 0;
}
/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
			    struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
	bool released = false;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
			continue;

		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
			list_del(&ctxp->list);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVMET_XBUSY;
		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);

		rrq_empty = list_empty(&phba->active_rrq_list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			lpfc_set_rrq_active(phba, ndlp,
				ctxp->rqb_buffer->sglq->sli4_lxritag,
				rxid, 1);
			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted %x flg x%x (%x)\n",
				ctxp->oxid, ctxp->flag, released);
		if (released)
			lpfc_nvmet_rq_post(phba, ctxp,
					   &ctxp->rqb_buffer->hbuf);
		if (rrq_empty)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
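
/* Setting an RRQ (Reinstate Recovery Qualifier) for the aborted exchange
 * quarantines the local XRI from immediate reuse with that remote port until
 * the recovery qualifier is resolved.
 */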
int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
			   struct fc_frame_header *fc_hdr)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct nvmefc_tgt_fcp_req *rsp;
	uint16_t xri;
	unsigned long iflag = 0;

	xri = be16_to_cpu(fc_hdr->fh_ox_id);

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
			continue;

		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
			"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
			xri, smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);

		rsp = &ctxp->ctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);

		/* Respond with BA_ACC accordingly */
		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}
	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
			 xri, smp_processor_id(), 1);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);

	/* Respond with BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
#endif
	return 0;
}
void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;

	if (phba->nvmet_support == 0)
		return;
	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		init_completion(&tgtp->tport_unreg_done);
		nvmet_fc_unregister_targetport(phba->targetport);
		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
	}
	phba->targetport = NULL;
#endif
}
/**
 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct hbq_dmabuf *nvmebuf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;

	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6154 LS Drop IO\n");
		oxid = 0;
		size = 0;
		sid = 0;
		ctxp = NULL;
		goto dropit;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
	if (ctxp == NULL) {
		atomic_inc(&tgtp->rcv_ls_req_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6155 LS Drop IO x%x: Alloc\n",
				oxid);
dropit:
		lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
				 "xri x%x sz %d from %06x\n",
				 oxid, size, sid);
		if (nvmebuf)
			lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->wqeq = NULL;
	ctxp->state = LPFC_NVMET_STE_RCV;
	ctxp->rqb_buffer = (void *)nvmebuf;

	lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
	 */
	atomic_inc(&tgtp->rcv_ls_req_in);
	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
				 payload, size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6037 %s: ctx %p sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n", __func__, ctxp, size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (rc == 0) {
		atomic_inc(&tgtp->rcv_ls_req_out);
		return;
	}

	lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);

	atomic_inc(&tgtp->rcv_ls_req_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
			ctxp->oxid, rc);

	/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
	lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	atomic_inc(&tgtp->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
#endif
}
/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
			    struct lpfc_sli_ring *pring,
			    struct rqb_dmabuf *nvmebuf,
			    uint64_t isr_timestamp)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t id;
#endif

	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6157 FCP Drop IO\n");
		oxid = 0;
		size = 0;
		sid = 0;
		ctxp = NULL;
		goto dropit;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	size = nvmebuf->bytes_recv;
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context;
	if (ctxp == NULL) {
		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6158 FCP Drop IO x%x: Alloc\n",
				oxid);
		lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
		/* Cannot send ABTS without context */
		return;
	}
	memset(ctxp, 0, sizeof(ctxp->ctx));
	ctxp->wqeq = NULL;
	ctxp->txrdy = NULL;
	ctxp->offset = 0;
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->state = LPFC_NVMET_STE_RCV;
	ctxp->rqb_buffer = nvmebuf;
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on) {
		ctxp->ts_isr_cmd = isr_timestamp;
		ctxp->ts_cmd_nvme = ktime_get_ns();
		ctxp->ts_nvme_data = 0;
		ctxp->ts_data_wqput = 0;
		ctxp->ts_isr_data = 0;
		ctxp->ts_data_nvme = 0;
		ctxp->ts_nvme_status = 0;
		ctxp->ts_status_wqput = 0;
		ctxp->ts_isr_status = 0;
		ctxp->ts_status_nvme = 0;
	}

	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
		id = smp_processor_id();
		if (id < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_rcv_io[id]++;
	}
#endif

	lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
			 oxid, size, smp_processor_id());

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
				  payload, size);

	/* Process FCP command */
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		return;
	}

	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6159 FCP Drop IO x%x: err x%x\n",
			ctxp->oxid, rc);
dropit:
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	if (oxid) {
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		return;
	}

	if (nvmebuf) {
		nvmebuf->iocbq->hba_wqidx = 0;
		/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
		lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
	}
#endif
}
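
/* A nonzero rc from nvmet_fc_rcv_fcp_req() means the transport did not accept
 * the command; the drop path above aborts the exchange (when an OX_ID exists)
 * or reposts the RQ buffer so the receive resource is not leaked.
 */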
/**
 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @piocb: pointer to received nvme data structure.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *piocb)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *nvmebuf;

	d_buf = piocb->context2;
	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

	if (phba->nvmet_support == 0) {
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
}
/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: ISR timestamp for latency accounting.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
			   struct lpfc_sli_ring *pring,
			   struct rqb_dmabuf *nvmebuf,
			   uint64_t isr_timestamp)
{
	if (phba->nvmet_support == 0) {
		lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
		return;
	}
	lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
				    isr_timestamp);
}
/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * This routine is used for allocating a lpfc-WQE data structure from
 * the driver lpfc-WQE free-list and prepare the WQE with the parameters
 * passed into the routine for discovery state machine to issue an Extended
 * Link Service (NVME) commands. It is a generic lpfc-WQE allocation
 * and preparation routine that is used by all the discovery state machine
 * routines and the NVME command-specific fields will be later set up by
 * the individual discovery machine routines after calling this routine
 * allocating and preparing a generic WQE data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the WQE data structure for this WQE to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared nvme wqe data structure
 *   NULL - when nvme wqe data structure allocation/preparation failed
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_nvmet_rcv_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe *wqe;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6104 lpfc_nvmet_prep_ls_wqe: link err: "
				"NPORT x%x oxid:x%x\n",
				ctxp->sid, ctxp->oxid);
		return NULL;
	}

	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6105 lpfc_nvmet_prep_ls_wqe: No WQE: "
				"NPORT x%x oxid:x%x\n",
				ctxp->sid, ctxp->oxid);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6106 lpfc_nvmet_prep_ls_wqe: No ndlp: "
				"NPORT x%x oxid:x%x\n",
				ctxp->sid, ctxp->oxid);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;

	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 */
	wqe->xmit_sequence.xmit_len = rspsize;

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVME response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVME LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

nvme_wqe_free_wqeq_exit:
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}
static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
			struct lpfc_nvmet_rcv_ctx *ctxp)
{
	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
	struct lpfc_nvmet_tgtport *tgtp;
	struct sli4_sge *sgl;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	struct scatterlist *sgel;
	union lpfc_wqe128 *wqe;
	uint32_t *txrdy;
	dma_addr_t physaddr;
	int i, cnt;
	int xc = 1;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6107 lpfc_nvmet_prep_fcp_wqe: link err:"
				"NPORT x%x oxid:x%x\n", ctxp->sid,
				ctxp->oxid);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: "
				"NPORT x%x oxid:x%x\n",
				ctxp->sid, ctxp->oxid);
		return NULL;
	}

	if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
				"NPORT x%x oxid:x%x cnt %d\n",
				ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt);
		return NULL;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	nvmewqe = ctxp->wqeq;
	if (nvmewqe == NULL) {
		/* Allocate buffer for command wqe */
		nvmewqe = ctxp->rqb_buffer->iocbq;
		if (nvmewqe == NULL) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6110 lpfc_nvmet_prep_fcp_wqe: No "
					"WQE: NPORT x%x oxid:x%x\n",
					ctxp->sid, ctxp->oxid);
			return NULL;
		}
		ctxp->wqeq = nvmewqe;
		xc = 0; /* create new XRI */
		nvmewqe->sli4_lxritag = NO_XRI;
		nvmewqe->sli4_xritag = NO_XRI;
	}

	/* Sanity check */
	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
	    (ctxp->entry_cnt == 1)) ||
	    ((ctxp->state == LPFC_NVMET_STE_DATA) &&
	    (ctxp->entry_cnt > 1))) {
		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6111 Wrong state %s: %d  cnt %d\n",
				__func__, ctxp->state, ctxp->entry_cnt);
		return NULL;
	}

	sgl  = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl;
	switch (rsp->op) {
	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		/* Words 0 - 2 : The first sg segment */
		sgel = &rsp->sg[0];
		physaddr = sg_dma_address(sgel);
		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_tsend.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_tsend.payload_offset_len = 0;

		/* Word 4 */
		wqe->fcp_tsend.relative_offset = ctxp->offset;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */
		bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);

		/* Word 8 */
		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
		bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
		       FCP_COMMAND_TSEND);

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
			atomic_inc(&tgtp->xmt_fcp_read_rsp);
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
			if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
			    (rsp->rsplen == 12)) {
				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
			} else {
				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
				       ((rsp->rsplen >> 2) - 1));
				memcpy(&wqe->words[16], rsp->rspaddr,
				       rsp->rsplen);
			}
		} else {
			atomic_inc(&tgtp->xmt_fcp_read);

			bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
		}
		ctxp->state = LPFC_NVMET_STE_DATA;
		break;

	case NVMET_FCOP_WRITEDATA:
		/* Words 0 - 2 : The first sg segment */
		txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
				       GFP_KERNEL, &physaddr);
		if (!txrdy) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6041 Bad txrdy buffer: oxid x%x\n",
					ctxp->oxid);
			return NULL;
		}
		ctxp->txrdy = txrdy;
		ctxp->txrdy_phys = physaddr;
		wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
		wqe->fcp_treceive.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_treceive.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

		/* Word 4 */
		wqe->fcp_treceive.relative_offset = ctxp->offset;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */
		bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
		       CMD_FCP_TRECEIVE64_WQE);

		/* Word 8 */
		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
		bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
		       FCP_COMMAND_TRECEIVE);
		bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 1 TXRDY and 1 SKIP SGE */
		txrdy[0] = 0;
		txrdy[1] = cpu_to_be32(rsp->transfer_length);
		txrdy[2] = 0;

		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		ctxp->state = LPFC_NVMET_STE_DATA;
		atomic_inc(&tgtp->xmt_fcp_write);
		break;

	case NVMET_FCOP_RSP:
		/* Words 0 - 2 */
		physaddr = rsp->rspdma;
		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
		wqe->fcp_trsp.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_trsp.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_trsp.response_len = rsp->rsplen;

		/* Word 4 */
		wqe->fcp_trsp.rsvd_4_5[0] = 0;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */
		bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
		bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);

		/* Word 8 */
		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
		bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
		bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
		       FCP_COMMAND_TRSP);
		bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
		ctxp->state = LPFC_NVMET_STE_RSP;

		if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
			/* Good response - all zero's on wire */
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
		} else {
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
			       ((rsp->rsplen >> 2) - 1));
			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
		}

		/* Use rspbuf, NOT sg list */
		rsp->sg_cnt = 0;
		sgl->word2 = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp);
		break;

	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;

	for (i = 0; i < rsp->sg_cnt; i++) {
		sgel = &rsp->sg[i];
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i+1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		sgl++;
		ctxp->offset += cnt;
	}
	return nvmewqe;
}
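
/* The loop above converts each mapped scatterlist segment into a data SGE,
 * advancing ctxp->offset so a follow-on TSEND/TRECEIVE for the same exchange
 * resumes at the correct relative offset.
 */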
/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no lock held.
 * This function is the completion handler for NVME ABTS for FCP cmds.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;
	unsigned long flags;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_abort_cmpl);

	ctxp->state = LPFC_NVMET_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		list_del(&ctxp->list);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP
 * cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t status, result;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_abort_cmpl);

	if (!ctxp) {
		/* if context is clear, related io already complete */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	/* Sanity check */
	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVMET_STE_DONE;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		list_del(&ctxp->list);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl xri x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
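/* Unlike the solicited handler above, no lpfc_sli_release_iocbq() is done
 * here: the unsolicited abort is issued on the iocbq embedded in the
 * receive buffer (ctxp->rqb_buffer->iocbq), which is reclaimed with the
 * buffer itself rather than returned to the iocbq pool.
 */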
/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for LS
 * cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_abort_cmpl);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (ctxp) {
		cmdwqe->context2 = NULL;
		cmdwqe->context3 = NULL;
		lpfc_sli_release_iocbq(phba, cmdwqe);
		kfree(ctxp);
	} else
		lpfc_sli_release_iocbq(phba, cmdwqe);
}
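/* The LS context is freed with kfree() here and in
 * lpfc_nvmet_unsol_ls_issue_abort() below; unlike the FCP paths it is a
 * standalone allocation, not part of an rqb receive buffer, so there is
 * nothing to repost.
 */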
static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_nvmet_rcv_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->rqb_buffer->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;
	ctxp->state = LPFC_NVMET_STE_ABORT;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that were initialized in lpfc_sli4_nvmet_alloc.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}
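/* The abort built above is not an ABORT_XRI on a local exchange: it is an
 * XMIT_SEQUENCE64 WQE whose sequence header carries a BLS ABTS
 * (FC_RCTL_BA_ABTS/FC_TYPE_BLS) addressed to the remote port's RPI, with
 * wqe_rcvoxid holding the OX_ID of the exchange being aborted.
 */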
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_nvmet_rcv_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *abts_wqe;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->rqb_buffer->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	if (!ctxp->abort_wqeq) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	abts_wqe = &abts_wqeq->wqe;
	ctxp->state = LPFC_NVMET_STE_ABORT;

	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* WQEs are reused.  Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));

	/* word 3 */
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_wqeq->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = 0;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS)
		return 0;

	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_nvmet_rcv_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->rqb_buffer->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_rsp);
		return 0;
	}

aerr:
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
			ctxp->oxid, rc);
	return 1;
}
static int
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
				struct lpfc_nvmet_rcv_ctx *ctxp,
				uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *wqe_abts;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;
	lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = 0;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_rsp);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	kfree(ctxp);
	lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 0;
}
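/* Note the "no failure to an ABTS request" convention: the LS abort path
 * returns 0 on every exit. An ABTS that cannot be issued is logged and its
 * resources reclaimed; the error is not propagated to the caller.
 */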