drivers/scsi/lpfc/lpfc_nvmet.c
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
5 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
30
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
38
39 #include <../drivers/nvme/host/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41
42 #include "lpfc_version.h"
43 #include "lpfc_hw4.h"
44 #include "lpfc_hw.h"
45 #include "lpfc_sli.h"
46 #include "lpfc_sli4.h"
47 #include "lpfc_nl.h"
48 #include "lpfc_disc.h"
49 #include "lpfc.h"
50 #include "lpfc_scsi.h"
51 #include "lpfc_nvme.h"
52 #include "lpfc_nvmet.h"
53 #include "lpfc_logmsg.h"
54 #include "lpfc_crtn.h"
55 #include "lpfc_vport.h"
56 #include "lpfc_debugfs.h"
57
58 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
59 struct lpfc_nvmet_rcv_ctx *,
60 dma_addr_t rspbuf,
61 uint16_t rspsize);
62 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
63 struct lpfc_nvmet_rcv_ctx *);
64 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
65 struct lpfc_nvmet_rcv_ctx *,
66 uint32_t, uint16_t);
67 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
68 struct lpfc_nvmet_rcv_ctx *,
69 uint32_t, uint16_t);
70 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
71 struct lpfc_nvmet_rcv_ctx *,
72 uint32_t, uint16_t);
73
74 /**
75 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
76 * @phba: Pointer to HBA context object.
77 * @cmdwqe: Pointer to driver command WQE object.
78 * @wcqe: Pointer to driver response CQE object.
79 *
80 * The function is called from the SLI ring event handler with no
81 * lock held. This function is the completion handler for NVME LS
82 * commands and frees the memory resources used for them.
83 **/
84 static void
85 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
86 struct lpfc_wcqe_complete *wcqe)
87 {
88 struct lpfc_nvmet_tgtport *tgtp;
89 struct nvmefc_tgt_ls_req *rsp;
90 struct lpfc_nvmet_rcv_ctx *ctxp;
91 uint32_t status, result;
92
93 status = bf_get(lpfc_wcqe_c_status, wcqe);
94 result = wcqe->parameter;
95 if (!phba->targetport)
96 goto out;
97
98 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
99
100 if (status)
101 atomic_inc(&tgtp->xmt_ls_rsp_error);
102 else
103 atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
104
105 out:
106 ctxp = cmdwqe->context2;
107 rsp = &ctxp->ctx.ls_req;
108
109 lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
110 ctxp->oxid, status, result);
111
112 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
113 "6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
114 ctxp, status, result);
115
116 lpfc_nlp_put(cmdwqe->context1);
117 cmdwqe->context2 = NULL;
118 cmdwqe->context3 = NULL;
119 lpfc_sli_release_iocbq(phba, cmdwqe);
120 rsp->done(rsp);
121 kfree(ctxp);
122 }
123
124 /**
125 * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context
126 * @phba: HBA buffer is associated with
127 * @ctxp: context to clean up
128 * @mp: Buffer to free
129 *
130 * Description: Frees the given DMA buffer in the appropriate way given by
131 * reposting it to its associated RQ so it can be reused.
132 *
133 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
134 *
135 * Returns: None
136 **/
137 void
138 lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
139 struct lpfc_dmabuf *mp)
140 {
141 if (ctxp) {
142 if (ctxp->txrdy) {
143 pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
144 ctxp->txrdy_phys);
145 ctxp->txrdy = NULL;
146 ctxp->txrdy_phys = 0;
147 }
148 ctxp->state = LPFC_NVMET_STE_FREE;
149 }
150 lpfc_rq_buf_free(phba, mp);
151 }
152
153 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
154 static void
155 lpfc_nvmet_ktime(struct lpfc_hba *phba,
156 struct lpfc_nvmet_rcv_ctx *ctxp)
157 {
158 uint64_t seg1, seg2, seg3, seg4, seg5;
159 uint64_t seg6, seg7, seg8, seg9, seg10;
160
161 if (!phba->ktime_on)
162 return;
163
164 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
165 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
166 !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
167 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
168 !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
169 return;
170
171 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
172 return;
173 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
174 return;
175 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
176 return;
177 if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
178 return;
179 if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
180 return;
181 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
182 return;
183 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
184 return;
185 if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
186 return;
187 if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
188 return;
189 /*
190 * Segment 1 - Time from FCP command received by MSI-X ISR
191 * to FCP command is passed to NVME Layer.
192 * Segment 2 - Time from FCP command payload handed
193 * off to NVME Layer to Driver receives a Command op
194 * from NVME Layer.
195 * Segment 3 - Time from Driver receives a Command op
196 * from NVME Layer to Command is put on WQ.
197 * Segment 4 - Time from Driver WQ put is done
198 * to MSI-X ISR for Command cmpl.
199 * Segment 5 - Time from MSI-X ISR for Command cmpl to
200 * Command cmpl is passed to NVME Layer.
201 * Segment 6 - Time from Command cmpl is passed to NVME
202 * Layer to Driver receives a RSP op from NVME Layer.
203 * Segment 7 - Time from Driver receives a RSP op from
204 * NVME Layer to WQ put is done on TRSP FCP Status.
205 * Segment 8 - Time from Driver WQ put is done on TRSP
206 * FCP Status to MSI-X ISR for TRSP cmpl.
207 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
208 * TRSP cmpl is passed to NVME Layer.
209 * Segment 10 - Time from FCP command received by
210 * MSI-X ISR to command is completed on wire.
211 * (Segments 1 thru 8) for READDATA / WRITEDATA
212 * (Segments 1 thru 4) for READDATA_RSP
213 */
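	/*
	 * Note that the cumulative subtractions below reduce to simple
	 * adjacent-timestamp deltas; for example:
	 *   seg2 = (ts_nvme_data - ts_isr_cmd) - seg1
	 *        = ts_nvme_data - ts_cmd_nvme
	 * so each segN is the elapsed time between two consecutive
	 * checkpoints in the list above.
	 */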
214 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
215 seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
216 seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
217 seg1 - seg2;
218 seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
219 seg1 - seg2 - seg3;
220 seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
221 seg1 - seg2 - seg3 - seg4;
222
223 /* For auto rsp commands seg6 thru seg10 will be 0 */
224 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
225 seg6 = (ctxp->ts_nvme_status -
226 ctxp->ts_isr_cmd) -
227 seg1 - seg2 - seg3 - seg4 - seg5;
228 seg7 = (ctxp->ts_status_wqput -
229 ctxp->ts_isr_cmd) -
230 seg1 - seg2 - seg3 -
231 seg4 - seg5 - seg6;
232 seg8 = (ctxp->ts_isr_status -
233 ctxp->ts_isr_cmd) -
234 seg1 - seg2 - seg3 - seg4 -
235 seg5 - seg6 - seg7;
236 seg9 = (ctxp->ts_status_nvme -
237 ctxp->ts_isr_cmd) -
238 seg1 - seg2 - seg3 - seg4 -
239 seg5 - seg6 - seg7 - seg8;
240 seg10 = (ctxp->ts_isr_status -
241 ctxp->ts_isr_cmd);
242 } else {
243 seg6 = 0;
244 seg7 = 0;
245 seg8 = 0;
246 seg9 = 0;
247 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
248 }
249
250 phba->ktime_seg1_total += seg1;
251 if (seg1 < phba->ktime_seg1_min)
252 phba->ktime_seg1_min = seg1;
253 else if (seg1 > phba->ktime_seg1_max)
254 phba->ktime_seg1_max = seg1;
255
256 phba->ktime_seg2_total += seg2;
257 if (seg2 < phba->ktime_seg2_min)
258 phba->ktime_seg2_min = seg2;
259 else if (seg2 > phba->ktime_seg2_max)
260 phba->ktime_seg2_max = seg2;
261
262 phba->ktime_seg3_total += seg3;
263 if (seg3 < phba->ktime_seg3_min)
264 phba->ktime_seg3_min = seg3;
265 else if (seg3 > phba->ktime_seg3_max)
266 phba->ktime_seg3_max = seg3;
267
268 phba->ktime_seg4_total += seg4;
269 if (seg4 < phba->ktime_seg4_min)
270 phba->ktime_seg4_min = seg4;
271 else if (seg4 > phba->ktime_seg4_max)
272 phba->ktime_seg4_max = seg4;
273
274 phba->ktime_seg5_total += seg5;
275 if (seg5 < phba->ktime_seg5_min)
276 phba->ktime_seg5_min = seg5;
277 else if (seg5 > phba->ktime_seg5_max)
278 phba->ktime_seg5_max = seg5;
279
280 phba->ktime_data_samples++;
281 if (!seg6)
282 goto out;
283
284 phba->ktime_seg6_total += seg6;
285 if (seg6 < phba->ktime_seg6_min)
286 phba->ktime_seg6_min = seg6;
287 else if (seg6 > phba->ktime_seg6_max)
288 phba->ktime_seg6_max = seg6;
289
290 phba->ktime_seg7_total += seg7;
291 if (seg7 < phba->ktime_seg7_min)
292 phba->ktime_seg7_min = seg7;
293 else if (seg7 > phba->ktime_seg7_max)
294 phba->ktime_seg7_max = seg7;
295
296 phba->ktime_seg8_total += seg8;
297 if (seg8 < phba->ktime_seg8_min)
298 phba->ktime_seg8_min = seg8;
299 else if (seg8 > phba->ktime_seg8_max)
300 phba->ktime_seg8_max = seg8;
301
302 phba->ktime_seg9_total += seg9;
303 if (seg9 < phba->ktime_seg9_min)
304 phba->ktime_seg9_min = seg9;
305 else if (seg9 > phba->ktime_seg9_max)
306 phba->ktime_seg9_max = seg9;
307 out:
308 phba->ktime_seg10_total += seg10;
309 if (seg10 < phba->ktime_seg10_min)
310 phba->ktime_seg10_min = seg10;
311 else if (seg10 > phba->ktime_seg10_max)
312 phba->ktime_seg10_max = seg10;
313 phba->ktime_status_samples++;
314 }
315 #endif
316
317 /**
318 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
319 * @phba: Pointer to HBA context object.
320 * @cmdwqe: Pointer to driver command WQE object.
321 * @wcqe: Pointer to driver response CQE object.
322 *
323 * The function is called from the SLI ring event handler with no
324 * lock held. This function is the completion handler for NVME FCP
325 * commands and frees the memory resources used for them.
326 **/
327 static void
328 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
329 struct lpfc_wcqe_complete *wcqe)
330 {
331 struct lpfc_nvmet_tgtport *tgtp;
332 struct nvmefc_tgt_fcp_req *rsp;
333 struct lpfc_nvmet_rcv_ctx *ctxp;
334 uint32_t status, result, op, start_clean;
335 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
336 uint32_t id;
337 #endif
338
339 ctxp = cmdwqe->context2;
340 rsp = &ctxp->ctx.fcp_req;
341 op = rsp->op;
342 ctxp->flag &= ~LPFC_NVMET_IO_INP;
343
344 status = bf_get(lpfc_wcqe_c_status, wcqe);
345 result = wcqe->parameter;
346
347 if (!phba->targetport)
348 goto out;
349
350 lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
351 ctxp->oxid, op, status);
352
353 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
354 if (status) {
355 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
356 rsp->transferred_length = 0;
357 atomic_inc(&tgtp->xmt_fcp_rsp_error);
358 } else {
359 rsp->fcp_error = NVME_SC_SUCCESS;
360 if (op == NVMET_FCOP_RSP)
361 rsp->transferred_length = rsp->rsplen;
362 else
363 rsp->transferred_length = rsp->transfer_length;
364 atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
365 }
366
367 out:
368 if ((op == NVMET_FCOP_READDATA_RSP) ||
369 (op == NVMET_FCOP_RSP)) {
370 /* Sanity check */
371 ctxp->state = LPFC_NVMET_STE_DONE;
372 ctxp->entry_cnt++;
373 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
374 if (phba->ktime_on) {
375 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
376 ctxp->ts_isr_data =
377 cmdwqe->isr_timestamp;
378 ctxp->ts_data_nvme =
379 ktime_get_ns();
380 ctxp->ts_nvme_status =
381 ctxp->ts_data_nvme;
382 ctxp->ts_status_wqput =
383 ctxp->ts_data_nvme;
384 ctxp->ts_isr_status =
385 ctxp->ts_data_nvme;
386 ctxp->ts_status_nvme =
387 ctxp->ts_data_nvme;
388 } else {
389 ctxp->ts_isr_status =
390 cmdwqe->isr_timestamp;
391 ctxp->ts_status_nvme =
392 ktime_get_ns();
393 }
394 }
395 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
396 id = smp_processor_id();
397 if (ctxp->cpu != id)
398 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
399 "6703 CPU Check cmpl: "
400 "cpu %d expect %d\n",
401 id, ctxp->cpu);
402 if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
403 phba->cpucheck_cmpl_io[id]++;
404 }
405 #endif
406 rsp->done(rsp);
407 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
408 if (phba->ktime_on)
409 lpfc_nvmet_ktime(phba, ctxp);
410 #endif
411 /* Let Abort cmpl repost the context */
412 if (!(ctxp->flag & LPFC_NVMET_ABORT_OP))
413 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
414 } else {
415 ctxp->entry_cnt++;
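		/*
		 * Clear the iocbq from the WQE onward so the same iocbq
		 * can be reused for the next operation on this exchange.
		 */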
416 start_clean = offsetof(struct lpfc_iocbq, wqe);
417 memset(((char *)cmdwqe) + start_clean, 0,
418 (sizeof(struct lpfc_iocbq) - start_clean));
419 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
420 if (phba->ktime_on) {
421 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
422 ctxp->ts_data_nvme = ktime_get_ns();
423 }
424 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
425 id = smp_processor_id();
426 if (ctxp->cpu != id)
427 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
428 "6704 CPU Check cmdcmpl: "
429 "cpu %d expect %d\n",
430 id, ctxp->cpu);
431 if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
432 phba->cpucheck_ccmpl_io[id]++;
433 }
434 #endif
435 rsp->done(rsp);
436 }
437 }
438
439 static int
440 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
441 struct nvmefc_tgt_ls_req *rsp)
442 {
443 struct lpfc_nvmet_rcv_ctx *ctxp =
444 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
445 struct lpfc_hba *phba = ctxp->phba;
446 struct hbq_dmabuf *nvmebuf =
447 (struct hbq_dmabuf *)ctxp->rqb_buffer;
448 struct lpfc_iocbq *nvmewqeq;
449 struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
450 struct lpfc_dmabuf dmabuf;
451 struct ulp_bde64 bpl;
452 int rc;
453
454 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
455 "6023 %s: Entrypoint ctx %p %p\n", __func__,
456 ctxp, tgtport);
457
458 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
459 rsp->rsplen);
460 if (nvmewqeq == NULL) {
461 atomic_inc(&nvmep->xmt_ls_drop);
462 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
463 "6150 LS Drop IO x%x: Prep\n",
464 ctxp->oxid);
465 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
466 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
467 ctxp->sid, ctxp->oxid);
468 return -ENOMEM;
469 }
470
471 /* Save numBdes for bpl2sgl */
472 nvmewqeq->rsvd2 = 1;
473 nvmewqeq->hba_wqidx = 0;
474 nvmewqeq->context3 = &dmabuf;
475 dmabuf.virt = &bpl;
476 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
477 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
478 bpl.tus.f.bdeSize = rsp->rsplen;
479 bpl.tus.f.bdeFlags = 0;
480 bpl.tus.w = le32_to_cpu(bpl.tus.w);
481
482 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
483 nvmewqeq->iocb_cmpl = NULL;
484 nvmewqeq->context2 = ctxp;
485
486 lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
487 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
488
489 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
490 if (rc == WQE_SUCCESS) {
491 /*
492 * Okay to repost buffer here, but wait till cmpl
493 * before freeing ctxp and iocbq.
494 */
495 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
496 ctxp->rqb_buffer = NULL;
497 atomic_inc(&nvmep->xmt_ls_rsp);
498 return 0;
499 }
500 /* Give back resources */
501 atomic_inc(&nvmep->xmt_ls_drop);
502 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
503 "6151 LS Drop IO x%x: Issue %d\n",
504 ctxp->oxid, rc);
505
506 lpfc_nlp_put(nvmewqeq->context1);
507
508 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
509 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
510 return -ENXIO;
511 }
512
513 static int
514 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
515 struct nvmefc_tgt_fcp_req *rsp)
516 {
517 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
518 struct lpfc_nvmet_rcv_ctx *ctxp =
519 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
520 struct lpfc_hba *phba = ctxp->phba;
521 struct lpfc_iocbq *nvmewqeq;
522 unsigned long iflags;
523 int rc, id;
524
525 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
526 if (phba->ktime_on) {
527 if (rsp->op == NVMET_FCOP_RSP)
528 ctxp->ts_nvme_status = ktime_get_ns();
529 else
530 ctxp->ts_nvme_data = ktime_get_ns();
531 }
532 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
533 id = smp_processor_id();
534 ctxp->cpu = id;
535 if (id < LPFC_CHECK_CPU_CNT)
536 phba->cpucheck_xmt_io[id]++;
537 if (rsp->hwqid != id) {
538 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
539 "6705 CPU Check OP: "
540 "cpu %d expect %d\n",
541 id, rsp->hwqid);
542 ctxp->cpu = rsp->hwqid;
543 }
544 }
545 #endif
546
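	/*
	 * NVMET_FCOP_ABORT: if a data/response WQE is still outstanding on
	 * the exchange (LPFC_NVMET_IO_INP is set), abort the solicited
	 * exchange; otherwise abort the unsolicited exchange that carried
	 * the original command.
	 */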
547 if (rsp->op == NVMET_FCOP_ABORT) {
548 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
549 "6103 Abort op: oxri x%x %d cnt %d\n",
550 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
551
552 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
553 "xri x%x state x%x cnt x%x\n",
554 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
555
556 atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
557 ctxp->entry_cnt++;
558 ctxp->flag |= LPFC_NVMET_ABORT_OP;
559 if (ctxp->flag & LPFC_NVMET_IO_INP)
560 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
561 ctxp->oxid);
562 else
563 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
564 ctxp->oxid);
565 return 0;
566 }
567
568 /* Sanity check */
569 if (ctxp->state == LPFC_NVMET_STE_ABORT) {
570 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
571 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
572 "6102 Bad state IO x%x aborted\n",
573 ctxp->oxid);
574 goto aerr;
575 }
576
577 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
578 if (nvmewqeq == NULL) {
579 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
580 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
581 "6152 FCP Drop IO x%x: Prep\n",
582 ctxp->oxid);
583 goto aerr;
584 }
585
586 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
587 nvmewqeq->iocb_cmpl = NULL;
588 nvmewqeq->context2 = ctxp;
589 nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
590 ctxp->wqeq->hba_wqidx = rsp->hwqid;
591
592 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
593 ctxp->oxid, rsp->op, rsp->rsplen);
594
595 /* For now we take hbalock */
596 spin_lock_irqsave(&phba->hbalock, iflags);
597 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
598 spin_unlock_irqrestore(&phba->hbalock, iflags);
599 if (rc == WQE_SUCCESS) {
600 ctxp->flag |= LPFC_NVMET_IO_INP;
601 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
602 if (!phba->ktime_on)
603 return 0;
604 if (rsp->op == NVMET_FCOP_RSP)
605 ctxp->ts_status_wqput = ktime_get_ns();
606 else
607 ctxp->ts_data_wqput = ktime_get_ns();
608 #endif
609 return 0;
610 }
611
612 /* Give back resources */
613 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
614 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
615 "6153 FCP Drop IO x%x: Issue: %d\n",
616 ctxp->oxid, rc);
617
618 ctxp->wqeq->hba_wqidx = 0;
619 nvmewqeq->context2 = NULL;
620 nvmewqeq->context3 = NULL;
621 aerr:
622 return -ENXIO;
623 }
624
625 static void
626 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
627 {
628 struct lpfc_nvmet_tgtport *tport = targetport->private;
629
630 /* release any threads waiting for the unreg to complete */
631 complete(&tport->tport_unreg_done);
632 }
633
634 static struct nvmet_fc_target_template lpfc_tgttemplate = {
635 .targetport_delete = lpfc_nvmet_targetport_delete,
636 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
637 .fcp_op = lpfc_nvmet_xmt_fcp_op,
638
639 .max_hw_queues = 1,
640 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
641 .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
642 .dma_boundary = 0xFFFFFFFF,
643
644 /* optional features */
645 .target_features = 0,
646 /* sizes of additional private data for data structures */
647 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
648 };
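/*
 * The max_hw_queues and max_sgl_segments values above are placeholders;
 * lpfc_nvmet_create_targetport() overrides them from the adapter
 * configuration (cfg_nvme_io_channel, cfg_sg_seg_cnt) before the
 * targetport is registered.
 */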
649
650 int
651 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
652 {
653 struct lpfc_vport *vport = phba->pport;
654 struct lpfc_nvmet_tgtport *tgtp;
655 struct nvmet_fc_port_info pinfo;
656 int error = 0;
657
658 if (phba->targetport)
659 return 0;
660
661 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
662 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
663 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
664 pinfo.port_id = vport->fc_myDID;
665
666 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
667 lpfc_tgttemplate.max_sgl_segments = phba->cfg_sg_seg_cnt;
668 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
669 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;
670
671 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
672 &phba->pcidev->dev,
673 &phba->targetport);
674 if (error) {
675 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
676 "6025 Cannot register NVME targetport "
677 "x%x\n", error);
678 phba->targetport = NULL;
679 } else {
680 tgtp = (struct lpfc_nvmet_tgtport *)
681 phba->targetport->private;
682 tgtp->phba = phba;
683
684 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
685 "6026 Registered NVME "
686 "targetport: %p, private %p "
687 "portnm %llx nodenm %llx\n",
688 phba->targetport, tgtp,
689 pinfo.port_name, pinfo.node_name);
690
691 atomic_set(&tgtp->rcv_ls_req_in, 0);
692 atomic_set(&tgtp->rcv_ls_req_out, 0);
693 atomic_set(&tgtp->rcv_ls_req_drop, 0);
694 atomic_set(&tgtp->xmt_ls_abort, 0);
695 atomic_set(&tgtp->xmt_ls_rsp, 0);
696 atomic_set(&tgtp->xmt_ls_drop, 0);
697 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
698 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
699 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
700 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
701 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
702 atomic_set(&tgtp->xmt_fcp_abort, 0);
703 atomic_set(&tgtp->xmt_fcp_drop, 0);
704 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
705 atomic_set(&tgtp->xmt_fcp_read, 0);
706 atomic_set(&tgtp->xmt_fcp_write, 0);
707 atomic_set(&tgtp->xmt_fcp_rsp, 0);
708 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
709 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
710 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
711 atomic_set(&tgtp->xmt_abort_rsp, 0);
712 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
713 atomic_set(&tgtp->xmt_abort_cmpl, 0);
714 }
715 return error;
716 }
717
718 int
719 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
720 {
721 struct lpfc_vport *vport = phba->pport;
722
723 if (!phba->targetport)
724 return 0;
725
726 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
727 "6007 Update NVMET port %p did x%x\n",
728 phba->targetport, vport->fc_myDID);
729
730 phba->targetport->port_id = vport->fc_myDID;
731 return 0;
732 }
733
734 void
735 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
736 {
737 struct lpfc_nvmet_tgtport *tgtp;
738
739 if (phba->nvmet_support == 0)
740 return;
741 if (phba->targetport) {
742 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
743 init_completion(&tgtp->tport_unreg_done);
744 nvmet_fc_unregister_targetport(phba->targetport);
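		/* Wait briefly for the unreg to complete; note the timeout
		 * argument to wait_for_completion_timeout() is in jiffies.
		 */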
745 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
746 }
747 phba->targetport = NULL;
748 }
749
750 /**
751 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
752 * @phba: pointer to lpfc hba data structure.
753 * @pring: pointer to a SLI ring.
754 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
755 *
756 * This routine processes an unsolicited NVME LS request received on an
757 * RQ. It allocates a receive context for the exchange, records the
758 * source ID and OX_ID from the frame header, and hands the LS payload
759 * to the NVME transport via nvmet_fc_rcv_ls_req(). If the request
760 * cannot be delivered, the receive buffer is freed and an ABTS is
761 * issued for the exchange.
762 **/
763 static void
764 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
765 struct hbq_dmabuf *nvmebuf)
766 {
767 struct lpfc_nvmet_tgtport *tgtp;
768 struct fc_frame_header *fc_hdr;
769 struct lpfc_nvmet_rcv_ctx *ctxp;
770 uint32_t *payload;
771 uint32_t size, oxid, sid, rc;
772
773 if (!nvmebuf || !phba->targetport) {
774 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
775 "6154 LS Drop IO\n");
776 oxid = 0;
777 size = 0;
778 sid = 0;
779 goto dropit;
780 }
781
782 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
783 payload = (uint32_t *)(nvmebuf->dbuf.virt);
784 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
785 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
786 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
787 sid = sli4_sid_from_fc_hdr(fc_hdr);
788
789 ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
790 if (ctxp == NULL) {
791 atomic_inc(&tgtp->rcv_ls_req_drop);
792 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
793 "6155 LS Drop IO x%x: Alloc\n",
794 oxid);
795 dropit:
796 lpfc_nvmeio_data(phba, "NVMET LS DROP: "
797 "xri x%x sz %d from %06x\n",
798 oxid, size, sid);
799 if (nvmebuf)
800 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
801 return;
802 }
803 ctxp->phba = phba;
804 ctxp->size = size;
805 ctxp->oxid = oxid;
806 ctxp->sid = sid;
807 ctxp->wqeq = NULL;
808 ctxp->state = LPFC_NVMET_STE_RCV;
809 ctxp->rqb_buffer = (void *)nvmebuf;
810
811 lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
812 oxid, size, sid);
813 /*
814 * The calling sequence should be:
815 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> rsp->done
816 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
817 */
818 atomic_inc(&tgtp->rcv_ls_req_in);
819 rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
820 payload, size);
821
822 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
823 "6037 %s: ctx %p sz %d rc %d: %08x %08x %08x "
824 "%08x %08x %08x\n", __func__, ctxp, size, rc,
825 *payload, *(payload+1), *(payload+2),
826 *(payload+3), *(payload+4), *(payload+5));
827
828 if (rc == 0) {
829 atomic_inc(&tgtp->rcv_ls_req_out);
830 return;
831 }
832
833 lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
834 oxid, size, sid);
835
836 atomic_inc(&tgtp->rcv_ls_req_drop);
837 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
838 "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
839 ctxp->oxid, rc);
840
841 /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
842 if (nvmebuf)
843 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
844
845 atomic_inc(&tgtp->xmt_ls_abort);
846 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
847 }
848
849 /**
850 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
851 * @phba: pointer to lpfc hba data structure.
852 * @pring: pointer to a SLI ring.
853 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
854 *
855 * This routine processes an unsolicited NVME FCP command received on
856 * an RQ. It initializes the receive context attached to the buffer,
857 * records the source ID and OX_ID from the frame header, and hands the
858 * command payload to the NVME transport via nvmet_fc_rcv_fcp_req().
859 * On failure the exchange is aborted with an ABTS or the buffer is
860 * reposted to the RQ.
861 **/
862 static void
863 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
864 struct lpfc_sli_ring *pring,
865 struct rqb_dmabuf *nvmebuf,
866 uint64_t isr_timestamp)
867 {
868 struct lpfc_nvmet_rcv_ctx *ctxp;
869 struct lpfc_nvmet_tgtport *tgtp;
870 struct fc_frame_header *fc_hdr;
871 uint32_t *payload;
872 uint32_t size, oxid, sid, rc;
873 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
874 uint32_t id;
875 #endif
876
877 if (!nvmebuf || !phba->targetport) {
878 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
879 "6157 FCP Drop IO\n");
880 oxid = 0;
881 size = 0;
882 sid = 0;
883 goto dropit;
884 }
885
886
887 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
888 payload = (uint32_t *)(nvmebuf->dbuf.virt);
889 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
890 size = nvmebuf->bytes_recv;
891 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
892 sid = sli4_sid_from_fc_hdr(fc_hdr);
893
894 ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context;
895 if (ctxp == NULL) {
896 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
897 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
898 "6158 FCP Drop IO x%x: Alloc\n",
899 oxid);
900 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
901 /* Cannot send ABTS without context */
902 return;
903 }
904 memset(ctxp, 0, sizeof(ctxp->ctx));
905 ctxp->wqeq = NULL;
906 ctxp->txrdy = NULL;
907 ctxp->offset = 0;
908 ctxp->phba = phba;
909 ctxp->size = size;
910 ctxp->oxid = oxid;
911 ctxp->sid = sid;
912 ctxp->state = LPFC_NVMET_STE_RCV;
913 ctxp->rqb_buffer = nvmebuf;
914 ctxp->entry_cnt = 1;
915 ctxp->flag = 0;
916
917 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
918 if (phba->ktime_on) {
919 ctxp->ts_isr_cmd = isr_timestamp;
920 ctxp->ts_cmd_nvme = ktime_get_ns();
921 ctxp->ts_nvme_data = 0;
922 ctxp->ts_data_wqput = 0;
923 ctxp->ts_isr_data = 0;
924 ctxp->ts_data_nvme = 0;
925 ctxp->ts_nvme_status = 0;
926 ctxp->ts_status_wqput = 0;
927 ctxp->ts_isr_status = 0;
928 ctxp->ts_status_nvme = 0;
929 }
930
931 if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
932 id = smp_processor_id();
933 if (id < LPFC_CHECK_CPU_CNT)
934 phba->cpucheck_rcv_io[id]++;
935 }
936 #endif
937
938 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d from %06x\n",
939 oxid, size, sid);
940
941 atomic_inc(&tgtp->rcv_fcp_cmd_in);
942 /*
943 * The calling sequence should be:
944 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
945 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
946 */
947 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
948 payload, size);
949
950 /* Process FCP command */
951 if (rc == 0) {
952 atomic_inc(&tgtp->rcv_fcp_cmd_out);
953 return;
954 }
955
956 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
957 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
958 "6159 FCP Drop IO x%x: nvmet_fc_rcv_fcp_req x%x\n",
959 ctxp->oxid, rc);
960 dropit:
961 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
962 oxid, size, sid);
963 if (oxid) {
964 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
965 return;
966 }
967
968 if (nvmebuf) {
969 nvmebuf->iocbq->hba_wqidx = 0;
970 /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
971 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
972 }
973 }
974
975 /**
976 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
977 * @phba: pointer to lpfc hba data structure.
978 * @pring: pointer to a SLI ring.
979 * @piocb: pointer to the driver iocbq that carries the received buffer.
980 *
981 * This routine is used to process an unsolicited event received from a SLI
982 * (Service Level Interface) ring. The actual processing of the data buffer
983 * associated with the unsolicited event is done by invoking the routine
984 * lpfc_nvmet_unsol_ls_buffer() after properly set up the buffer from the
985 * SLI RQ on which the unsolicited event was received.
986 **/
987 void
988 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
989 struct lpfc_iocbq *piocb)
990 {
991 struct lpfc_dmabuf *d_buf;
992 struct hbq_dmabuf *nvmebuf;
993
994 d_buf = piocb->context2;
995 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
996
997 if (phba->nvmet_support == 0) {
998 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
999 return;
1000 }
1001 lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
1002 }
1003
1004 /**
1005 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
1006 * @phba: pointer to lpfc hba data structure.
1007 * @pring: pointer to a SLI ring.
1008 * @nvmebuf: pointer to received nvme data structure.
1009 *
1010 * This routine is used to process an unsolicited event received from a SLI
1011 * (Service Level Interface) ring. The actual processing of the data buffer
1012 * associated with the unsolicited event is done by invoking the routine
1013 * lpfc_nvmet_unsol_fcp_buffer() after properly set up the buffer from the
1014 * SLI RQ on which the unsolicited event was received.
1015 **/
1016 void
1017 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
1018 struct lpfc_sli_ring *pring,
1019 struct rqb_dmabuf *nvmebuf,
1020 uint64_t isr_timestamp)
1021 {
1022 if (phba->nvmet_support == 0) {
1023 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
1024 return;
1025 }
1026 lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
1027 isr_timestamp);
1028 }
1029
1030 /**
1031 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
1032 * @phba: pointer to a host N_Port data structure.
1033 * @ctxp: Context info for NVME LS Request
1034 * @rspbuf: DMA buffer of NVME command.
1035 * @rspsize: size of the NVME command.
1036 *
1037 * This routine allocates a lpfc-WQE data structure from the driver
1038 * lpfc-WQE free-list and prepares an XMIT_SEQUENCE64 WQE that carries
1039 * the NVME LS response described by @rspbuf and @rspsize back to the
1040 * remote NPORT that originated the LS request. It fills in the Buffer
1041 * Descriptor Entry (BDE) with the DMA address and length of the
1042 * response payload and sets up the common WQE words (context tag,
1043 * XRI, request tag, received OX_ID and command type). The reference
1044 * count on the ndlp is incremented by 1 and the reference to the ndlp
1045 * is put into context1 of the WQE data structure for this WQE to hold
1046 * the ndlp reference for the command's callback function to access
1047 * later. The caller sets the WQE completion handler and issues the
1048 * WQE on the ELS work queue.
1049 *
1050 *
1051 * Return code
1052 * Pointer to the newly allocated/prepared nvme wqe data structure
1053 * NULL - when nvme wqe data structure allocation/preparation failed
1054 **/
1055 static struct lpfc_iocbq *
1056 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
1057 struct lpfc_nvmet_rcv_ctx *ctxp,
1058 dma_addr_t rspbuf, uint16_t rspsize)
1059 {
1060 struct lpfc_nodelist *ndlp;
1061 struct lpfc_iocbq *nvmewqe;
1062 union lpfc_wqe *wqe;
1063
1064 if (!lpfc_is_link_up(phba)) {
1065 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1066 "6104 lpfc_nvmet_prep_ls_wqe: link err: "
1067 "NPORT x%x oxid:x%x\n",
1068 ctxp->sid, ctxp->oxid);
1069 return NULL;
1070 }
1071
1072 /* Allocate buffer for command wqe */
1073 nvmewqe = lpfc_sli_get_iocbq(phba);
1074 if (nvmewqe == NULL) {
1075 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1076 "6105 lpfc_nvmet_prep_ls_wqe: No WQE: "
1077 "NPORT x%x oxid:x%x\n",
1078 ctxp->sid, ctxp->oxid);
1079 return NULL;
1080 }
1081
1082 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1083 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1084 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1085 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1086 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1087 "6106 lpfc_nvmet_prep_ls_wqe: No ndlp: "
1088 "NPORT x%x oxid:x%x\n",
1089 ctxp->sid, ctxp->oxid);
1090 goto nvme_wqe_free_wqeq_exit;
1091 }
1092 ctxp->wqeq = nvmewqe;
1093
1094 /* prevent preparing wqe with NULL ndlp reference */
1095 nvmewqe->context1 = lpfc_nlp_get(ndlp);
1096 if (nvmewqe->context1 == NULL)
1097 goto nvme_wqe_free_wqeq_exit;
1098 nvmewqe->context2 = ctxp;
1099
1100 wqe = &nvmewqe->wqe;
1101 memset(wqe, 0, sizeof(union lpfc_wqe));
1102
1103 /* Words 0 - 2 */
1104 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1105 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
1106 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
1107 wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
1108
1109 /* Word 3 */
1110
1111 /* Word 4 */
1112
1113 /* Word 5 */
1114 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
1115 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
1116 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
1117 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_DD_SOL_CTL);
1118 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
1119
1120 /* Word 6 */
1121 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
1122 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1123 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
1124
1125 /* Word 7 */
1126 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
1127 CMD_XMIT_SEQUENCE64_WQE);
1128 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
1129 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
1130 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
1131
1132 /* Word 8 */
1133 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
1134
1135 /* Word 9 */
1136 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
1137 /* Needs to be set by caller */
1138 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
1139
1140 /* Word 10 */
1141 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
1142 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
1143 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
1144 LPFC_WQE_LENLOC_WORD12);
1145 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
1146
1147 /* Word 11 */
1148 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
1149 LPFC_WQE_CQ_ID_DEFAULT);
1150 bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
1151 OTHER_COMMAND);
1152
1153 /* Word 12 */
1154 wqe->xmit_sequence.xmit_len = rspsize;
1155
1156 nvmewqe->retry = 1;
1157 nvmewqe->vport = phba->pport;
1158 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
1159 nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
1160
1161 /* Xmit NVME response to remote NPORT <did> */
1162 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1163 "6039 Xmit NVME LS response to remote "
1164 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
1165 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
1166 rspsize);
1167 return nvmewqe;
1168
1169 nvme_wqe_free_wqeq_exit:
1170 nvmewqe->context2 = NULL;
1171 nvmewqe->context3 = NULL;
1172 lpfc_sli_release_iocbq(phba, nvmewqe);
1173 return NULL;
1174 }
1175
1176
1177 static struct lpfc_iocbq *
1178 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
1179 struct lpfc_nvmet_rcv_ctx *ctxp)
1180 {
1181 struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
1182 struct lpfc_nvmet_tgtport *tgtp;
1183 struct sli4_sge *sgl;
1184 struct lpfc_nodelist *ndlp;
1185 struct lpfc_iocbq *nvmewqe;
1186 struct scatterlist *sgel;
1187 union lpfc_wqe128 *wqe;
1188 uint32_t *txrdy;
1189 dma_addr_t physaddr;
1190 int i, cnt;
1191 int xc = 1;
1192
1193 if (!lpfc_is_link_up(phba)) {
1194 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1195 "6107 lpfc_nvmet_prep_fcp_wqe: link err:"
1196 "NPORT x%x oxid:x%x\n", ctxp->sid,
1197 ctxp->oxid);
1198 return NULL;
1199 }
1200
1201 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1202 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1203 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1204 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1205 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1206 "6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: "
1207 "NPORT x%x oxid:x%x\n",
1208 ctxp->sid, ctxp->oxid);
1209 return NULL;
1210 }
1211
1212 if (rsp->sg_cnt > phba->cfg_sg_seg_cnt) {
1213 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1214 "6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
1215 "NPORT x%x oxid:x%x\n",
1216 ctxp->sid, ctxp->oxid);
1217 return NULL;
1218 }
1219
1220 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1221 nvmewqe = ctxp->wqeq;
1222 if (nvmewqe == NULL) {
1223 /* Allocate buffer for command wqe */
1224 nvmewqe = ctxp->rqb_buffer->iocbq;
1225 if (nvmewqe == NULL) {
1226 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1227 "6110 lpfc_nvmet_prep_fcp_wqe: No "
1228 "WQE: NPORT x%x oxid:x%x\n",
1229 ctxp->sid, ctxp->oxid);
1230 return NULL;
1231 }
1232 ctxp->wqeq = nvmewqe;
1233 xc = 0; /* create new XRI */
1234 nvmewqe->sli4_lxritag = NO_XRI;
1235 nvmewqe->sli4_xritag = NO_XRI;
1236 }
1237
1238 /* Sanity check */
1239 if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
1240 (ctxp->entry_cnt == 1)) ||
1241 ((ctxp->state == LPFC_NVMET_STE_DATA) &&
1242 (ctxp->entry_cnt > 1))) {
1243 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
1244 } else {
1245 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1246 "6111 Wrong state %s: %d cnt %d\n",
1247 __func__, ctxp->state, ctxp->entry_cnt);
1248 return NULL;
1249 }
1250
1251 sgl = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl;
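	/*
	 * Build the WQE for the requested target op: READDATA and
	 * READDATA_RSP use an FCP_TSEND64 WQE, WRITEDATA uses an
	 * FCP_TRECEIVE64 WQE (with a TXRDY payload as its first SGE),
	 * and RSP uses an FCP_TRSP64 WQE.
	 */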
1252 switch (rsp->op) {
1253 case NVMET_FCOP_READDATA:
1254 case NVMET_FCOP_READDATA_RSP:
1255 /* Words 0 - 2 : The first sg segment */
1256 sgel = &rsp->sg[0];
1257 physaddr = sg_dma_address(sgel);
1258 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1259 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
1260 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
1261 wqe->fcp_tsend.bde.addrHigh =
1262 cpu_to_le32(putPaddrHigh(physaddr));
1263
1264 /* Word 3 */
1265 wqe->fcp_tsend.payload_offset_len = 0;
1266
1267 /* Word 4 */
1268 wqe->fcp_tsend.relative_offset = ctxp->offset;
1269
1270 /* Word 5 */
1271
1272 /* Word 6 */
1273 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
1274 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1275 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
1276 nvmewqe->sli4_xritag);
1277
1278 /* Word 7 */
1279 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
1280
1281 /* Word 8 */
1282 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
1283
1284 /* Word 9 */
1285 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
1286 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
1287
1288 /* Word 10 */
1289 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
1290 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
1291 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
1292 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
1293 LPFC_WQE_LENLOC_WORD12);
1294 bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
1295 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
1296 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
1297 if (phba->cfg_nvme_oas)
1298 bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);
1299
1300 /* Word 11 */
1301 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
1302 LPFC_WQE_CQ_ID_DEFAULT);
1303 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
1304 FCP_COMMAND_TSEND);
1305
1306 /* Word 12 */
1307 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
1308
1309 /* Setup 2 SKIP SGEs */
1310 sgl->addr_hi = 0;
1311 sgl->addr_lo = 0;
1312 sgl->word2 = 0;
1313 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1314 sgl->word2 = cpu_to_le32(sgl->word2);
1315 sgl->sge_len = 0;
1316 sgl++;
1317 sgl->addr_hi = 0;
1318 sgl->addr_lo = 0;
1319 sgl->word2 = 0;
1320 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1321 sgl->word2 = cpu_to_le32(sgl->word2);
1322 sgl->sge_len = 0;
1323 sgl++;
1324 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
1325 atomic_inc(&tgtp->xmt_fcp_read_rsp);
1326 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
1327 if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
1328 (rsp->rsplen == 12)) {
1329 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
1330 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
1331 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
1332 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
1333 } else {
1334 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
1335 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
1336 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
1337 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
1338 ((rsp->rsplen >> 2) - 1));
1339 memcpy(&wqe->words[16], rsp->rspaddr,
1340 rsp->rsplen);
1341 }
1342 } else {
1343 atomic_inc(&tgtp->xmt_fcp_read);
1344
1345 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
1346 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
1347 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
1348 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
1349 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
1350 }
1351 ctxp->state = LPFC_NVMET_STE_DATA;
1352 break;
1353
1354 case NVMET_FCOP_WRITEDATA:
1355 /* Words 0 - 2 : The first sg segment */
1356 txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
1357 GFP_KERNEL, &physaddr);
1358 if (!txrdy) {
1359 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1360 "6041 Bad txrdy buffer: oxid x%x\n",
1361 ctxp->oxid);
1362 return NULL;
1363 }
1364 ctxp->txrdy = txrdy;
1365 ctxp->txrdy_phys = physaddr;
1366 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1367 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
1368 wqe->fcp_treceive.bde.addrLow =
1369 cpu_to_le32(putPaddrLow(physaddr));
1370 wqe->fcp_treceive.bde.addrHigh =
1371 cpu_to_le32(putPaddrHigh(physaddr));
1372
1373 /* Word 3 */
1374 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
1375
1376 /* Word 4 */
1377 wqe->fcp_treceive.relative_offset = ctxp->offset;
1378
1379 /* Word 5 */
1380
1381 /* Word 6 */
1382 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
1383 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1384 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
1385 nvmewqe->sli4_xritag);
1386
1387 /* Word 7 */
1388 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
1389 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
1390 CMD_FCP_TRECEIVE64_WQE);
1391
1392 /* Word 8 */
1393 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
1394
1395 /* Word 9 */
1396 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
1397 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
1398
1399 /* Word 10 */
1400 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
1401 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
1402 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
1403 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
1404 LPFC_WQE_LENLOC_WORD12);
1405 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
1406 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
1407 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
1408 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
1409 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
1410 if (phba->cfg_nvme_oas)
1411 bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);
1412
1413 /* Word 11 */
1414 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
1415 LPFC_WQE_CQ_ID_DEFAULT);
1416 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
1417 FCP_COMMAND_TRECEIVE);
1418 bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
1419
1420 /* Word 12 */
1421 wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
1422
1423 /* Setup 1 TXRDY and 1 SKIP SGE */
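		/* The txrdy buffer carries the FCP XFER_RDY payload:
		 * word 0 is the relative offset (DATA_RO), word 1 the
		 * burst length, word 2 is reserved.
		 */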
1424 txrdy[0] = 0;
1425 txrdy[1] = cpu_to_be32(rsp->transfer_length);
1426 txrdy[2] = 0;
1427
1428 sgl->addr_hi = putPaddrHigh(physaddr);
1429 sgl->addr_lo = putPaddrLow(physaddr);
1430 sgl->word2 = 0;
1431 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
1432 sgl->word2 = cpu_to_le32(sgl->word2);
1433 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
1434 sgl++;
1435 sgl->addr_hi = 0;
1436 sgl->addr_lo = 0;
1437 sgl->word2 = 0;
1438 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1439 sgl->word2 = cpu_to_le32(sgl->word2);
1440 sgl->sge_len = 0;
1441 sgl++;
1442 ctxp->state = LPFC_NVMET_STE_DATA;
1443 atomic_inc(&tgtp->xmt_fcp_write);
1444 break;
1445
1446 case NVMET_FCOP_RSP:
1447 /* Words 0 - 2 */
1448 sgel = &rsp->sg[0];
1449 physaddr = rsp->rspdma;
1450 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1451 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
1452 wqe->fcp_trsp.bde.addrLow =
1453 cpu_to_le32(putPaddrLow(physaddr));
1454 wqe->fcp_trsp.bde.addrHigh =
1455 cpu_to_le32(putPaddrHigh(physaddr));
1456
1457 /* Word 3 */
1458 wqe->fcp_trsp.response_len = rsp->rsplen;
1459
1460 /* Word 4 */
1461 wqe->fcp_trsp.rsvd_4_5[0] = 0;
1462
1463
1464 /* Word 5 */
1465
1466 /* Word 6 */
1467 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
1468 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1469 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
1470 nvmewqe->sli4_xritag);
1471
1472 /* Word 7 */
1473 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
1474 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
1475
1476 /* Word 8 */
1477 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
1478
1479 /* Word 9 */
1480 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
1481 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
1482
1483 /* Word 10 */
1484 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
1485 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
1486 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
1487 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
1488 LPFC_WQE_LENLOC_WORD3);
1489 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
1490 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
1491 if (phba->cfg_nvme_oas)
1492 bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);
1493
1494 /* Word 11 */
1495 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
1496 LPFC_WQE_CQ_ID_DEFAULT);
1497 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
1498 FCP_COMMAND_TRSP);
1499 bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
1500 ctxp->state = LPFC_NVMET_STE_RSP;
1501
1502 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
1503 /* Good response - all zeros on the wire */
1504 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
1505 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
1506 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
1507 } else {
1508 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
1509 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
1510 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
1511 ((rsp->rsplen >> 2) - 1));
1512 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
1513 }
1514
1515 /* Use rspbuf, NOT sg list */
1516 rsp->sg_cnt = 0;
1517 sgl->word2 = 0;
1518 atomic_inc(&tgtp->xmt_fcp_rsp);
1519 break;
1520
1521 default:
1522 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1523 "6064 Unknown Rsp Op %d\n",
1524 rsp->op);
1525 return NULL;
1526 }
1527
1528 nvmewqe->retry = 1;
1529 nvmewqe->vport = phba->pport;
1530 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
1531 nvmewqe->context1 = ndlp;
1532
1533 for (i = 0; i < rsp->sg_cnt; i++) {
1534 sgel = &rsp->sg[i];
1535 physaddr = sg_dma_address(sgel);
1536 cnt = sg_dma_len(sgel);
1537 sgl->addr_hi = putPaddrHigh(physaddr);
1538 sgl->addr_lo = putPaddrLow(physaddr);
1539 sgl->word2 = 0;
1540 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
1541 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
1542 if ((i+1) == rsp->sg_cnt)
1543 bf_set(lpfc_sli4_sge_last, sgl, 1);
1544 sgl->word2 = cpu_to_le32(sgl->word2);
1545 sgl->sge_len = cpu_to_le32(cnt);
1546 sgl++;
1547 ctxp->offset += cnt;
1548 }
1549 return nvmewqe;
1550 }
1551
1552 /**
1553 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
1554 * @phba: Pointer to HBA context object.
1555 * @cmdwqe: Pointer to driver command WQE object.
1556 * @wcqe: Pointer to driver response CQE object.
1557 *
1558 * The function is called from the SLI ring event handler with no
1559 * lock held. This function is the completion handler for an NVME ABTS
1560 * of an FCP command and frees the memory resources used for it.
1561 **/
1562 static void
1563 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1564 struct lpfc_wcqe_complete *wcqe)
1565 {
1566 struct lpfc_nvmet_rcv_ctx *ctxp;
1567 struct lpfc_nvmet_tgtport *tgtp;
1568 uint32_t status, result;
1569
1570 ctxp = cmdwqe->context2;
1571 status = bf_get(lpfc_wcqe_c_status, wcqe);
1572 result = wcqe->parameter;
1573
1574 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1575 atomic_inc(&tgtp->xmt_abort_cmpl);
1576
1577 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
1578 "6165 Abort cmpl: xri x%x WCQE: %08x %08x %08x %08x\n",
1579 ctxp->oxid, wcqe->word0, wcqe->total_data_placed,
1580 result, wcqe->word3);
1581
1582 ctxp->state = LPFC_NVMET_STE_DONE;
1583 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
1584
1585 cmdwqe->context2 = NULL;
1586 cmdwqe->context3 = NULL;
1587 lpfc_sli_release_iocbq(phba, cmdwqe);
1588 }
1589
1590 /**
1591 * lpfc_nvmet_xmt_fcp_abort_cmp - Completion handler for ABTS
1592 * @phba: Pointer to HBA context object.
1593 * @cmdwqe: Pointer to driver command WQE object.
1594 * @wcqe: Pointer to driver response CQE object.
1595 *
1596 * The function is called from the SLI ring event handler with no
1597 * lock held. This function is the completion handler for an NVME ABTS
1598 * of an FCP command and frees the memory resources used for it.
1599 **/
1600 static void
1601 lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1602 struct lpfc_wcqe_complete *wcqe)
1603 {
1604 struct lpfc_nvmet_rcv_ctx *ctxp;
1605 struct lpfc_nvmet_tgtport *tgtp;
1606 uint32_t status, result;
1607
1608 ctxp = cmdwqe->context2;
1609 status = bf_get(lpfc_wcqe_c_status, wcqe);
1610 result = wcqe->parameter;
1611
1612 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1613 atomic_inc(&tgtp->xmt_abort_cmpl);
1614
1615 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1616 "6070 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
1617 ctxp, wcqe->word0, wcqe->total_data_placed,
1618 result, wcqe->word3);
1619
1620 if (ctxp) {
1621 /* Sanity check */
1622 if (ctxp->state != LPFC_NVMET_STE_ABORT) {
1623 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
1624 "6112 ABORT Wrong state:%d oxid x%x\n",
1625 ctxp->state, ctxp->oxid);
1626 }
1627 ctxp->state = LPFC_NVMET_STE_DONE;
1628 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
1629 cmdwqe->context2 = NULL;
1630 cmdwqe->context3 = NULL;
1631 }
1632 }
1633
1634 /**
1635 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
1636 * @phba: Pointer to HBA context object.
1637 * @cmdwqe: Pointer to driver command WQE object.
1638 * @wcqe: Pointer to driver response CQE object.
1639 *
1640 * The function is called from the SLI ring event handler with no
1641 * lock held. This function is the completion handler for an NVME ABTS
1642 * of an LS command and frees the memory resources used for it.
1643 **/
1644 static void
1645 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1646 struct lpfc_wcqe_complete *wcqe)
1647 {
1648 struct lpfc_nvmet_rcv_ctx *ctxp;
1649 struct lpfc_nvmet_tgtport *tgtp;
1650 uint32_t status, result;
1651
1652 ctxp = cmdwqe->context2;
1653 status = bf_get(lpfc_wcqe_c_status, wcqe);
1654 result = wcqe->parameter;
1655
1656 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1657 atomic_inc(&tgtp->xmt_abort_cmpl);
1658
1659 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1660 "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
1661 ctxp, wcqe->word0, wcqe->total_data_placed,
1662 result, wcqe->word3);
1663
1664 if (ctxp) {
1665 cmdwqe->context2 = NULL;
1666 cmdwqe->context3 = NULL;
1667 lpfc_sli_release_iocbq(phba, cmdwqe);
1668 kfree(ctxp);
1669 } else
1670 lpfc_sli_release_iocbq(phba, cmdwqe);
1671 }
1672
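/*
 * Prepare (but do not issue) a BLS ABTS, built as an XMIT_SEQUENCE64
 * WQE, for the exchange identified by sid/xri. The WQE is constructed
 * in ctxp->wqeq; the caller is responsible for issuing it on the
 * appropriate work queue.
 */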
1673 static int
1674 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
1675 struct lpfc_nvmet_rcv_ctx *ctxp,
1676 uint32_t sid, uint16_t xri)
1677 {
1678 struct lpfc_nvmet_tgtport *tgtp;
1679 struct lpfc_iocbq *abts_wqeq;
1680 union lpfc_wqe *wqe_abts;
1681 struct lpfc_nodelist *ndlp;
1682
1683 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1684 "6067 %s: Entrypoint: sid %x xri %x\n", __func__,
1685 sid, xri);
1686
1687 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1688
1689 ndlp = lpfc_findnode_did(phba->pport, sid);
1690 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1691 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1692 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1693 atomic_inc(&tgtp->xmt_abort_rsp_error);
1694 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
1695 "6134 Drop ABTS - wrong NDLP state x%x.\n",
1696 (ndlp) ? ndlp->nlp_state : 0);
1697
1698 /* No failure to an ABTS request. */
1699 return 0;
1700 }
1701
1702 abts_wqeq = ctxp->wqeq;
1703 wqe_abts = &abts_wqeq->wqe;
1704 ctxp->state = LPFC_NVMET_STE_ABORT;
1705
1706 /*
1707 * Since we zero the whole WQE, we need to ensure we set the WQE fields
1708 * that were initialized in lpfc_sli4_nvmet_alloc.
1709 */
1710 memset(wqe_abts, 0, sizeof(union lpfc_wqe));
1711
1712 /* Word 5 */
1713 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
1714 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
1715 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
1716 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
1717 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
1718
1719 /* Word 6 */
1720 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
1721 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1722 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
1723 abts_wqeq->sli4_xritag);
1724
1725 /* Word 7 */
1726 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
1727 CMD_XMIT_SEQUENCE64_WQE);
1728 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
1729 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
1730 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
1731
1732 /* Word 8 */
1733 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
1734
1735 /* Word 9 */
1736 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
1737 /* Needs to be set by caller */
1738 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
1739
1740 /* Word 10 */
1741 bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
1742 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
1743 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
1744 LPFC_WQE_LENLOC_WORD12);
1745 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
1746 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
1747
1748 /* Word 11 */
1749 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
1750 LPFC_WQE_CQ_ID_DEFAULT);
1751 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
1752 OTHER_COMMAND);
1753
1754 abts_wqeq->vport = phba->pport;
1755 abts_wqeq->context1 = ndlp;
1756 abts_wqeq->context2 = ctxp;
1757 abts_wqeq->context3 = NULL;
1758 abts_wqeq->rsvd2 = 0;
1759 	/* hba_wqidx should already be set up from the command we are aborting */
1760 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
1761 abts_wqeq->iocb.ulpLe = 1;
1762
1763 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1764 "6069 Issue ABTS to xri x%x reqtag x%x\n",
1765 xri, abts_wqeq->iotag);
1766 return 1;
1767 }
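/*
 * For reference: the bf_set()/bf_get() accessors used for the WQE
 * setup above are bitfield pack/unpack macros from lpfc_hw4.h, defined
 * along these lines (sketch; every field name expands to _SHIFT,
 * _MASK, and _WORD tokens declared with the WQE structures):
 *
 *	#define bf_set(name, ptr, value) \
 *		((ptr)->name##_WORD = ((((value) & name##_MASK) << \
 *		name##_SHIFT) | ((ptr)->name##_WORD & \
 *		~(name##_MASK << name##_SHIFT))))
 *	#define bf_get(name, ptr) \
 *		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
 *
 * E.g. bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl,
 * FC_RCTL_BA_ABTS) read-modify-writes only the R_CTL bits of Word 5,
 * leaving the rest of the word intact.
 */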
1768
1769 static int
1770 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
1771 struct lpfc_nvmet_rcv_ctx *ctxp,
1772 uint32_t sid, uint16_t xri)
1773 {
1774 struct lpfc_nvmet_tgtport *tgtp;
1775 struct lpfc_iocbq *abts_wqeq;
1776 union lpfc_wqe *abts_wqe;
1777 struct lpfc_nodelist *ndlp;
1778 unsigned long flags;
1779 int rc;
1780
1781 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1782 if (!ctxp->wqeq) {
1783 ctxp->wqeq = ctxp->rqb_buffer->iocbq;
1784 ctxp->wqeq->hba_wqidx = 0;
1785 }
1786
1787 ndlp = lpfc_findnode_did(phba->pport, sid);
1788 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1789 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1790 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1791 atomic_inc(&tgtp->xmt_abort_rsp_error);
1792 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
1793 "6160 Drop ABTS - wrong NDLP state x%x.\n",
1794 			(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
1795
1796 		/* An ABTS request is never failed back to the caller. */
1797 return 0;
1798 }
1799
1800 /* Issue ABTS for this WQE based on iotag */
1801 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
1802 if (!ctxp->abort_wqeq) {
1803 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
1804 "6161 Abort failed: No wqeqs: "
1805 "xri: x%x\n", ctxp->oxid);
1806 		/* An ABTS request is never failed back to the caller. */
1807 return 0;
1808 }
1809 abts_wqeq = ctxp->abort_wqeq;
1810 abts_wqe = &abts_wqeq->wqe;
1811 ctxp->state = LPFC_NVMET_STE_ABORT;
1812
1813 	/* Log the abort request before it is submitted. */
1814 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
1815 "6162 Abort Request to rport DID x%06x "
1816 "for xri x%x x%x\n",
1817 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
1818
1819 	/* If the HBA is being reset, the HBA_NVME_IOQ_FLUSH flag is set.
1820 	 * It is cleared when the reset completes and the rings are
1821 	 * reestablished.
1822 	 */
1823 	spin_lock_irqsave(&phba->hbalock, flags);
1823 	/* Driver-queued commands are in the process of being flushed */
1824 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
1825 spin_unlock_irqrestore(&phba->hbalock, flags);
1826 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1827 "6163 Driver in reset cleanup - flushing "
1828 "NVME Req now. hba_flag x%x oxid x%x\n",
1829 phba->hba_flag, ctxp->oxid);
1830 lpfc_sli_release_iocbq(phba, abts_wqeq);
1831 return 0;
1832 }
1833
1834 /* Outstanding abort is in progress */
1835 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
1836 spin_unlock_irqrestore(&phba->hbalock, flags);
1837 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1838 "6164 Outstanding NVME I/O Abort Request "
1839 "still pending on oxid x%x\n",
1840 ctxp->oxid);
1841 lpfc_sli_release_iocbq(phba, abts_wqeq);
1842 return 0;
1843 }
1844
1845 /* Ready - mark outstanding as aborted by driver. */
1846 abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
1847
1848 	/* WQEs are reused. Clear stale data and zero key fields such as
1849 	 * ia, iaab, iaar, xri_tag, and ctxt_tag.
1850 	 */
1851 memset(abts_wqe, 0, sizeof(union lpfc_wqe));
1852
1853 /* word 3 */
1854 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
1855
1856 /* word 7 */
1857 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
1858 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
1859
1860 /* word 8 - tell the FW to abort the IO associated with this
1861 * outstanding exchange ID.
1862 */
1863 abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
1864
1865 /* word 9 - this is the iotag for the abts_wqe completion. */
1866 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
1867 abts_wqeq->iotag);
1868
1869 /* word 10 */
1870 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
1871 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
1872
1873 /* word 11 */
1874 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
1875 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
1876 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
1877
1878 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
1879 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
1880 abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
1881 	abts_wqeq->iocb_cmpl = NULL;
1882 abts_wqeq->iocb_flag |= LPFC_IO_NVME;
1883 abts_wqeq->context2 = ctxp;
1884 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
1885 spin_unlock_irqrestore(&phba->hbalock, flags);
1886 if (rc == WQE_SUCCESS)
1887 return 0;
1888
1889 lpfc_sli_release_iocbq(phba, abts_wqeq);
1890 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1891 "6166 Failed abts issue_wqe with status x%x "
1892 "for oxid x%x.\n",
1893 rc, ctxp->oxid);
1894 return 1;
1895 }
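/*
 * The submit sequence above is a common lpfc pattern: all state checks
 * and the WQE post are done under hbalock, so the HBA_NVME_IOQ_FLUSH
 * test, the LPFC_DRIVER_ABORTED marking, and the ring submission are
 * atomic with respect to reset cleanup. A minimal sketch of the shape
 * (the condition names here are illustrative, not driver symbols):
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	if (resetting || abort_already_pending) {
 *		spin_unlock_irqrestore(&phba->hbalock, flags);
 *		lpfc_sli_release_iocbq(phba, abts_wqeq);
 *		return 0;	(never fail an ABTS back to the caller)
 *	}
 *	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
 *	... build the ABORT_XRI_CX WQE ...
 *	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 */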
1896 
1898 static int
1899 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
1900 struct lpfc_nvmet_rcv_ctx *ctxp,
1901 uint32_t sid, uint16_t xri)
1902 {
1903 struct lpfc_nvmet_tgtport *tgtp;
1904 struct lpfc_iocbq *abts_wqeq;
1905 unsigned long flags;
1906 int rc;
1907
1908 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1909 if (!ctxp->wqeq) {
1910 ctxp->wqeq = ctxp->rqb_buffer->iocbq;
1911 ctxp->wqeq->hba_wqidx = 0;
1912 }
1913
1914 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
1915 if (rc == 0)
1916 goto aerr;
1917
1918 spin_lock_irqsave(&phba->hbalock, flags);
1919 abts_wqeq = ctxp->wqeq;
1920 abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_abort_cmp;
1921 	abts_wqeq->iocb_cmpl = NULL;
1922 abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
1923 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
1924 spin_unlock_irqrestore(&phba->hbalock, flags);
1925 if (rc == WQE_SUCCESS) {
1926 atomic_inc(&tgtp->xmt_abort_rsp);
1927 return 0;
1928 }
1929
1930 aerr:
1931 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
1932 atomic_inc(&tgtp->xmt_abort_rsp_error);
1933 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
1934 "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
1935 ctxp->oxid, rc);
1936 return 1;
1937 }
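/*
 * Neither FCP abort helper is invoked directly by the NVMET transport;
 * the dispatch lives elsewhere in this file and, simplified, chooses
 * based on whether I/O is already in flight on the exchange (sketch of
 * the caller, not a verbatim copy):
 *
 *	if (ctxp->flag & LPFC_NVMET_IO_INP)
 *		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, sid, xri);
 *	else
 *		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, xri);
 *
 * i.e. an exchange with a WQE outstanding is aborted via ABORT_XRI_CX,
 * while one that never started I/O gets an explicit BLS ABTS sequence.
 */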
1938
1939 static int
1940 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
1941 struct lpfc_nvmet_rcv_ctx *ctxp,
1942 uint32_t sid, uint16_t xri)
1943 {
1944 struct lpfc_nvmet_tgtport *tgtp;
1945 struct lpfc_iocbq *abts_wqeq;
1946 union lpfc_wqe *wqe_abts;
1947 unsigned long flags;
1948 int rc;
1949
1950 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1951 if (!ctxp->wqeq) {
1952 /* Issue ABTS for this WQE based on iotag */
1953 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
1954 if (!ctxp->wqeq) {
1955 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
1956 "6068 Abort failed: No wqeqs: "
1957 "xri: x%x\n", xri);
1958 			/* An ABTS request is never failed back to the caller. */
1959 kfree(ctxp);
1960 return 0;
1961 }
1962 }
1963 abts_wqeq = ctxp->wqeq;
1964 wqe_abts = &abts_wqeq->wqe;
1965 lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
1966
1967 spin_lock_irqsave(&phba->hbalock, flags);
1968 abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
1969 	abts_wqeq->iocb_cmpl = NULL;
1970 abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
1971 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
1972 spin_unlock_irqrestore(&phba->hbalock, flags);
1973 if (rc == WQE_SUCCESS) {
1974 atomic_inc(&tgtp->xmt_abort_rsp);
1975 return 0;
1976 }
1977
1978 atomic_inc(&tgtp->xmt_abort_rsp_error);
1979 abts_wqeq->context2 = NULL;
1980 abts_wqeq->context3 = NULL;
1981 lpfc_sli_release_iocbq(phba, abts_wqeq);
1982 kfree(ctxp);
1983 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
1984 "6056 Failed to Issue ABTS. Status x%x\n", rc);
1985 return 0;
1986 }
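/*
 * Summary of the three abort paths implemented above:
 *
 *	sol FCP:   ABORT_XRI_CX WQE posted to LPFC_FCP_RING; completion
 *		   runs lpfc_nvmet_sol_fcp_abort_cmp().
 *	unsol FCP: BLS ABTS built as an XMIT_SEQUENCE64 WQE and posted
 *		   to LPFC_FCP_RING; completion runs
 *		   lpfc_nvmet_xmt_fcp_abort_cmp().
 *	unsol LS:  the same BLS ABTS WQE posted to LPFC_ELS_RING;
 *		   completion runs lpfc_nvmet_xmt_ls_abort_cmp(), which
 *		   frees the heap-allocated rcv context instead.
 */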