/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);

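/*
 * Register an fcport with the FC-NVMe transport as a remote port.
 * Called once PRLI shows the peer offers an NVMe target or discovery
 * service; a no-op when host NVMe is disabled or the port is already
 * registered.
 */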
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;
	list_add_tail(&rport->list, &vha->nvme_rport_list);

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/*
 * Allocate a queue for NVMe traffic. Queue index 0 is remapped to 1
 * (qpair 0 is reserved); an existing qpair for the index is reused,
 * otherwise a new qpair is created.
 */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx=%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}

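/*
 * Completion callback for an NVMe LS (link service) SRB. On the final
 * reference drop, the completion status is recorded and fd->done() is
 * deferred to ls_work so the SRB can be released here.
 */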
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_ls_req *fd;
	struct nvme_private *priv;

	if (atomic_read(&sp->ref_count) == 0) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
		    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
		return;
	}

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (res)
		res = -EINVAL;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;
	priv = fd->private;
	priv->comp_status = res;
	schedule_work(&priv->ls_work);
	/* work schedule doesn't need the sp */
	qla2x00_rel_sp(sp);
}

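/*
 * Completion callback for an NVMe FCP command SRB. On the final
 * reference drop, the driver status is mapped to an NVMe status code
 * and the transport request is completed.
 */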
static void qla_nvme_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_fcp_req *fd;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (res == QLA_SUCCESS)
		fd->status = 0;
	else
		fd->status = NVME_SC_INTERNAL;

	fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
	fd->done(fd);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

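/* Worker that asks the firmware to abort an outstanding NVMe SRB. */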
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);
}

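/* Transport entry point: abort an outstanding LS request. */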
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

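/* Deferred LS completion, run from process context via ls_work. */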
static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);
	struct nvmefc_ls_req *fd = priv->fd;

	fd->done(fd, priv->comp_status);
}

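/*
 * Transport entry point: issue an NVMe LS request. Wraps the transport
 * descriptor in an SRB_NVME_LS SRB, DMA-maps the request payload and
 * queues the IOCB through qla2x00_start_sp().
 */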
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	vha = fcport->vha;
	ha = vha->hw;
	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	atomic_set(&sp->ref_count, 1);
	nvme = &sp->u.iocb_cmd;
	priv->sp = sp;
	priv->fd = fd;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
		return rval;
	}

	return rval;
}

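/* Transport entry point: abort an outstanding FCP command. */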
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

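/*
 * Build and post a Command Type NVME IOCB on the qpair's request
 * queue: find a free outstanding-command handle, reserve ring space,
 * fill in the command/response IU descriptors and the data DSDs
 * (spilling into Continuation Type 1 IOCBs as needed), then update
 * the chip's request-queue in-pointer.
 */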
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	int rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer; how do we check buffer len == 0? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
		vha->qla_stats.input_bytes += fd->payload_length;
		vha->qla_stats.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
		vha->qla_stats.output_bytes += fd->payload_length;
		vha->qla_stats.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
	cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
	cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			*((uint32_t *)(&cont_pkt->entry_type)) =
			    cpu_to_le32(CONTINUE_A64_TYPE);

			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}

/*
 * Post an FCP command to the firmware. Fails fast while the ISP is
 * being reset, and returns -EBUSY while the port is resetting so the
 * transport stalls the queue instead of burning its retry count.
 */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = -ENODEV;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;

	vha = fcport->vha;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return rval;

	/*
	 * If we know the dev is going away while the transport is still
	 * sending I/Os, return busy back to stall the IO queue. This
	 * happens when the link goes away and fw hasn't notified us yet,
	 * but I/Os are being returned. If the dev comes back quickly we
	 * won't exhaust the IO retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	atomic_set(&sp->ref_count, 1);
	init_waitqueue_head(&sp->nvme_ls_waitq);
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->qpair = qpair;
	sp->vha = vha;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
	}

	return rval;
}

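/* Transport callback: local port deletion finished; wake the waiter. */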
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

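/*
 * Transport callback run once the remote port is fully unregistered:
 * unlink the rport from the vha list, wake the waiter and, unless the
 * host is unloading, schedule session teardown.
 */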
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private, *trport;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;

	list_for_each_entry_safe(qla_rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (qla_rport->fcport == fcport) {
			list_del(&qla_rport->list);
			break;
		}
	}
	complete(&fcport->nvme_del_done);

	if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
		INIT_WORK(&fcport->free_work, qlt_free_session_done);
		schedule_work(&fcport->free_work);
	}

	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p completed.\n", fcport);
}

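/*
 * FC-NVMe port template: operations and limits advertised to the
 * nvme-fc transport when the local port is registered.
 */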
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue = qla_nvme_alloc_queue,
	.delete_queue = NULL,
	.ls_req = qla_nvme_ls_req,
	.ls_abort = qla_nvme_ls_abort,
	.fcp_io = qla_nvme_post_cmd,
	.fcp_abort = qla_nvme_fcp_abort,
	.max_hw_queues = 8,
	.max_sgl_segments = 128,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

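/*
 * Poll for an aborted SRB's references to settle, for at most
 * NVME_ABORT_POLLING_PERIOD seconds.
 */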
#define NVME_ABORT_POLLING_PERIOD 2
static int qla_nvme_wait_on_command(srb_t *sp)
{
	int ret = QLA_SUCCESS;

	wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
	    NVME_ABORT_POLLING_PERIOD * HZ);

	if (atomic_read(&sp->ref_count) > 1)
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

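/*
 * Abort an NVMe SRB. With firmware running, request the abort and wait
 * for the command to be returned; otherwise complete the SRB locally
 * with the supplied result.
 */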
void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
{
	int rval;

	if (ha->flags.fw_started) {
		rval = ha->isp_ops->abort_command(sp);
		if (!rval && !qla_nvme_wait_on_command(sp))
			ql_log(ql_log_warn, NULL, 0x2112,
			    "timed out waiting on sp=%p\n", sp);
	} else {
		sp->done(sp, res);
	}
}

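/*
 * Deferred work: unregister this fcport's remote port from the FC-NVMe
 * transport and wait for remoteport_delete to run.
 */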
static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, struct fc_port,
	    nvme_del_work);
	struct qla_nvme_rport *qla_rport, *trport;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p\n", __func__, fcport);

	list_for_each_entry_safe(qla_rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (qla_rport->fcport == fcport) {
			ql_log(ql_log_info, fcport->vha, 0x2113,
			    "%s: fcport=%p\n", __func__, fcport);
			init_completion(&fcport->nvme_del_done);
			nvme_fc_unregister_remoteport(
			    fcport->nvme_remote_port);
			wait_for_completion(&fcport->nvme_del_done);
			break;
		}
	}
}

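/*
 * Tear down all NVMe state for the host: force immediate dev-loss on
 * every registered remote port, then unregister the local port and
 * wait for its deletion callback.
 */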
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	struct qla_nvme_rport *qla_rport, *trport;
	fc_port_t *fcport;
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	list_for_each_entry_safe(qla_rport, trport,
	    &vha->nvme_rport_list, list) {
		fcport = qla_rport->fcport;

		ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
		    __func__, fcport);

		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
	}

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

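/*
 * Register this host with the FC-NVMe transport as a local port.
 * max_hw_queues is clamped to the usable request queues (two are
 * reserved); the registered lport holds a PCI device reference taken
 * via get_device().
 */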
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);
	WARN_ON(ha->max_req_queues < 3);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}