/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
/* *************************** Data Structures/Defines ****************** */


enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = (1 << 0),
};

#define NVMEFC_QUEUE_DELAY	3		/* ms units */

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */
struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_RELEASED	= (1 << 1),
	FCOP_FLAGS_COMPLETE	= (1 << 2),
	FCOP_FLAGS_AEN		= (1 << 3),
};
struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};
enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};
struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};
struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_port_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list; /* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
enum nvme_fcctrl_flags {
	FCCTRL_TERMIO		= (1 << 0),
};
struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	u64			association_id;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct delayed_work	connect_work;

	struct kref		ref;
	u32			flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};
static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}
/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);



/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct class *fc_class;
static struct device *fc_udev_device;
/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);
static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}
static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}
static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}
/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @lport_p:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed.  If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	newrec->localport.private = &newrec[1];
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME host FC port.
 * @localport: pointer to the (registered) local port that is to be
 *             deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
/*
 * TRADDR strings, per FC-NVME are fixed format:
 *     "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * being updated:
 *     "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 *  19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64
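/*
 * Illustrative example (the WWN values are hypothetical, not drawn from
 * this code): for an lport with node_name 0x20000090fa942779 and
 * port_name 0x10000090fa942779, the routine below emits a udev
 * environment string such as:
 *
 *   NVMEFC_HOST_TRADDR=nn-0x20000090fa942779:pn-0x10000090fa942779
 *
 * which is 19 + 43 = 62 characters plus the NUL terminator, fitting
 * within FCNVME_TRADDR_LENGTH.
 */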
static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}
static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_RECONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}
static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}
static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}
/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @rport_p:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	newrec->dev = lport->dev;
	newrec->lport = lport;
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
static void
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);
}
static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer. Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset. "
				"Deleting controller.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_RECONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects. No need to do anything
		 * further. Reconnects will be attempted until either the
		 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
		 * remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	default:
		/* no action to take - let it delete */
		break;
	}
}
/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME subsystem FC port.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost. "
				"Deleting controller.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference, which will allow, if all controllers
	 * go away, which should only occur after dev_loss_tmo occurs,
	 * for the rport to be torn down.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * On simple mappings (those that return just a dma address), we'll noop
 * them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}
/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
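/*
 * Behavioral sketch of the wrappers above (illustrative only; "buf",
 * "len", "sgl" and "nents" are hypothetical caller variables): with a
 * real device the calls pass straight through to the DMA API; with the
 * NULL device that fcloop supplies they degrade to harmless no-ops:
 *
 *	dma_addr_t a;
 *
 *	a = fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE);
 *	// a == 0, and fc_dma_mapping_error(NULL, a) == 0 (no error)
 *
 *	nents = fc_dma_map_sg(NULL, sgl, nents, DMA_TO_DEVICE);
 *	// scatterlist dma addresses are all set to 0 by fc_map_sg()
 */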
/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}
static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}
static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}
static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
		 * wait until driver calls back. lldd responsible for
		 * the timeout action
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}
static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}
/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_LSACC		= 1,
	VERR_LSDESC_RQST	= 2,
	VERR_LSDESC_RQST_LEN	= 3,
	VERR_ASSOC_ID		= 4,
	VERR_ASSOC_ID_LEN	= 5,
	VERR_CONN_ID		= 6,
	VERR_CONN_ID_LEN	= 7,
	VERR_CR_ASSOC		= 8,
	VERR_CR_ASSOC_ACC_LEN	= 9,
	VERR_CR_CONN		= 10,
	VERR_CR_CONN_ACC_LEN	= 11,
	VERR_DISCONN		= 12,
	VERR_DISCONN_ACC_LEN	= 13,
};

static char *validation_errors[] = {
	"OK",
	"Not LS_ACC",
	"Not LSDESC_RQST",
	"Bad LSDESC_RQST Length",
	"Not Association ID",
	"Bad Association ID Length",
	"Not Connection ID",
	"Bad Connection ID Length",
	"Not CR_ASSOC Rqst",
	"Bad CR_ASSOC ACC Length",
	"Not CR_CONN Rqst",
	"Bad CR_CONN ACC Length",
	"Not Disconnect Rqst",
	"Bad Disconnect ACC Length",
};
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect command failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}
/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association.  Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried.  As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
			GFP_KERNEL);
	if (!lsop)
		/* couldn't send it... too bad */
		return;

	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];

	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
	discon_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_disconn_cmd));

	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	discon_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));

	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);

	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
						FCNVME_LSDESC_DISCONN_CMD);
	discon_rqst->discon_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd));
	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

	lsreq->rqstaddr = discon_rqst;
	lsreq->rqstlen = sizeof(*discon_rqst);
	lsreq->rspaddr = discon_acc;
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);

	/* only meaningful part to terminating the association */
	ctrl->association_id = 0;
}
/* *********************** NVME Ctrl Routines **************************** */

static void __nvme_fc_final_op_cleanup(struct request *rq);
static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
static int
nvme_fc_reinit_request(void *data, struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;

	memset(cmdiu, 0, sizeof(*cmdiu));
	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
	memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));

	return 0;
}
static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(set->driver_data, op);
}
static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	int state;

	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (state != FCPOP_STATE_ACTIVE) {
		atomic_set(&op->state, state);
		return -ECANCELED;
	}

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}
static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	unsigned long flags;
	int i, ret;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
			continue;

		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO) {
			ctrl->iocnt++;
			aen_op->flags |= FCOP_FLAGS_TERMIO;
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);

		ret = __nvme_fc_abort_op(ctrl, aen_op);
		if (ret) {
			/*
			 * if __nvme_fc_abort_op failed the io wasn't
			 * active. Thus this call path is running in
			 * parallel to the io complete. Treat as non-error.
			 */

			/* back out the flags/counters */
			spin_lock_irqsave(&ctrl->lock, flags);
			if (ctrl->flags & FCCTRL_TERMIO)
				ctrl->iocnt--;
			aen_op->flags &= ~FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&ctrl->lock, flags);
			return;
		}
	}
}
static inline bool
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	bool complete_rq = false;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
		if (ctrl->flags & FCCTRL_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
	}
	if (op->flags & FCOP_FLAGS_RELEASED)
		complete_rq = true;
	else
		op->flags |= FCOP_FLAGS_COMPLETE;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return complete_rq;
}
static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;

	/*
	 * WARNING:
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * Additionally:
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *      cqe.sqid,  cqe.sqhd,  cqe.command_id
	 *
	 * Failure or error of an individual i/o, in a transport
	 * detected fashion unrelated to the nvme completion status,
	 * can potentially cause the initiator and target sides to get out
	 * of sync on SQ head/tail (aka outstanding io count allowed).
	 * Per FC-NVME spec, failure of an individual command requires
	 * the connection to be terminated, which in turn requires the
	 * association to be terminated.
	 */

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (atomic_read(&op->state) == FCPOP_STATE_ABORTED ||
			op->flags & FCOP_FLAGS_TERMIO)
		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
	else if (freq->status)
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary).
	 */

	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload or 12 bytes of payload (which
		 * should all be zeros) are considered successful and
		 * no payload in the CQE by the transport.
		 */
		if (freq->transferred_length !=
			be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.status_code ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
		goto done;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	/*
	 * Force failures of commands if we're killing the controller
	 * or have an error on a command used to create a new association
	 */
	if (status &&
	    (blk_queue_dying(rq->q) ||
	     ctrl->ctrl.state == NVME_CTRL_NEW ||
	     ctrl->ctrl.state == NVME_CTRL_RECONNECTING))
		status |= cpu_to_le16(NVME_SC_DNR << 1);

	if (__nvme_fc_fcpop_chk_teardowns(ctrl, op))
		__nvme_fc_final_op_cleanup(rq);
	else
		nvme_end_request(rq, status, result);

check_error:
	if (terminate_assoc)
		nvme_fc_error_recovery(ctrl, "transport detected io error");
}
static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
	op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}
static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];

	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}
static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
		if (!private)
			return -ENOMEM;

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.first_sgl = NULL; /* no sg list */
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}
static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (!aen_op->fcp_req.private)
			continue;

		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}
static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}
static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 1);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	/*
	 * Considered whether we should allocate buffers for all SQEs
	 * and CQEs and dma map them - mapping their respective entries
	 * into the request structures (kernel vm addr and dma address)
	 * thus the driver could use the buffers/mappings directly.
	 * It only makes sense if the LLDD would use them for its
	 * messaging api. It's very unlikely most adapter api's would use
	 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
	 * structures were used instead.
	 */
}
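/*
 * Worked example for the capsule sizing above (illustrative): ioccsz is
 * expressed in 16-byte units per NVMe-oF, so a subsystem reporting
 * ioccsz = 4 yields a 4 * 16 = 64-byte command capsule on io queues,
 * i.e. a bare SQE with no in-capsule data. The admin queue always uses
 * the bare 64-byte nvme_command.
 */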
/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 * The transport will send a Disconnect LS request to terminate
 * the queue's connection. Termination of the admin queue will also
 * terminate the association at the target.
 */
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	/*
	 * Current implementation never disconnects a single queue.
	 * It always terminates a whole association. So there is never
	 * a disconnect(queue) LS sent to the target.
	 */

	queue->connection_id = 0;
	clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
}
static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{
	if (ctrl->lport->ops->delete_queue)
		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
				queue->lldd_handle);
	queue->lldd_handle = NULL;
}
static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}
static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
	int ret = 0;

	queue->lldd_handle = NULL;
	if (ctrl->lport->ops->create_queue)
		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
				qidx, qsize, &queue->lldd_handle);

	return ret;
}
static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
	int i;

	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}
static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	for (; i >= 0; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}
static int
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
					(qsize / 5));
		if (ret)
			break;
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			break;
	}

	return ret;
}
static void
nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_init_queue(ctrl, i);
}
static void
nvme_fc_ctrl_free(struct kref *ref)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(ref, struct nvme_fc_ctrl, ref);
	unsigned long flags;

	if (ctrl->ctrl.tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}

	/* remove from rport list */
	spin_lock_irqsave(&ctrl->rport->lock, flags);
	list_del(&ctrl->ctrl_list);
	spin_unlock_irqrestore(&ctrl->rport->lock, flags);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);

	kfree(ctrl->queues);

	put_device(ctrl->dev);
	nvme_fc_rport_put(ctrl->rport);

	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
	if (ctrl->ctrl.opts)
		nvmf_free_options(ctrl->ctrl.opts);
	kfree(ctrl);
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
	return kref_get_unless_zero(&ctrl->ref);
}
/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call
 */
static void
nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	WARN_ON(nctrl != &ctrl->ctrl);

	nvme_fc_ctrl_put(ctrl);
}
static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
	/* only proceed if in LIVE state - e.g. on first error */
	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
		return;

	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: transport association error detected: %s\n",
		ctrl->cnum, errmsg);
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);

	nvme_reset_ctrl(&ctrl->ctrl);
}
static enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	int ret;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
			atomic_read(&op->state) == FCPOP_STATE_ABORTED)
		return BLK_EH_RESET_TIMER;

	ret = __nvme_fc_abort_op(ctrl, op);
	if (ret)
		/* io wasn't active to abort */
		return BLK_EH_NOT_HANDLED;

	/*
	 * we can't individually ABTS an io without affecting the queue,
	 * thus killing the queue, and thus the association.
	 * So resolve by performing a controller reset, which will stop
	 * the host/io stack, terminate the association on the link,
	 * and recreate an association on the link.
	 */
	nvme_fc_error_recovery(ctrl, "io timeout error");

	/*
	 * the io abort has been initiated. Have the reset timer
	 * restarted and the abort completion will complete the io
	 * shortly. Avoids a synchronous wait while the abort finishes.
	 */
	return BLK_EH_RESET_TIMER;
}
static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	enum dma_data_direction dir;
	int ret;

	freq->sg_cnt = 0;

	if (!blk_rq_payload_bytes(rq))
		return 0;

	freq->sg_table.sgl = freq->first_sgl;
	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
	if (ret)
		return -ENOMEM;

	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, dir);
	if (unlikely(freq->sg_cnt <= 0)) {
		sg_free_table_chained(&freq->sg_table, true);
		freq->sg_cnt = 0;
		return -EFAULT;
	}

	/*
	 * TODO: blk_integrity_rq(rq)  for DIF
	 */
	return 0;
}
static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;

	if (!freq->sg_cnt)
		return;

	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
				((rq_data_dir(rq) == WRITE) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE));

	nvme_cleanup_cmd(rq);

	sg_free_table_chained(&freq->sg_table, true);

	freq->sg_cnt = 0;
}
/*
 * In FC, the queue is a logical thing. At transport connect, the target
 * creates its "queue" and returns a handle that is to be given to the
 * target whenever it posts something to the corresponding SQ.  When an
 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
 * command contained within the SQE, an io, and assigns a FC exchange
 * to it. The SQE and the associated SQ handle are sent in the initial
 * CMD IU sent on the exchange. All transfers relative to the io occur
 * as part of the exchange.  The CQE is the last thing for the io,
 * which is transferred (explicitly or implicitly) with the RSP IU
 * sent on the exchange. After the CQE is received, the FC exchange is
 * terminated and the Exchange may be used on a different io.
 *
 * The transport to LLDD api has the transport making a request for a
 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
 * resource and transfers the command. The LLDD will then process all
 * steps to complete the io. Upon completion, the transport done routine
 * is called.
 *
 * So - while the operation is outstanding to the LLDD, there is a link
 * level FC exchange resource that is also outstanding. This must be
 * considered in all cleanup operations.
 */
static blk_status_t
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	struct nvme_fc_fcp_op *op, u32 data_len,
	enum nvmefc_fcp_datadir io_dir)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	u32 csn;
	int ret;

	/*
	 * before attempting to send the io, check to see if we believe
	 * the target device is present
	 */
	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		goto busy;

	if (!nvme_fc_ctrl_get(ctrl))
		return BLK_STS_IOERR;

	/* format the FC-NVME CMD IU and fcp_req */
	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
	csn = atomic_inc_return(&queue->csn);
	cmdiu->csn = cpu_to_be32(csn);
	cmdiu->data_len = cpu_to_be32(data_len);
	switch (io_dir) {
	case NVMEFC_FCP_WRITE:
		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
		break;
	case NVMEFC_FCP_READ:
		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
		break;
	case NVMEFC_FCP_NODATA:
		cmdiu->flags = 0;
		break;
	}
	op->fcp_req.payload_length = data_len;
	op->fcp_req.io_dir = io_dir;
	op->fcp_req.transferred_length = 0;
	op->fcp_req.rcv_rsplen = 0;
	op->fcp_req.status = NVME_SC_SUCCESS;
	op->fcp_req.sqid = cpu_to_le16(queue->qnum);

	/*
	 * validate per fabric rules, set fields mandated by fabric spec
	 * as well as those by FC-NVME spec.
	 */
	WARN_ON_ONCE(sqe->common.metadata);
	sqe->common.flags |= NVME_CMD_SGL_METABUF;

	/*
	 * format SQE DPTR field per FC-NVME rules:
	 *    type=0x5     Transport SGL Data Block Descriptor
	 *    subtype=0xA  Transport-specific value
	 *    address=0
	 *    length=length of the data series
	 */
	sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
					NVME_SGL_FMT_TRANSPORT_A;
	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
	sqe->rw.dptr.sgl.addr = 0;

	if (!(op->flags & FCOP_FLAGS_AEN)) {
		ret = nvme_fc_map_data(ctrl, op->rq, op);
		if (ret < 0) {
			nvme_cleanup_cmd(op->rq);
			nvme_fc_ctrl_put(ctrl);
			if (ret == -ENOMEM || ret == -EAGAIN)
				return BLK_STS_RESOURCE;
			return BLK_STS_IOERR;
		}
	}

	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
				  sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_ACTIVE);

	if (!(op->flags & FCOP_FLAGS_AEN))
		blk_mq_start_request(op->rq);

	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					queue->lldd_handle, &op->fcp_req);

	if (ret) {
		if (!(op->flags & FCOP_FLAGS_AEN))
			nvme_fc_unmap_data(ctrl, op->rq, op);

		nvme_fc_ctrl_put(ctrl);

		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
				ret != -EBUSY)
			return BLK_STS_IOERR;

		goto busy;
	}

	return BLK_STS_OK;

busy:
	if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx)
		blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);

	return BLK_STS_RESOURCE;
}

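/*
 * blk-mq .queue_rq entry point: nvme_setup_cmd() fills the SQE within
 * the op's CMD IU, the payload length selects the transfer direction,
 * and nvme_fc_start_fcp_op() formats and issues the op to the LLDD.
 */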
static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *rq = bd->rq;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	enum nvmefc_fcp_datadir io_dir;
	u32 data_len;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq, sqe);
	if (ret)
		return ret;

	data_len = blk_rq_payload_bytes(rq);
	if (data_len)
		io_dir = ((rq_data_dir(rq) == WRITE) ?
					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
	else
		io_dir = NVMEFC_FCP_NODATA;

	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}

static struct blk_mq_tags *
nvme_fc_tagset(struct nvme_fc_queue *queue)
{
	if (queue->qnum == 0)
		return queue->ctrl->admin_tag_set.tags[queue->qnum];

	return queue->ctrl->tag_set.tags[queue->qnum - 1];
}

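/*
 * blk-mq .poll hook: nudge the LLDD (if it provides a poll_queue
 * callback) and report whether the op has left the ACTIVE state,
 * i.e. whether its completion has been seen.
 */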
static int
nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *req;
	struct nvme_fc_fcp_op *op;

	req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
	if (!req)
		return 0;

	op = blk_mq_rq_to_pdu(req);

	if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
	    (ctrl->lport->ops->poll_queue))
		ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
						 queue->lldd_handle);

	return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
}

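/*
 * Async event submissions reuse the dedicated aen_ops[0] op rather
 * than a block request. Submission is skipped while the association
 * is being torn down (FCCTRL_TERMIO); outstanding aen ops are instead
 * aborted via nvme_fc_abort_aen_ops() on the teardown path.
 */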
static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
	struct nvme_fc_fcp_op *aen_op;
	unsigned long flags;
	bool terminating = false;
	blk_status_t ret;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->flags & FCCTRL_TERMIO)
		terminating = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (terminating)
		return;

	aen_op = &ctrl->aen_ops[0];

	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
					NVMEFC_FCP_NODATA);
	if (ret)
		dev_err(ctrl->ctrl.device,
			"failed async event work\n");
}

static void
__nvme_fc_final_op_cleanup(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	atomic_set(&op->state, FCPOP_STATE_IDLE);
	op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
			FCOP_FLAGS_COMPLETE);

	nvme_fc_unmap_data(ctrl, rq, op);
	nvme_complete_rq(rq);
	nvme_fc_ctrl_put(ctrl);
}

static void
nvme_fc_complete_rq(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	unsigned long flags;
	bool completed = false;

	/*
	 * the core layer, on controller resets after calling
	 * nvme_shutdown_ctrl(), calls complete_rq without our
	 * calling blk_mq_complete_request(), thus there may still
	 * be live i/o outstanding with the LLDD. Means transport has
	 * to track complete calls vs fcpio_done calls to know what
	 * path to take on completes and dones.
	 */
	spin_lock_irqsave(&ctrl->lock, flags);
	if (op->flags & FCOP_FLAGS_COMPLETE)
		completed = true;
	else
		op->flags |= FCOP_FLAGS_RELEASED;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (completed)
		__nvme_fc_final_op_cleanup(rq);
}

/*
 * This routine is used by the transport when it needs to find active
 * io on a queue that is to be terminated. The transport uses
 * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
 * this routine to kill them on a 1 by 1 basis.
 *
 * As FC allocates FC exchange for each io, the transport must contact
 * the LLDD to terminate the exchange, thus releasing the FC exchange.
 * After terminating the exchange the LLDD will call the transport's
 * normal io done path for the request, but it will have an aborted
 * status. The done path will return the io request back to the block
 * layer with an error status.
 */
static void
nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
{
	struct nvme_ctrl *nctrl = data;
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	int status;

	if (!blk_mq_request_started(req))
		return;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->flags & FCCTRL_TERMIO) {
		ctrl->iocnt++;
		op->flags |= FCOP_FLAGS_TERMIO;
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	status = __nvme_fc_abort_op(ctrl, op);
	if (status) {
		/*
		 * if __nvme_fc_abort_op failed the io wasn't
		 * active. Thus this call path is running in
		 * parallel to the io complete. Treat as non-error.
		 */

		/* back out the flags/counters */
		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO)
			ctrl->iocnt--;
		op->flags &= ~FCOP_FLAGS_TERMIO;
		spin_unlock_irqrestore(&ctrl->lock, flags);
		return;
	}
}

static const struct blk_mq_ops nvme_fc_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_hctx,
	.poll		= nvme_fc_poll,
	.timeout	= nvme_fc_timeout,
};

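/*
 * Per-request pdu layout implied by the cmd_size calculations below:
 * the transport's nvme_fc_fcp_op, followed by an inline scatterlist
 * area (which backs the first_sgl that nvme_fc_map_data() seeds
 * sg_table.sgl with), followed by the LLDD's private area of
 * fcprqst_priv_sz bytes.
 */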
static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	if (!nr_io_queues)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_fc_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
					(SG_CHUNK_SIZE *
						sizeof(struct scatterlist)) +
					ctrl->lport->ops->fcprqst_priv_sz;
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		return ret;

	ctrl->ctrl.tagset = &ctrl->tag_set;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tag_set;
	}

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_cleanup_blk_queue;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_delete_hw_queues;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	blk_mq_free_tag_set(&ctrl->tag_set);
	nvme_fc_free_io_queues(ctrl);

	/* force put free routine to ignore io queues */
	ctrl->ctrl.tagset = NULL;

	return ret;
}

static int
nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	/* check for io queues existing */
	if (ctrl->ctrl.queue_count == 1)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_delete_hw_queues;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_free_io_queues:
	nvme_fc_free_io_queues(ctrl);
	return ret;
}

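/*
 * The four helpers below maintain the activity accounting chain:
 * a controller with an active association counts against its rport
 * (act_ctrl_cnt), and the first active controller on an rport counts
 * the rport against its lport (act_rport_cnt). When the last count
 * drops and the port object was already marked FC_OBJSTATE_DELETED,
 * the deferred localport_delete/remoteport_delete LLDD callback is
 * finally made.
 */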
static void
nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;

	atomic_inc(&lport->act_rport_cnt);
}

static void
nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	cnt = atomic_dec_return(&lport->act_rport_cnt);
	if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
		lport->ops->localport_delete(&lport->localport);
}

static int
nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	u32 cnt;

	if (ctrl->assoc_active)
		return 1;

	ctrl->assoc_active = true;
	cnt = atomic_inc_return(&rport->act_ctrl_cnt);
	if (cnt == 1)
		nvme_fc_rport_active_on_lport(rport);

	return 0;
}

static int
nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	/* ctrl->assoc_active=false will be set independently */

	cnt = atomic_dec_return(&rport->act_ctrl_cnt);
	if (cnt == 0) {
		if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
			lport->ops->remoteport_delete(&rport->remoteport);
		nvme_fc_rport_inactive_on_lport(rport);
	}

	return 0;
}

/*
 * This routine restarts the controller on the host side, and
 * on the link side, recreates the controller association.
 */
static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret;
	bool changed;

	++ctrl->ctrl.nr_reconnects;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ENODEV;

	if (nvme_fc_ctlr_active_on_rport(ctrl))
		return -ENOTUNIQ;

	/*
	 * Create the admin queue
	 */

	nvme_fc_init_queue(ctrl, 0);

	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
				NVME_AQ_BLK_MQ_DEPTH);
	if (ret)
		goto out_free_queue;

	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
				NVME_AQ_BLK_MQ_DEPTH,
				(NVME_AQ_BLK_MQ_DEPTH / 4));
	if (ret)
		goto out_delete_hw_queue;

	if (ctrl->ctrl.state != NVME_CTRL_NEW)
		blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	/*
	 * Check controller capabilities
	 *
	 * todo:- add code to check if ctrl attributes changed from
	 * prior connection values
	 */

	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_disconnect_admin_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);

	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (ret)
		goto out_disconnect_admin_queue;

	ctrl->ctrl.max_hw_sectors =
		(ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);

	ret = nvme_init_identify(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	/* sanity checks */

	/* FC-NVME does not have other data in the capsule */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
				ctrl->ctrl.icdoff);
		goto out_disconnect_admin_queue;
	}

	/* FC-NVME supports normal SGL Data Block Descriptors */

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, reducing "
			"to queue_size\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	ret = nvme_fc_init_aen_ops(ctrl);
	if (ret)
		goto out_term_aen_ops;

	/*
	 * Create the io queues
	 */

	if (ctrl->ctrl.queue_count > 1) {
		if (ctrl->ctrl.state == NVME_CTRL_NEW)
			ret = nvme_fc_create_io_queues(ctrl);
		else
			ret = nvme_fc_reinit_io_queues(ctrl);
		if (ret)
			goto out_term_aen_ops;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);

	ctrl->ctrl.nr_reconnects = 0;

	if (changed)
		nvme_start_ctrl(&ctrl->ctrl);

	return 0;	/* Success */

out_term_aen_ops:
	nvme_fc_term_aen_ops(ctrl);
out_disconnect_admin_queue:
	/* send a Disconnect(association) LS to fc-nvme target */
	nvme_fc_xmt_disconnect_assoc(ctrl);
out_delete_hw_queue:
	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
	nvme_fc_free_queue(&ctrl->queues[0]);
	ctrl->assoc_active = false;
	nvme_fc_ctlr_inactive_on_rport(ctrl);

	return ret;
}

/*
 * This routine stops operation of the controller on the host side.
 * On the host os stack side: Admin and IO queues are stopped,
 *   outstanding ios on them terminated via FC ABTS.
 * On the link side: the association is terminated.
 */
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
	unsigned long flags;

	if (!ctrl->assoc_active)
		return;
	ctrl->assoc_active = false;

	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->flags |= FCCTRL_TERMIO;
	ctrl->iocnt = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	/*
	 * If io queues are present, stop them and terminate all outstanding
	 * ios on them. As FC allocates FC exchange for each io, the
	 * transport must contact the LLDD to terminate the exchange,
	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
	 * to tell us what io's are busy and invoke a transport routine
	 * to kill them with the LLDD. After terminating the exchange
	 * the LLDD will call the transport's normal io done path, but it
	 * will have an aborted status. The done path will return the
	 * io requests back to the block layer as part of normal completions
	 * (but with error status).
	 */
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
	}

	/*
	 * Other transports, which don't have link-level contexts bound
	 * to sqe's, would try to gracefully shutdown the controller by
	 * writing the registers for shutdown and polling (call
	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
	 * just aborted and we will wait on those contexts, and given
	 * there was no indication of how live the controller is on the
	 * link, don't send more io to create more contexts for the
	 * shutdown. Let the controller fail via keepalive failure if
	 * it's still present.
	 */

	/*
	 * clean up the admin queue. Same thing as above.
	 * use blk_mq_tagset_busy_iter() and the transport routine to
	 * terminate the exchanges.
	 */
	if (ctrl->ctrl.state != NVME_CTRL_NEW)
		blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);

	/* kill the aens as they are a separate path */
	nvme_fc_abort_aen_ops(ctrl);

	/* wait for all io that had to be aborted */
	spin_lock_irq(&ctrl->lock);
	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
	ctrl->flags &= ~FCCTRL_TERMIO;
	spin_unlock_irq(&ctrl->lock);

	nvme_fc_term_aen_ops(ctrl);

	/*
	 * send a Disconnect(association) LS to fc-nvme target
	 * Note: could have been sent at top of process, but
	 * cleaner on link traffic if after the aborts complete.
	 * Note: if association doesn't exist, association_id will be 0
	 */
	if (ctrl->association_id)
		nvme_fc_xmt_disconnect_assoc(ctrl);

	if (ctrl->ctrl.tagset) {
		nvme_fc_delete_hw_io_queues(ctrl);
		nvme_fc_free_io_queues(ctrl);
	}

	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
	nvme_fc_free_queue(&ctrl->queues[0]);

	nvme_fc_ctlr_inactive_on_rport(ctrl);
}

static void
nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	cancel_delayed_work_sync(&ctrl->connect_work);
	/*
	 * kill the association on the link side. this will block
	 * waiting for io to terminate
	 */
	nvme_fc_delete_association(ctrl);
}

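/*
 * Policy on a failed association: keep retrying while
 * nvmf_should_reconnect() allows and, if the remoteport is gone, only
 * until dev_loss_end; otherwise delete the controller. The reconnect
 * delay is clamped so the last attempt still lands within the
 * dev_loss_tmo window.
 */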
static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_remote_port *portptr = &rport->remoteport;
	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
	bool recon = true;

	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING)
		return;

	if (portptr->port_state == FC_OBJSTATE_ONLINE)
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
			ctrl->cnum, status);
	else if (time_after_eq(jiffies, rport->dev_loss_end))
		recon = false;

	if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: Reconnect attempt in %ld "
				"seconds\n",
				ctrl->cnum, recon_delay / HZ);
		else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
			recon_delay = rport->dev_loss_end - jiffies;

		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
	} else {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Max reconnect attempts (%d) "
				"reached. Removing controller\n",
				ctrl->cnum, ctrl->ctrl.nr_reconnects);
		else
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: dev_loss_tmo (%d) expired "
				"while waiting for remoteport connectivity. "
				"Removing controller\n", ctrl->cnum,
				portptr->dev_loss_tmo);
		WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
	}
}

static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);

	/* will block while waiting for io to terminate */
	nvme_fc_delete_association(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: error_recovery: Couldn't change state "
			"to RECONNECTING\n", ctrl->cnum);
		return;
	}

	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
		ret = nvme_fc_create_association(ctrl);
	else
		ret = -ENOTCONN;

	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reset complete\n",
			ctrl->cnum);
}

static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
	.name			= "fc",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
	.submit_async_event	= nvme_fc_submit_async_event,
	.delete_ctrl		= nvme_fc_delete_ctrl,
	.get_address		= nvmf_get_address,
	.reinit_request		= nvme_fc_reinit_request,
};

static void
nvme_fc_connect_ctrl_work(struct work_struct *work)
{
	int ret;

	struct nvme_fc_ctrl *ctrl =
			container_of(to_delayed_work(work),
				struct nvme_fc_ctrl, connect_work);

	ret = nvme_fc_create_association(ctrl);
	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reconnect complete\n",
			ctrl->cnum);
}

static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_admin_hctx,
	.timeout	= nvme_fc_timeout,
};

/*
 * Fails a controller request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_fc_existing_controller(struct nvme_fc_rport *rport,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return found;
}

static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	int ret, idx, retry;

	if (!(rport->remoteport.port_role &
	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
		ret = -EBADR;
		goto out_fail;
	}

	if (!opts->duplicate_connect &&
	    nvme_fc_existing_controller(rport, opts)) {
		ret = -EALREADY;
		goto out_fail;
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		ret = -ENOMEM;
		goto out_fail;
	}

	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_free_ctrl;
	}

	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
	ctrl->cnum = idx;
	ctrl->assoc_active = false;
	init_waitqueue_head(&ctrl->ioabort_wait);

	get_device(ctrl->dev);
	kref_init(&ctrl->ref);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
	spin_lock_init(&ctrl->lock);

	/* io queue count */
	ctrl->ctrl.queue_count = min_t(unsigned int,
				opts->nr_io_queues,
				lport->ops->max_hw_queues);
	ctrl->ctrl.queue_count++;	/* +1 for admin queue */

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
				sizeof(struct nvme_fc_queue), GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ida;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
					(SG_CHUNK_SIZE *
						sizeof(struct scatterlist)) +
					ctrl->lport->ops->fcprqst_priv_sz;
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (ret)
		goto out_free_queues;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		ret = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_admin_tag_set;
	}

	/*
	 * Would have been nice to init io queues tag set as well.
	 * However, we require interaction from the controller
	 * for max io queue count before we can do so.
	 * Defer this to the connect path.
	 */

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
	if (ret)
		goto out_cleanup_admin_q;

	/* at this point, teardown path changes to ref counting on nvme ctrl */

	spin_lock_irqsave(&rport->lock, flags);
	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	/*
	 * It's possible that transactions used to create the association
	 * may fail. Examples: CreateAssociation LS or CreateIOConnection
	 * LS gets dropped/corrupted/fails; or a frame gets dropped or a
	 * command times out for one of the actions to init the controller
	 * (Connect, Get/Set_Property, Set_Features, etc). Many of these
	 * transport errors (frame drop, LS failure) inherently must kill
	 * the association. The transport is coded so that any command used
	 * to create the association (prior to a LIVE state transition
	 * while NEW or RECONNECTING) will fail if it completes in error or
	 * times out.
	 *
	 * As such: as the connect request was mostly likely due to a
	 * udev event that discovered the remote port, meaning there is
	 * not an admin or script there to restart if the connect
	 * request fails, retry the initial connection creation up to
	 * three times before giving up and declaring failure.
	 */
	for (retry = 0; retry < 3; retry++) {
		ret = nvme_fc_create_association(ctrl);
		if (!ret)
			break;
	}

	if (ret) {
		/* couldn't schedule retry - fail out */
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);

		ctrl->ctrl.opts = NULL;

		/* initiate nvme ctrl ref counting teardown */
		nvme_uninit_ctrl(&ctrl->ctrl);
		nvme_put_ctrl(&ctrl->ctrl);

		/* Remove core ctrl ref. */
		nvme_put_ctrl(&ctrl->ctrl);

		/* as we're past the point where we transition to the ref
		 * counting teardown path, if we return a bad pointer here,
		 * the calling routine, thinking it's prior to the
		 * transition, will do an rport put. Since the teardown
		 * path also does a rport put, we do an extra get here to
		 * so proper order/teardown happens.
		 */
		nvme_fc_rport_get(rport);

		return ERR_PTR(ret);
	}

	nvme_get_ctrl(&ctrl->ctrl);

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

	return &ctrl->ctrl;

out_cleanup_admin_q:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_admin_tag_set:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
	kfree(ctrl->queues);
out_free_ida:
	put_device(ctrl->dev);
	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
	kfree(ctrl);
out_fail:
	/* exit via here doesn't follow ctlr ref points */
	return ERR_PTR(ret);
}

struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

/*
 * This routine validates and extracts the WWN's from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build string to parse with 0x prefix before parsing name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate the string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}

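/*
 * Illustrative traddr strings accepted above (WWN values are
 * hypothetical examples): the long form
 *	"nn-0x20000090fa942779:pn-0x10000090fa942779"
 * and the short form
 *	"nn-20000090fa942779:pn-10000090fa942779"
 * parse to the same node_name/port_name pair.
 */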
static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	struct nvme_ctrl *ctrl;
	struct nvmet_fc_traddr laddr = { 0L, 0L };
	struct nvmet_fc_traddr raddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
	if (ret || !raddr.nn || !raddr.pn)
		return ERR_PTR(-EINVAL);

	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
	if (ret || !laddr.nn || !laddr.pn)
		return ERR_PTR(-EINVAL);

	/* find the host and remote ports to connect together */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != laddr.nn ||
		    lport->localport.port_name != laddr.pn)
			continue;

		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (rport->remoteport.node_name != raddr.nn ||
			    rport->remoteport.port_name != raddr.pn)
				continue;

			/* if fail to get reference fall through. Will error */
			if (!nvme_fc_rport_get(rport))
				break;

			spin_unlock_irqrestore(&nvme_fc_lock, flags);

			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
			if (IS_ERR(ctrl))
				nvme_fc_rport_put(rport);
			return ctrl;
		}
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return ERR_PTR(-ENOENT);
}

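/*
 * Illustrative path into nvme_fc_create_ctrl() (values are
 * hypothetical): the fabrics layer dispatches here for -t fc, e.g.
 * via nvme-cli:
 *	nvme connect --transport=fc \
 *		--traddr=nn-0x20000090fa942779:pn-0x10000090fa942779 \
 *		--host-traddr=nn-0x20000090fa927abc:pn-0x10000090fa927abc \
 *		--nqn=<subsystem NQN>
 * traddr names the remote (target) port, host-traddr the local port.
 */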
static struct nvmf_transport_ops nvme_fc_transport = {
	.name		= "fc",
	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_fc_create_ctrl,
};

static int __init nvme_fc_init_module(void)
{
	int ret;

	/*
	 * NOTE:
	 * It is expected that in the future the kernel will combine
	 * the FC-isms that are currently under scsi and now being
	 * added to by NVME into a new standalone FC class. The SCSI
	 * and NVME protocols and their devices would be under this
	 * new FC class.
	 *
	 * As we need something to post FC-specific udev events to,
	 * specifically for nvme probe events, start by creating the
	 * new device class. When the new standalone FC class is
	 * put in place, this code will move to a more generic
	 * location for the class.
	 */
	fc_class = class_create(THIS_MODULE, "fc");
	if (IS_ERR(fc_class)) {
		pr_err("couldn't register class fc\n");
		return PTR_ERR(fc_class);
	}

	/*
	 * Create a device for the FC-centric udev events
	 */
	fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL,
				"fc_udev_device");
	if (IS_ERR(fc_udev_device)) {
		pr_err("couldn't create fc_udev device!\n");
		ret = PTR_ERR(fc_udev_device);
		goto out_destroy_class;
	}

	ret = nvmf_register_transport(&nvme_fc_transport);
	if (ret)
		goto out_destroy_device;

	return 0;

out_destroy_device:
	device_destroy(fc_class, MKDEV(0, 0));
out_destroy_class:
	class_destroy(fc_class);
	return ret;
}

static void __exit nvme_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvme_fc_lport_list))
		pr_warn("%s: localport list not empty\n", __func__);

	nvmf_unregister_transport(&nvme_fc_transport);

	ida_destroy(&nvme_fc_local_port_cnt);
	ida_destroy(&nvme_fc_ctrl_cnt);

	device_destroy(fc_class, MKDEV(0, 0));
	class_destroy(fc_class);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");