// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include <scsi/scsi_transport_fc.h>

/* *************************** Data Structures/Defines ****************** */

enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */

struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};

struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};

struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op	op;
	struct scatterlist	sgl[SG_CHUNK_SIZE];
	uint8_t			priv[0];
};

struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_lport_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list; /* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct list_head		disc_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcctrl_flags {
	FCCTRL_TERMIO		= (1 << 0),
};

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	bool			ioq_live;
	bool			assoc_active;
	atomic_t		err_work_active;
	u64			association_id;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct delayed_work	connect_work;
	struct work_struct	err_work;

	struct kref		ref;
	u32			flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}

/* *************************** Globals **************************** */

static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;

/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}

static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed.  If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	newrec->localport.private = &newrec[1];
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);

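/*
 * Illustrative sketch (not part of the driver): how an LLDD might call
 * nvme_fc_register_localport() at probe time. The lldd_* names, the
 * example WWNs and the sizing values are hypothetical; the template and
 * port-info fields are the ones the routine above validates and copies.
 */
#if 0
static struct nvme_fc_port_template lldd_fc_nvme_template = {
	.localport_delete	= lldd_localport_delete,
	.remoteport_delete	= lldd_remoteport_delete,
	.create_queue		= lldd_create_queue,
	.delete_queue		= lldd_delete_queue,
	.ls_req			= lldd_ls_req,
	.fcp_io			= lldd_fcp_io,
	.ls_abort		= lldd_ls_abort,
	.fcp_abort		= lldd_fcp_abort,
	.max_hw_queues		= 8,
	.max_sgl_segments	= 256,
	.max_dif_sgl_segments	= 256,
	.dma_boundary		= 0xFFFFFFFF,
	.local_priv_sz		= sizeof(struct lldd_lport_priv),
	.remote_priv_sz		= sizeof(struct lldd_rport_priv),
	.lsrqst_priv_sz		= sizeof(struct lldd_ls_priv),
	.fcprqst_priv_sz	= sizeof(struct lldd_fcp_priv),
};

static int lldd_register_fc_nvme(struct lldd_hba *hba)
{
	struct nvme_fc_port_info pinfo = {
		.node_name	= 0x200000109b123456ULL,	/* WWNN */
		.port_name	= 0x100000109b123456ULL,	/* WWPN */
		.port_role	= FC_PORT_ROLE_NVME_INITIATOR,
		.port_id	= 0x010200,		/* fabric-assigned DID */
	};

	/* dev supplies the DMA mapping device; localport handle returned */
	return nvme_fc_register_localport(&pinfo, &lldd_fc_nvme_template,
					  &hba->pdev->dev,
					  &hba->nvme_localport);
}
#endif
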
/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

/*
 * TRADDR strings, per FC-NVME are fixed format:
 *     "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * being announced:
 *     "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 * 19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64

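/*
 * Illustrative check (not in the driver): the 64-byte sizing above can
 * be verified at build time. A sketch, assuming it were placed inside
 * any function in this file:
 */
#if 0
	/* 19 (longest prefix) + 43 (fixed traddr) + 1 (NUL) <= 64 */
	BUILD_BUG_ON(sizeof("NVMEFC_HOST_TRADDR=") - 1 + 43 + 1 >
			FCNVME_TRADDR_LENGTH);
#endif
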
static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}

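/*
 * Illustrative sketch (not part of the driver): user space consumes the
 * "nvmediscovery" uevent raised above with a udev rule along these
 * lines; the exact rule file and nvme-cli invocation shipped by a
 * distribution may differ:
 *
 *   ACTION=="change", SUBSYSTEM=="fc", ENV{FC_EVENT}=="nvmediscovery", \
 *       RUN+="/usr/sbin/nvme connect-all --transport=fc \
 *             --host-traddr=$env{NVMEFC_HOST_TRADDR} \
 *             --traddr=$env{NVMEFC_TRADDR}"
 */
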
static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}

static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}

static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. Successful reconnects will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	newrec->dev = lport->dev;
	newrec->lport = lport;
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

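/*
 * Illustrative sketch (not part of the driver): an LLDD typically calls
 * nvme_fc_register_remoteport() once it discovers an NVMe-capable
 * target port on the fabric. The lldd_* names are hypothetical; the
 * port-info fields mirror what the routine above copies, and a
 * dev_loss_tmo of 0 takes the transport default per
 * __nvme_fc_set_dev_loss_tmo().
 */
#if 0
static int lldd_report_target(struct lldd_hba *hba, struct lldd_rport *rp)
{
	struct nvme_fc_port_info pinfo = {
		.node_name	= rp->wwnn,
		.port_name	= rp->wwpn,
		.port_role	= FC_PORT_ROLE_NVME_TARGET |
				  FC_PORT_ROLE_NVME_DISCOVERY,
		.port_id	= rp->d_id,
		.dev_loss_tmo	= 0,	/* 0: take the transport default */
	};

	return nvme_fc_register_remoteport(hba->nvme_localport, &pinfo,
					   &rp->nvme_remoteport);
}
#endif
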
static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}

static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer.  Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects.  No need to do
		 * anything further.  Reconnects will be attempted until
		 * either the ctlr_loss_tmo (max_retries * connect_delay)
		 * expires or the remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association.  No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	default:
		/* no action to take - let it delete */
		break;
	}
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference, which will allow, if all controllers
	 * go away, which should only occur after dev_loss_tmo occurs,
	 * for the rport to be torn down.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);

/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);

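/*
 * Illustrative sketch (not part of the driver): an LLDD that wants a
 * shorter (here 30s) dev_loss window could call, for a remoteport it
 * registered earlier (the "remoteport" variable is hypothetical):
 */
#if 0
	if (nvme_fc_set_remoteport_devloss(remoteport, 30))
		pr_warn("remoteport not ONLINE; dev_loss_tmo unchanged\n");
#endif
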
/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * On simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}

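/*
 * Illustrative sketch (not part of the driver): with a NULL dev (the
 * fcloop case) the wrappers above degenerate to no-ops, so callers can
 * use a single code path whether or not real DMA mapping happens:
 */
#if 0
	dma_addr_t dma;

	dma = fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE);	/* 0 */
	WARN_ON(fc_dma_mapping_error(NULL, dma));	/* no-op: returns 0 */
	fc_dma_unmap_single(NULL, dma, len, DMA_TO_DEVICE);	/* no-op */
#endif
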
/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
		 * wait until driver calls back. lldd responsible for
		 * the timeout action
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_LSACC		= 1,
	VERR_LSDESC_RQST	= 2,
	VERR_LSDESC_RQST_LEN	= 3,
	VERR_ASSOC_ID		= 4,
	VERR_ASSOC_ID_LEN	= 5,
	VERR_CONN_ID		= 6,
	VERR_CONN_ID_LEN	= 7,
	VERR_CR_ASSOC		= 8,
	VERR_CR_ASSOC_ACC_LEN	= 9,
	VERR_CR_CONN		= 10,
	VERR_CR_CONN_ACC_LEN	= 11,
	VERR_DISCONN		= 12,
	VERR_DISCONN_ACC_LEN	= 13,
};

static char *validation_errors[] = {
	"OK",
	"Not LS_ACC",
	"Not LSDESC_RQST",
	"Bad LSDESC_RQST Length",
	"Not Association ID",
	"Bad Association ID Length",
	"Not Connection ID",
	"Bad Connection ID Length",
	"Not CR_ASSOC Rqst",
	"Bad CR_ASSOC ACC Length",
	"Not CR_CONN Rqst",
	"Bad CR_CONN ACC Length",
	"Not Disconnect Rqst",
	"Bad Disconnect ACC Length",
};

static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect command failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association.  Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried.  As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
			GFP_KERNEL);
	if (!lsop)
		/* couldn't send it... too bad */
		return;

	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];

	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
	discon_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_disconn_cmd));

	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	discon_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));

	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);

	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
						FCNVME_LSDESC_DISCONN_CMD);
	discon_rqst->discon_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd));
	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

	lsreq->rqstaddr = discon_rqst;
	lsreq->rqstlen = sizeof(*discon_rqst);
	lsreq->rspaddr = discon_acc;
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);

	/* only meaningful part to terminating the association */
	ctrl->association_id = 0;
}

/* *********************** NVME Ctrl Routines **************************** */

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(set->driver_data, op);
}

static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	int opstate;

	spin_lock_irqsave(&ctrl->lock, flags);
	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (opstate != FCPOP_STATE_ACTIVE)
		atomic_set(&op->state, opstate);
	else if (ctrl->flags & FCCTRL_TERMIO)
		ctrl->iocnt++;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (opstate != FCPOP_STATE_ACTIVE)
		return -ECANCELED;

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}

static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	/* ensure we've initialized the ops once */
	if (!(aen_op->flags & FCOP_FLAGS_AEN))
		return;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
		__nvme_fc_abort_op(ctrl, aen_op);
}

static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;

	if (opstate == FCPOP_STATE_ABORTED) {
		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
}

static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;
	int opstate;

	/*
	 * WARNING:
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * Additionally:
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *      cqe.sqid,  cqe.sqhd,  cqe.command_id
	 *
	 * Failure or error of an individual i/o, in a transport
	 * detected fashion unrelated to the nvme completion status,
	 * can potentially cause the initiator and target sides to get
	 * out of sync on SQ head/tail (aka outstanding io count allowed).
	 * Per FC-NVME spec, failure of an individual command requires
	 * the connection to be terminated, which in turn requires the
	 * association to be terminated.
	 */

	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (opstate == FCPOP_STATE_ABORTED)
		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
	else if (freq->status)
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary).
	 */

	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload or 12 bytes of payload (which
		 * should all be zeros) are considered successful and
		 * no payload in the CQE by the transport.
		 */
		if (freq->transferred_length !=
			be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.status_code ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
		goto done;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	nvme_end_request(rq, status, result);

check_error:
	if (terminate_assoc)
		nvme_fc_error_recovery(ctrl, "transport detected io error");
}

static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fcp_op_w_sgl *op_w_sgl =
		container_of(op, typeof(*op_w_sgl), op);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}

static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
	int res;

	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
	if (res)
		return res;
	op->op.fcp_req.first_sgl = &op->sgl[0];
	op->op.fcp_req.private = &op->priv[0];
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	return res;
}

static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
		if (!private)
			return -ENOMEM;

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}

static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (!aen_op->fcp_req.private)
			continue;

		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}

static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}

static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 0);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	/*
	 * Considered whether we should allocate buffers for all SQEs
	 * and CQEs and dma map them - mapping their respective entries
	 * into the request structures (kernel vm addr and dma address)
	 * thus the driver could use the buffers/mappings directly.
	 * It only makes sense if the LLDD would use them for its
	 * messaging api. It's very unlikely most adapter api's would use
	 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
	 * structures were used instead.
	 */
}

/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 * The transport will send a Disconnect LS request to terminate
 * the queue's connection. Termination of the admin queue will also
 * terminate the association at the target.
 */
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	clear_bit(NVME_FC_Q_LIVE, &queue->flags);
	/*
	 * Current implementation never disconnects a single queue.
	 * It always terminates a whole association. So there is never
	 * a disconnect(queue) LS sent to the target.
	 */

	queue->connection_id = 0;
	atomic_set(&queue->csn, 0);
}

static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{
	if (ctrl->lport->ops->delete_queue)
		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
				queue->lldd_handle);
	queue->lldd_handle = NULL;
}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}

static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
	int ret = 0;

	queue->lldd_handle = NULL;
	if (ctrl->lport->ops->create_queue)
		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
				qidx, qsize, &queue->lldd_handle);

	return ret;
}

static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
	int i;

	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	for (; i >= 0; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}

static int
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
					(qsize / 5));
		if (ret)
			break;
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
		if (ret)
			break;

		set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
	}

	return ret;
}

static void
nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_init_queue(ctrl, i);
}

static void
nvme_fc_ctrl_free(struct kref *ref)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(ref, struct nvme_fc_ctrl, ref);
	unsigned long flags;

	if (ctrl->ctrl.tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}

	/* remove from rport list */
	spin_lock_irqsave(&ctrl->rport->lock, flags);
	list_del(&ctrl->ctrl_list);
	spin_unlock_irqrestore(&ctrl->rport->lock, flags);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);

	kfree(ctrl->queues);

	put_device(ctrl->dev);
	nvme_fc_rport_put(ctrl->rport);

	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
	if (ctrl->ctrl.opts)
		nvmf_free_options(ctrl->ctrl.opts);
	kfree(ctrl);
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
	return kref_get_unless_zero(&ctrl->ref);
}

/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call
 */
static void
nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	WARN_ON(nctrl != &ctrl->ctrl);

	nvme_fc_ctrl_put(ctrl);
}

static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
	int active;

	/*
	 * if an error (io timeout, etc) while (re)connecting,
	 * it's an error on creating the new association.
	 * Start the error recovery thread if it hasn't already
	 * been started. It is expected there could be multiple
	 * ios hitting this path before things are cleaned up.
	 */
	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
		active = atomic_xchg(&ctrl->err_work_active, 1);
		if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
			atomic_set(&ctrl->err_work_active, 0);
			WARN_ON(1);
		}
		return;
	}

	/* Otherwise, only proceed if in LIVE state - e.g. on first error */
	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
		return;

	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: transport association error detected: %s\n",
		ctrl->cnum, errmsg);
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);

	nvme_reset_ctrl(&ctrl->ctrl);
}

static enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	/*
	 * we can't individually ABTS an io without affecting the queue,
	 * thus killing the queue, and thus the association.
	 * So resolve by performing a controller reset, which will stop
	 * the host/io stack, terminate the association on the link,
	 * and recreate an association on the link.
	 */
	nvme_fc_error_recovery(ctrl, "io timeout error");

	/*
	 * the io abort has been initiated. Have the reset timer
	 * restarted and the abort completion will complete the io
	 * shortly. Avoids a synchronous wait while the abort finishes.
	 */
	return BLK_EH_RESET_TIMER;
}

static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	enum dma_data_direction dir;
	int ret;

	freq->sg_cnt = 0;

	if (!blk_rq_nr_phys_segments(rq))
		return 0;

	freq->sg_table.sgl = freq->first_sgl;
	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
	if (ret)
		return -ENOMEM;

	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, dir);
	if (unlikely(freq->sg_cnt <= 0)) {
		sg_free_table_chained(&freq->sg_table, true);
		freq->sg_cnt = 0;
		return -EFAULT;
	}

	/*
	 * TODO: blk_integrity_rq(rq)  for DIF
	 */
	return 0;
}

static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;

	if (!freq->sg_cnt)
		return;

	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
				((rq_data_dir(rq) == WRITE) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE));

	nvme_cleanup_cmd(rq);

	sg_free_table_chained(&freq->sg_table, true);

	freq->sg_cnt = 0;
}

/*
 * In FC, the queue is a logical thing. At transport connect, the target
 * creates its "queue" and returns a handle that is to be given to the
 * target whenever it posts something to the corresponding SQ.  When an
 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
 * command contained within the SQE, an io, and assigns a FC exchange
 * to it. The SQE and the associated SQ handle are sent in the initial
 * CMD IU sent on the exchange. All transfers relative to the io occur
 * as part of the exchange.  The CQE is the last thing for the io,
 * which is transferred (explicitly or implicitly) with the RSP IU
 * sent on the exchange. After the CQE is received, the FC exchange is
 * terminated and the Exchange may be used on a different io.
 *
 * The transport to LLDD api has the transport making a request for a
 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
 * resource and transfers the command. The LLDD will then process all
 * steps to complete the io. Upon completion, the transport done routine
 * is called to let the nvme layer know the io has completed and the fc
 * exchange can be terminated.
 *
 * So - while the operation is outstanding to the LLDD, there is a link
 * level FC exchange resource that is also outstanding. This must be
 * considered in all cleanup operations.
 */
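
/*
 * Illustrative sketch (not part of the driver): the shape of an LLDD's
 * ->fcp_io() handler backing the exchange model described above.
 * lldd_hw_queue, lldd_alloc_exchange and lldd_send_cmd_iu are
 * hypothetical names; only the entry-point signature and the
 * fcpreq->done() contract come from this transport.
 */
#if 0
static int
lldd_fcp_io(struct nvme_fc_local_port *localport,
		struct nvme_fc_remote_port *remoteport,
		void *hw_queue_handle, struct nvmefc_fcp_req *fcpreq)
{
	struct lldd_hw_queue *hwq = hw_queue_handle;
	struct lldd_exchange *xchg;

	/* tie an FC exchange to this io for its full lifetime */
	xchg = lldd_alloc_exchange(hwq);
	if (!xchg)
		return -EBUSY;	/* transport retries as BLK_STS_RESOURCE */

	/*
	 * send the CMD IU; fcpreq->done() is invoked once the RSP/ERSP
	 * IU has been received and the exchange is complete.
	 */
	return lldd_send_cmd_iu(xchg, fcpreq);
}
#endif
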
static blk_status_t
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	struct nvme_fc_fcp_op *op, u32 data_len,
	enum nvmefc_fcp_datadir io_dir)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	int ret, opstate;

	/*
	 * before attempting to send the io, check to see if we believe
	 * the target device is present
	 */
	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return BLK_STS_RESOURCE;

	if (!nvme_fc_ctrl_get(ctrl))
		return BLK_STS_IOERR;

	/* format the FC-NVME CMD IU and fcp_req */
	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
	cmdiu->data_len = cpu_to_be32(data_len);
	switch (io_dir) {
	case NVMEFC_FCP_WRITE:
		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
		break;
	case NVMEFC_FCP_READ:
		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
		break;
	case NVMEFC_FCP_NODATA:
		cmdiu->flags = 0;
		break;
	}

	op->fcp_req.payload_length = data_len;
	op->fcp_req.io_dir = io_dir;
	op->fcp_req.transferred_length = 0;
	op->fcp_req.rcv_rsplen = 0;
	op->fcp_req.status = NVME_SC_SUCCESS;
	op->fcp_req.sqid = cpu_to_le16(queue->qnum);

	/*
	 * validate per fabric rules, set fields mandated by fabric spec
	 * as well as those by FC-NVME spec.
	 */
	WARN_ON_ONCE(sqe->common.metadata);
	sqe->common.flags |= NVME_CMD_SGL_METABUF;

	/*
	 * format SQE DPTR field per FC-NVME rules:
	 *    type=0x5     Transport SGL Data Block Descriptor
	 *    subtype=0xA  Transport-specific value
	 *    address=0
	 *    length=length of the data series
	 */
	sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
					NVME_SGL_FMT_TRANSPORT_A;
	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
	sqe->rw.dptr.sgl.addr = 0;

	if (!(op->flags & FCOP_FLAGS_AEN)) {
		ret = nvme_fc_map_data(ctrl, op->rq, op);
		if (ret < 0) {
			nvme_cleanup_cmd(op->rq);
			nvme_fc_ctrl_put(ctrl);
			if (ret == -ENOMEM || ret == -EAGAIN)
				return BLK_STS_RESOURCE;
			return BLK_STS_IOERR;
		}
	}

	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
				  sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_ACTIVE);

	if (!(op->flags & FCOP_FLAGS_AEN))
		blk_mq_start_request(op->rq);

	cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					queue->lldd_handle, &op->fcp_req);

	if (ret) {
		/*
		 * If the lld fails to send the command, is there an issue
		 * with the csn value? If the command that fails is the
		 * Connect, no - as the connection won't be live. If it is
		 * a command post-connect, it's possible a gap in csn may
		 * be created. Does this matter? As Linux initiators don't
		 * send fused commands, no. The gap would exist, but as
		 * there's nothing that depends on csn order to be delivered
		 * on the target side, it shouldn't hurt. It would be
		 * difficult for a target to even detect the csn gap as it
		 * has no idea when the cmd with the csn was supposed to
		 * arrive.
		 */
		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);

		if (!(op->flags & FCOP_FLAGS_AEN))
			nvme_fc_unmap_data(ctrl, op->rq, op);

		nvme_fc_ctrl_put(ctrl);

		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
		    ret != -EBUSY)
			return BLK_STS_IOERR;

		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}
static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *rq = bd->rq;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	enum nvmefc_fcp_datadir io_dir;
	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
	u32 data_len;
	blk_status_t ret;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_setup_cmd(ns, rq, sqe);
	if (ret)
		return ret;

	/*
	 * nvme core doesn't quite treat the rq opaquely. Commands such
	 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
	 * there is no actual payload to be transferred.
	 * To get it right, key data transmission on there being 1 or
	 * more physical segments in the sg list. If there are no
	 * physical segments, there is no payload.
	 */
	if (blk_rq_nr_phys_segments(rq)) {
		data_len = blk_rq_payload_bytes(rq);
		io_dir = ((rq_data_dir(rq) == WRITE) ?
					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
	} else {
		data_len = 0;
		io_dir = NVMEFC_FCP_NODATA;
	}

	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}
static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
	struct nvme_fc_fcp_op *aen_op;
	unsigned long flags;
	bool terminating = false;
	blk_status_t ret;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->flags & FCCTRL_TERMIO)
		terminating = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (terminating)
		return;

	aen_op = &ctrl->aen_ops[0];

	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
					NVMEFC_FCP_NODATA);
	if (ret)
		dev_err(ctrl->ctrl.device,
			"failed async event work\n");
}
static void
nvme_fc_complete_rq(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	atomic_set(&op->state, FCPOP_STATE_IDLE);

	nvme_fc_unmap_data(ctrl, rq, op);
	nvme_complete_rq(rq);
	nvme_fc_ctrl_put(ctrl);
}
/*
 * This routine is used by the transport when it needs to find active
 * io on a queue that is to be terminated. The transport uses
 * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
 * this routine to kill them on a 1 by 1 basis.
 *
 * As FC allocates FC exchange for each io, the transport must contact
 * the LLDD to terminate the exchange, thus releasing the FC exchange.
 * After terminating the exchange the LLDD will call the transport's
 * normal io done path for the request, but it will have an aborted
 * status. The done path will return the io request back to the block
 * layer with an error status.
 */
static bool
nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
{
	struct nvme_ctrl *nctrl = data;
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);

	__nvme_fc_abort_op(ctrl, op);
	return true;
}
static const struct blk_mq_ops nvme_fc_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_hctx,
	.timeout	= nvme_fc_timeout,
};
static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	if (!nr_io_queues)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_fc_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size =
		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
			    ctrl->lport->ops->fcprqst_priv_sz);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		return ret;

	ctrl->ctrl.tagset = &ctrl->tag_set;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tag_set;
	}

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_cleanup_blk_queue;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	ctrl->ioq_live = true;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	blk_mq_free_tag_set(&ctrl->tag_set);
	nvme_fc_free_io_queues(ctrl);

	/* force put free routine to ignore io queues */
	ctrl->ctrl.tagset = NULL;

	return ret;
}
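
/*
 * cmd_size sizing, worked out (values illustrative): the struct_size()
 * expression above evaluates to sizeof(struct nvme_fcp_op_w_sgl) plus
 * fcprqst_priv_sz bytes of LLDD private space, so an LLDD reporting a
 * 64-byte private area gets exactly 64 extra bytes appended to every
 * request pdu allocated from this tag set.
 */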
static int
nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	if (!nr_io_queues && prior_ioq_cnt) {
		dev_info(ctrl->ctrl.device,
			"Fail Reconnect: At least 1 io queue "
			"required (was %d)\n", prior_ioq_cnt);
		return -ENOSPC;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	/* check for io queues existing */
	if (ctrl->ctrl.queue_count == 1)
		return 0;

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	if (prior_ioq_cnt != nr_io_queues)
		dev_info(ctrl->ctrl.device,
			"reconnect: revising io queue count from %d to %d\n",
			prior_ioq_cnt, nr_io_queues);
	blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_free_io_queues:
	nvme_fc_free_io_queues(ctrl);
	return ret;
}
static void
nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;

	atomic_inc(&lport->act_rport_cnt);
}
static void
nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	cnt = atomic_dec_return(&lport->act_rport_cnt);
	if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
		lport->ops->localport_delete(&lport->localport);
}
static int
nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	u32 cnt;

	if (ctrl->assoc_active)
		return 1;

	ctrl->assoc_active = true;
	cnt = atomic_inc_return(&rport->act_ctrl_cnt);
	if (cnt == 1)
		nvme_fc_rport_active_on_lport(rport);

	return 0;
}
static int
nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	/* ctrl->assoc_active=false will be set independently */

	cnt = atomic_dec_return(&rport->act_ctrl_cnt);
	if (cnt == 0) {
		if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
			lport->ops->remoteport_delete(&rport->remoteport);
		nvme_fc_rport_inactive_on_lport(rport);
	}

	return 0;
}
/*
 * This routine restarts the controller on the host side, and
 * on the link side, recreates the controller association.
 */
static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret;
	bool changed;

	++ctrl->ctrl.nr_reconnects;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ENODEV;

	if (nvme_fc_ctlr_active_on_rport(ctrl))
		return -ENOTUNIQ;

	/*
	 * Create the admin queue
	 */

	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
				NVME_AQ_DEPTH);
	if (ret)
		goto out_free_queue;

	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
	if (ret)
		goto out_delete_hw_queue;

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);

	/*
	 * Check controller capabilities
	 *
	 * todo:- add code to check if ctrl attributes changed from
	 * prior connection values
	 */

	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_disconnect_admin_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (ret)
		goto out_disconnect_admin_queue;

	ctrl->ctrl.max_hw_sectors =
		(ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);

	ret = nvme_init_identify(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	/* sanity checks */

	/* FC-NVME does not have other data in the capsule */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
				ctrl->ctrl.icdoff);
		goto out_disconnect_admin_queue;
	}

	/* FC-NVME supports normal SGL Data Block Descriptors */

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, reducing "
			"to maxcmd\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
		/* warn if sqsize is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.sqsize + 1);
		opts->queue_size = ctrl->ctrl.sqsize + 1;
	}

	ret = nvme_fc_init_aen_ops(ctrl);
	if (ret)
		goto out_term_aen_ops;

	/*
	 * Create the io queues
	 */

	if (ctrl->ctrl.queue_count > 1) {
		if (!ctrl->ioq_live)
			ret = nvme_fc_create_io_queues(ctrl);
		else
			ret = nvme_fc_recreate_io_queues(ctrl);
		if (ret)
			goto out_term_aen_ops;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);

	ctrl->ctrl.nr_reconnects = 0;

	if (changed)
		nvme_start_ctrl(&ctrl->ctrl);

	return 0;	/* Success */

out_term_aen_ops:
	nvme_fc_term_aen_ops(ctrl);
out_disconnect_admin_queue:
	/* send a Disconnect(association) LS to fc-nvme target */
	nvme_fc_xmt_disconnect_assoc(ctrl);
out_delete_hw_queue:
	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
	nvme_fc_free_queue(&ctrl->queues[0]);
	ctrl->assoc_active = false;
	nvme_fc_ctlr_inactive_on_rport(ctrl);

	return ret;
}
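
/*
 * Worked example of the clamping above (values illustrative): if the
 * user asked for queue_size 128 but the controller reports maxcmd 96,
 * queue_size is first reduced to 96; if CAP.MQES then yielded sqsize
 * 63 (64 entries), queue_size is further clamped to 64.
 */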
/*
 * This routine stops operation of the controller on the host side.
 * On the host os stack side: Admin and IO queues are stopped,
 *   outstanding ios on them terminated via FC ABTS.
 * On the link side: the association is terminated.
 */
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
	unsigned long flags;

	if (!ctrl->assoc_active)
		return;
	ctrl->assoc_active = false;

	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->flags |= FCCTRL_TERMIO;
	ctrl->iocnt = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	/*
	 * If io queues are present, stop them and terminate all outstanding
	 * ios on them. As FC allocates FC exchange for each io, the
	 * transport must contact the LLDD to terminate the exchange,
	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
	 * to tell us what io's are busy and invoke a transport routine
	 * to kill them with the LLDD. After terminating the exchange
	 * the LLDD will call the transport's normal io done path, but it
	 * will have an aborted status. The done path will return the
	 * io requests back to the block layer as part of normal completions
	 * (but with error status).
	 */
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
	}

	/*
	 * Other transports, which don't have link-level contexts bound
	 * to sqe's, would try to gracefully shutdown the controller by
	 * writing the registers for shutdown and polling (call
	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
	 * just aborted and we will wait on those contexts, and given
	 * there was no indication of how live the controller is on the
	 * link, don't send more io to create more contexts for the
	 * shutdown. Let the controller fail via keepalive failure if
	 * it's still present.
	 */

	/*
	 * clean up the admin queue. Same thing as above.
	 * use blk_mq_tagset_busy_iter() and the transport routine to
	 * terminate the exchanges.
	 */
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);

	/* kill the aens as they are a separate path */
	nvme_fc_abort_aen_ops(ctrl);

	/* wait for all io that had to be aborted */
	spin_lock_irq(&ctrl->lock);
	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
	ctrl->flags &= ~FCCTRL_TERMIO;
	spin_unlock_irq(&ctrl->lock);

	nvme_fc_term_aen_ops(ctrl);

	/*
	 * send a Disconnect(association) LS to fc-nvme target
	 * Note: could have been sent at top of process, but
	 * cleaner on link traffic if after the aborts complete.
	 * Note: if association doesn't exist, association_id will be 0
	 */
	if (ctrl->association_id)
		nvme_fc_xmt_disconnect_assoc(ctrl);

	if (ctrl->ctrl.tagset) {
		nvme_fc_delete_hw_io_queues(ctrl);
		nvme_fc_free_io_queues(ctrl);
	}

	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
	nvme_fc_free_queue(&ctrl->queues[0]);

	/* re-enable the admin_q so anything new can fast fail */
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	/* resume the io queues so that things will fast fail */
	nvme_start_queues(&ctrl->ctrl);

	nvme_fc_ctlr_inactive_on_rport(ctrl);
}
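
/*
 * Note on the unquiesce/restart at the tail above: once the admin and
 * io queues are running again, newly submitted requests reach the
 * ->queue_rq ready checks and fail fast rather than blocking, which is
 * what the "fast fail" comments refer to.
 */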
static void
nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->connect_work);
	/*
	 * kill the association on the link side. this will block
	 * waiting for io to terminate
	 */
	nvme_fc_delete_association(ctrl);
}
static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_remote_port *portptr = &rport->remoteport;
	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
	bool recon = true;

	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
		return;

	if (portptr->port_state == FC_OBJSTATE_ONLINE)
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
			ctrl->cnum, status);
	else if (time_after_eq(jiffies, rport->dev_loss_end))
		recon = false;

	if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: Reconnect attempt in %ld "
				"seconds\n",
				ctrl->cnum, recon_delay / HZ);
		else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
			recon_delay = rport->dev_loss_end - jiffies;

		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
	} else {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Max reconnect attempts (%d) "
				"reached.\n",
				ctrl->cnum, ctrl->ctrl.nr_reconnects);
		else
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: dev_loss_tmo (%d) expired "
				"while waiting for remoteport connectivity.\n",
				ctrl->cnum, portptr->dev_loss_tmo);
		WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
	}
}
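
/*
 * Delay capping example (values illustrative): with reconnect_delay of
 * 10 seconds but only 4 seconds left until rport->dev_loss_end, the
 * time_after() branch above trims recon_delay so the retry fires just
 * before dev_loss_tmo expires instead of after it.
 */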
static void
__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
{
	nvme_stop_keep_alive(&ctrl->ctrl);

	/* will block while waiting for io to terminate */
	nvme_fc_delete_association(ctrl);

	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: error_recovery: Couldn't change state "
			"to CONNECTING\n", ctrl->cnum);
}
static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
	int ret;

	__nvme_fc_terminate_io(ctrl);

	nvme_stop_ctrl(&ctrl->ctrl);

	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
		ret = nvme_fc_create_association(ctrl);
	else
		ret = -ENOTCONN;

	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reset complete\n",
			ctrl->cnum);
}
static void
nvme_fc_connect_err_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
			container_of(work, struct nvme_fc_ctrl, err_work);

	__nvme_fc_terminate_io(ctrl);

	atomic_set(&ctrl->err_work_active, 0);

	/*
	 * Rescheduling the connection after recovering
	 * from the io error is left to the reconnect work
	 * item, which is what should have stalled waiting on
	 * the io that had the error that scheduled this work.
	 */
}
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
	.name			= "fc",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
	.submit_async_event	= nvme_fc_submit_async_event,
	.delete_ctrl		= nvme_fc_delete_ctrl,
	.get_address		= nvmf_get_address,
};
static void
nvme_fc_connect_ctrl_work(struct work_struct *work)
{
	int ret;

	struct nvme_fc_ctrl *ctrl =
			container_of(to_delayed_work(work),
				struct nvme_fc_ctrl, connect_work);

	ret = nvme_fc_create_association(ctrl);
	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller connect complete\n",
			ctrl->cnum);
}
static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_admin_hctx,
	.timeout	= nvme_fc_timeout,
};
/*
 * Fails a controller request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_fc_existing_controller(struct nvme_fc_rport *rport,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return found;
}
static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	int ret, idx;

	if (!(rport->remoteport.port_role &
	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
		ret = -EBADR;
		goto out_fail;
	}

	if (!opts->duplicate_connect &&
	    nvme_fc_existing_controller(rport, opts)) {
		ret = -EALREADY;
		goto out_fail;
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		ret = -ENOMEM;
		goto out_fail;
	}

	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_free_ctrl;
	}

	ctrl->ctrl.opts = opts;
	ctrl->ctrl.nr_reconnects = 0;
	if (lport->dev)
		ctrl->ctrl.numa_node = dev_to_node(lport->dev);
	else
		ctrl->ctrl.numa_node = NUMA_NO_NODE;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
	ctrl->cnum = idx;
	ctrl->ioq_live = false;
	ctrl->assoc_active = false;
	atomic_set(&ctrl->err_work_active, 0);
	init_waitqueue_head(&ctrl->ioabort_wait);

	get_device(ctrl->dev);
	kref_init(&ctrl->ref);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
	spin_lock_init(&ctrl->lock);

	/* io queue count */
	ctrl->ctrl.queue_count = min_t(unsigned int,
				opts->nr_io_queues,
				lport->ops->max_hw_queues);
	ctrl->ctrl.queue_count++;	/* +1 for admin queue */

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->ctrl.cntlid = 0xffff;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
				sizeof(struct nvme_fc_queue), GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ida;

	nvme_fc_init_queue(ctrl, 0);

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->admin_tag_set.cmd_size =
		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
			    ctrl->lport->ops->fcprqst_priv_sz);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (ret)
		goto out_free_queues;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		ret = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_admin_tag_set;
	}

	/*
	 * Would have been nice to init io queues tag set as well.
	 * However, we require interaction from the controller
	 * for max io queue count before we can do so.
	 * Defer this to the connect path.
	 */

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
	if (ret)
		goto out_cleanup_admin_q;

	/* at this point, teardown path changes to ref counting on nvme ctrl */

	spin_lock_irqsave(&rport->lock, flags);
	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
		goto fail_ctrl;
	}

	nvme_get_ctrl(&ctrl->ctrl);

	if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
		nvme_put_ctrl(&ctrl->ctrl);
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to schedule initial connect\n",
			ctrl->cnum);
		goto fail_ctrl;
	}

	flush_delayed_work(&ctrl->connect_work);

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

	return &ctrl->ctrl;

fail_ctrl:
	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
	cancel_work_sync(&ctrl->ctrl.reset_work);
	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->connect_work);

	ctrl->ctrl.opts = NULL;

	/* initiate nvme ctrl ref counting teardown */
	nvme_uninit_ctrl(&ctrl->ctrl);

	/* Remove core ctrl ref. */
	nvme_put_ctrl(&ctrl->ctrl);

	/* as we're past the point where we transition to the ref
	 * counting teardown path, if we return a bad pointer here,
	 * the calling routine, thinking it's prior to the
	 * transition, will do an rport put. Since the teardown
	 * path also does a rport put, we do an extra get here so
	 * the proper order/teardown happens.
	 */
	nvme_fc_rport_get(rport);

	return ERR_PTR(-EIO);

out_cleanup_admin_q:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_admin_tag_set:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
	kfree(ctrl->queues);
out_free_ida:
	put_device(ctrl->dev);
	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
	kfree(ctrl);
out_fail:
	/* exit via here doesn't follow ctlr ref points */
	return ERR_PTR(ret);
}
struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};
static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}
/*
 * This routine validates and extracts the WWN's from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build string to parse with 0x prefix before parsing name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}
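
/*
 * Accepted traddr forms, for illustration (WWN values made up):
 *   nn-0x20000090fa942779:pn-0x10000090fa942779   (long form)
 *   nn-20000090fa942779:pn-10000090fa942779       (short form)
 * Both parse to the same node/port name pair; the routine prepends
 * the "0x" itself so match_u64() always sees a hex literal.
 */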
static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	struct nvme_ctrl *ctrl;
	struct nvmet_fc_traddr laddr = { 0L, 0L };
	struct nvmet_fc_traddr raddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
	if (ret || !raddr.nn || !raddr.pn)
		return ERR_PTR(-EINVAL);

	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
	if (ret || !laddr.nn || !laddr.pn)
		return ERR_PTR(-EINVAL);

	/* find the host and remote ports to connect together */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != laddr.nn ||
		    lport->localport.port_name != laddr.pn)
			continue;

		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (rport->remoteport.node_name != raddr.nn ||
			    rport->remoteport.port_name != raddr.pn)
				continue;

			/* if fail to get reference fall through. Will error */
			if (!nvme_fc_rport_get(rport))
				break;

			spin_unlock_irqrestore(&nvme_fc_lock, flags);

			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
			if (IS_ERR(ctrl))
				nvme_fc_rport_put(rport);
			return ctrl;
		}
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	pr_warn("%s: %s - %s combination not found\n",
		__func__, opts->traddr, opts->host_traddr);
	return ERR_PTR(-ENOENT);
}
static struct nvmf_transport_ops nvme_fc_transport = {
	.name		= "fc",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_fc_create_ctrl,
};
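
/*
 * Typical invocation from userspace, for illustration (addresses made
 * up; assumes nvme-cli):
 *   nvme connect --transport=fc \
 *        --traddr=nn-0x20000090fa942779:pn-0x10000090fa942779 \
 *        --host-traddr=nn-0x20000090fa942775:pn-0x10000090fa942775 \
 *        --nqn=nqn.2014-08.org.nvmexpress.discovery
 * The fabrics layer routes this to nvme_fc_create_ctrl() via the
 * .create_ctrl handler registered above.
 */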
/* Arbitrary successive failures max. With lots of subsystems could be high */
#define DISCOVERY_MAX_FAIL	20

static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long flags;
	LIST_HEAD(local_disc_list);
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	int failcnt = 0;

	spin_lock_irqsave(&nvme_fc_lock, flags);
restart:
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (!nvme_fc_lport_get(lport))
				continue;
			if (!nvme_fc_rport_get(rport)) {
				/*
				 * This is a temporary condition. Upon restart
				 * this rport will be gone from the list.
				 *
				 * Revert the lport put and retry. Anything
				 * added to the list already will be skipped
				 * (as they are no longer list_empty). Loops
				 * should resume at rports that were not yet
				 * seen.
				 */
				nvme_fc_lport_put(lport);

				if (failcnt++ < DISCOVERY_MAX_FAIL)
					goto restart;

				pr_err("nvme_discovery: too many reference "
				       "failures\n");
				goto process_local_list;
			}
			if (list_empty(&rport->disc_list))
				list_add_tail(&rport->disc_list,
					      &local_disc_list);
		}
	}

process_local_list:
	while (!list_empty(&local_disc_list)) {
		rport = list_first_entry(&local_disc_list,
					 struct nvme_fc_rport, disc_list);
		list_del_init(&rport->disc_list);
		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		lport = rport->lport;
		/* signal discovery. Won't hurt if it repeats */
		nvme_fc_signal_discovery_scan(lport, rport);
		nvme_fc_rport_put(rport);
		nvme_fc_lport_put(lport);

		spin_lock_irqsave(&nvme_fc_lock, flags);
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return count;
}
static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
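
/*
 * Usage sketch (path assumes the fc_udev_device created at module init
 * under the "fc" class): writing anything to
 *   /sys/class/fc/fc_udev_device/nvme_discovery
 * requests a discovery rescan against every known lport/rport pair.
 */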
static struct attribute *nvme_fc_attrs[] = {
	&dev_attr_nvme_discovery.attr,
	NULL
};

static struct attribute_group nvme_fc_attr_group = {
	.attrs = nvme_fc_attrs,
};

static const struct attribute_group *nvme_fc_attr_groups[] = {
	&nvme_fc_attr_group,
	NULL
};

static struct class fc_class = {
	.name = "fc",
	.dev_groups = nvme_fc_attr_groups,
	.owner = THIS_MODULE,
};
static int __init nvme_fc_init_module(void)
{
	int ret;

	nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
	if (!nvme_fc_wq)
		return -ENOMEM;

	/*
	 * NOTE:
	 * It is expected that in the future the kernel will combine
	 * the FC-isms that are currently under scsi and now being
	 * added to by NVME into a new standalone FC class. The SCSI
	 * and NVME protocols and their devices would be under this
	 * new FC class.
	 *
	 * As we need something to post FC-specific udev events to,
	 * specifically for nvme probe events, start by creating the
	 * new device class. When the new standalone FC class is
	 * put in place, this code will move to a more generic
	 * location for the class.
	 */
	ret = class_register(&fc_class);
	if (ret) {
		pr_err("couldn't register class fc\n");
		goto out_destroy_wq;
	}

	/*
	 * Create a device for the FC-centric udev events
	 */
	fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
				"fc_udev_device");
	if (IS_ERR(fc_udev_device)) {
		pr_err("couldn't create fc_udev device!\n");
		ret = PTR_ERR(fc_udev_device);
		goto out_destroy_class;
	}

	ret = nvmf_register_transport(&nvme_fc_transport);
	if (ret)
		goto out_destroy_device;

	return 0;

out_destroy_device:
	device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
	class_unregister(&fc_class);
out_destroy_wq:
	destroy_workqueue(nvme_fc_wq);

	return ret;
}
static void __exit nvme_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvme_fc_lport_list))
		pr_warn("%s: localport list not empty\n", __func__);

	nvmf_unregister_transport(&nvme_fc_transport);

	ida_destroy(&nvme_fc_local_port_cnt);
	ida_destroy(&nvme_fc_ctrl_cnt);

	device_destroy(&fc_class, MKDEV(0, 0));
	class_unregister(&fc_class);
	destroy_workqueue(nvme_fc_wq);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");