/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 #include <linux/module.h>
18 #include <linux/parser.h>
19 #include <uapi/scsi/fc/fc_fs.h>
21 #include "../host/nvme.h"
22 #include "../target/nvmet.h"
23 #include <linux/nvme-fc-driver.h>
24 #include <linux/nvme-fc.h>
/*
 * Option flags for the fcloop sysfs "add/del port" interfaces.
 * Each flag is a bit so a parse result can be OR-accumulated into a mask
 * (NVMF_OPT_ERR is the match_token() "no match" sentinel used below).
 */
enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};
37 struct fcloop_ctrl_options
{
47 static const match_table_t opt_tokens
= {
48 { NVMF_OPT_WWNN
, "wwnn=%s" },
49 { NVMF_OPT_WWPN
, "wwpn=%s" },
50 { NVMF_OPT_ROLES
, "roles=%d" },
51 { NVMF_OPT_FCADDR
, "fcaddr=%x" },
52 { NVMF_OPT_LPWWNN
, "lpwwnn=%s" },
53 { NVMF_OPT_LPWWPN
, "lpwwpn=%s" },
54 { NVMF_OPT_ERR
, NULL
}
58 fcloop_parse_options(struct fcloop_ctrl_options
*opts
,
61 substring_t args
[MAX_OPT_ARGS
];
62 char *options
, *o
, *p
;
66 options
= o
= kstrdup(buf
, GFP_KERNEL
);
70 while ((p
= strsep(&o
, ",\n")) != NULL
) {
74 token
= match_token(p
, opt_tokens
, args
);
78 if (match_u64(args
, &token64
)) {
80 goto out_free_options
;
85 if (match_u64(args
, &token64
)) {
87 goto out_free_options
;
92 if (match_int(args
, &token
)) {
94 goto out_free_options
;
99 if (match_hex(args
, &token
)) {
101 goto out_free_options
;
103 opts
->fcaddr
= token
;
105 case NVMF_OPT_LPWWNN
:
106 if (match_u64(args
, &token64
)) {
108 goto out_free_options
;
110 opts
->lpwwnn
= token64
;
112 case NVMF_OPT_LPWWPN
:
113 if (match_u64(args
, &token64
)) {
115 goto out_free_options
;
117 opts
->lpwwpn
= token64
;
120 pr_warn("unknown parameter or missing value '%s'\n", p
);
122 goto out_free_options
;
133 fcloop_parse_nm_options(struct device
*dev
, u64
*nname
, u64
*pname
,
136 substring_t args
[MAX_OPT_ARGS
];
137 char *options
, *o
, *p
;
144 options
= o
= kstrdup(buf
, GFP_KERNEL
);
148 while ((p
= strsep(&o
, ",\n")) != NULL
) {
152 token
= match_token(p
, opt_tokens
, args
);
155 if (match_u64(args
, &token64
)) {
157 goto out_free_options
;
162 if (match_u64(args
, &token64
)) {
164 goto out_free_options
;
169 pr_warn("unknown parameter or missing value '%s'\n", p
);
171 goto out_free_options
;
/* Required-option masks for each port kind */
#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define ALL_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN | NVMF_OPT_ROLES | \
			 NVMF_OPT_FCADDR | NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

/* Global port registries; fcloop_lock guards both lists */
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);
204 struct fcloop_lport
{
205 struct nvme_fc_local_port
*localport
;
206 struct list_head lport_list
;
207 struct completion unreg_done
;
/* Private data attached to an nvme_fc remote port registration. */
struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport; /* paired target, or NULL */
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};
/* Private data attached to an nvmet_fc target port registration. */
struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;	/* paired remote, or NULL */
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};
224 struct fcloop_nport
{
225 struct fcloop_rport
*rport
;
226 struct fcloop_tport
*tport
;
227 struct fcloop_lport
*lport
;
228 struct list_head nport_list
;
230 struct completion rport_unreg_done
;
231 struct completion tport_unreg_done
;
238 struct fcloop_lsreq
{
239 struct fcloop_tport
*tport
;
240 struct nvmefc_ls_req
*lsreq
;
241 struct work_struct work
;
242 struct nvmefc_tgt_ls_req tgt_ls_req
;
246 struct fcloop_fcpreq
{
247 struct fcloop_tport
*tport
;
248 struct nvmefc_fcp_req
*fcpreq
;
250 struct work_struct work
;
251 struct nvmefc_tgt_fcp_req tgt_fcp_req
;
255 static inline struct fcloop_lsreq
*
256 tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req
*tgt_lsreq
)
258 return container_of(tgt_lsreq
, struct fcloop_lsreq
, tgt_ls_req
);
261 static inline struct fcloop_fcpreq
*
262 tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req
*tgt_fcpreq
)
264 return container_of(tgt_fcpreq
, struct fcloop_fcpreq
, tgt_fcp_req
);
269 fcloop_create_queue(struct nvme_fc_local_port
*localport
,
270 unsigned int qidx
, u16 qsize
,
/* Queue delete callback: nothing to tear down for the loopback. */
static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}
285 * Transmit of LS RSP done (e.g. buffers all set). call back up
286 * initiator "done" flows.
289 fcloop_tgt_lsrqst_done_work(struct work_struct
*work
)
291 struct fcloop_lsreq
*tls_req
=
292 container_of(work
, struct fcloop_lsreq
, work
);
293 struct fcloop_tport
*tport
= tls_req
->tport
;
294 struct nvmefc_ls_req
*lsreq
= tls_req
->lsreq
;
296 if (tport
->remoteport
)
297 lsreq
->done(lsreq
, tls_req
->status
);
/*
 * Initiator LS request: loop the request straight into the paired
 * nvmet-fc target port, or fail it asynchronously if no target is
 * attached.  Returns 0 or the nvmet_fc_rcv_ls_req() result.
 *
 * NOTE(review): early-return path reconstructed from fragments.
 */
static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		/* no target attached: complete with a refusal from work ctx */
		tls_req->status = -ECONNREFUSED;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}
327 fcloop_xmt_ls_rsp(struct nvmet_fc_target_port
*tport
,
328 struct nvmefc_tgt_ls_req
*tgt_lsreq
)
330 struct fcloop_lsreq
*tls_req
= tgt_ls_req_to_lsreq(tgt_lsreq
);
331 struct nvmefc_ls_req
*lsreq
= tls_req
->lsreq
;
333 memcpy(lsreq
->rspaddr
, tgt_lsreq
->rspbuf
,
334 ((lsreq
->rsplen
< tgt_lsreq
->rsplen
) ?
335 lsreq
->rsplen
: tgt_lsreq
->rsplen
));
336 tgt_lsreq
->done(tgt_lsreq
);
338 schedule_work(&tls_req
->work
);
344 * FCP IO operation done. call back up initiator "done" flows.
347 fcloop_tgt_fcprqst_done_work(struct work_struct
*work
)
349 struct fcloop_fcpreq
*tfcp_req
=
350 container_of(work
, struct fcloop_fcpreq
, work
);
351 struct fcloop_tport
*tport
= tfcp_req
->tport
;
352 struct nvmefc_fcp_req
*fcpreq
= tfcp_req
->fcpreq
;
354 if (tport
->remoteport
) {
355 fcpreq
->status
= tfcp_req
->status
;
356 fcpreq
->done(fcpreq
);
362 fcloop_fcp_req(struct nvme_fc_local_port
*localport
,
363 struct nvme_fc_remote_port
*remoteport
,
364 void *hw_queue_handle
,
365 struct nvmefc_fcp_req
*fcpreq
)
367 struct fcloop_fcpreq
*tfcp_req
= fcpreq
->private;
368 struct fcloop_rport
*rport
= remoteport
->private;
371 INIT_WORK(&tfcp_req
->work
, fcloop_tgt_fcprqst_done_work
);
373 if (!rport
->targetport
) {
374 tfcp_req
->status
= NVME_SC_FC_TRANSPORT_ERROR
;
375 schedule_work(&tfcp_req
->work
);
379 tfcp_req
->fcpreq
= fcpreq
;
380 tfcp_req
->tport
= rport
->targetport
->private;
382 ret
= nvmet_fc_rcv_fcp_req(rport
->targetport
, &tfcp_req
->tgt_fcp_req
,
383 fcpreq
->cmdaddr
, fcpreq
->cmdlen
);
389 fcloop_fcp_copy_data(u8 op
, struct scatterlist
*data_sg
,
390 struct scatterlist
*io_sg
, u32 offset
, u32 length
)
393 u32 data_len
, io_len
, tlen
;
395 io_p
= sg_virt(io_sg
);
396 io_len
= io_sg
->length
;
399 tlen
= min_t(u32
, offset
, io_len
);
403 io_sg
= sg_next(io_sg
);
404 io_p
= sg_virt(io_sg
);
405 io_len
= io_sg
->length
;
410 data_p
= sg_virt(data_sg
);
411 data_len
= data_sg
->length
;
414 tlen
= min_t(u32
, io_len
, data_len
);
415 tlen
= min_t(u32
, tlen
, length
);
417 if (op
== NVMET_FCOP_WRITEDATA
)
418 memcpy(data_p
, io_p
, tlen
);
420 memcpy(io_p
, data_p
, tlen
);
425 if ((!io_len
) && (length
)) {
426 io_sg
= sg_next(io_sg
);
427 io_p
= sg_virt(io_sg
);
428 io_len
= io_sg
->length
;
433 if ((!data_len
) && (length
)) {
434 data_sg
= sg_next(data_sg
);
435 data_p
= sg_virt(data_sg
);
436 data_len
= data_sg
->length
;
443 fcloop_fcp_op(struct nvmet_fc_target_port
*tgtport
,
444 struct nvmefc_tgt_fcp_req
*tgt_fcpreq
)
446 struct fcloop_fcpreq
*tfcp_req
= tgt_fcp_req_to_fcpreq(tgt_fcpreq
);
447 struct nvmefc_fcp_req
*fcpreq
= tfcp_req
->fcpreq
;
448 u32 rsplen
= 0, xfrlen
= 0;
450 u8 op
= tgt_fcpreq
->op
;
453 case NVMET_FCOP_WRITEDATA
:
454 xfrlen
= tgt_fcpreq
->transfer_length
;
455 fcloop_fcp_copy_data(op
, tgt_fcpreq
->sg
, fcpreq
->first_sgl
,
456 tgt_fcpreq
->offset
, xfrlen
);
457 fcpreq
->transferred_length
+= xfrlen
;
460 case NVMET_FCOP_READDATA
:
461 case NVMET_FCOP_READDATA_RSP
:
462 xfrlen
= tgt_fcpreq
->transfer_length
;
463 fcloop_fcp_copy_data(op
, tgt_fcpreq
->sg
, fcpreq
->first_sgl
,
464 tgt_fcpreq
->offset
, xfrlen
);
465 fcpreq
->transferred_length
+= xfrlen
;
466 if (op
== NVMET_FCOP_READDATA
)
469 /* Fall-Thru to RSP handling */
472 rsplen
= ((fcpreq
->rsplen
< tgt_fcpreq
->rsplen
) ?
473 fcpreq
->rsplen
: tgt_fcpreq
->rsplen
);
474 memcpy(fcpreq
->rspaddr
, tgt_fcpreq
->rspaddr
, rsplen
);
475 if (rsplen
< tgt_fcpreq
->rsplen
)
477 fcpreq
->rcv_rsplen
= rsplen
;
479 tfcp_req
->status
= 0;
482 case NVMET_FCOP_ABORT
:
483 tfcp_req
->status
= NVME_SC_FC_TRANSPORT_ABORTED
;
491 tgt_fcpreq
->transferred_length
= xfrlen
;
492 tgt_fcpreq
->fcp_error
= fcp_err
;
493 tgt_fcpreq
->done(tgt_fcpreq
);
495 if ((!fcp_err
) && (op
== NVMET_FCOP_RSP
||
496 op
== NVMET_FCOP_READDATA_RSP
||
497 op
== NVMET_FCOP_ABORT
))
498 schedule_work(&tfcp_req
->work
);
/* LS abort: nothing to do for the loopback transport. */
static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}
/* FCP abort: nothing to do for the loopback transport. */
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
}
519 fcloop_localport_delete(struct nvme_fc_local_port
*localport
)
521 struct fcloop_lport
*lport
= localport
->private;
523 /* release any threads waiting for the unreg to complete */
524 complete(&lport
->unreg_done
);
528 fcloop_remoteport_delete(struct nvme_fc_remote_port
*remoteport
)
530 struct fcloop_rport
*rport
= remoteport
->private;
532 /* release any threads waiting for the unreg to complete */
533 complete(&rport
->nport
->rport_unreg_done
);
537 fcloop_targetport_delete(struct nvmet_fc_target_port
*targetport
)
539 struct fcloop_tport
*tport
= targetport
->private;
541 /* release any threads waiting for the unreg to complete */
542 complete(&tport
->nport
->tport_unreg_done
);
545 #define FCLOOP_HW_QUEUES 4
546 #define FCLOOP_SGL_SEGS 256
547 #define FCLOOP_DMABOUND_4G 0xFFFFFFFF
549 struct nvme_fc_port_template fctemplate
= {
550 .localport_delete
= fcloop_localport_delete
,
551 .remoteport_delete
= fcloop_remoteport_delete
,
552 .create_queue
= fcloop_create_queue
,
553 .delete_queue
= fcloop_delete_queue
,
554 .ls_req
= fcloop_ls_req
,
555 .fcp_io
= fcloop_fcp_req
,
556 .ls_abort
= fcloop_ls_abort
,
557 .fcp_abort
= fcloop_fcp_abort
,
558 .max_hw_queues
= FCLOOP_HW_QUEUES
,
559 .max_sgl_segments
= FCLOOP_SGL_SEGS
,
560 .max_dif_sgl_segments
= FCLOOP_SGL_SEGS
,
561 .dma_boundary
= FCLOOP_DMABOUND_4G
,
562 /* sizes of additional private data for data structures */
563 .local_priv_sz
= sizeof(struct fcloop_lport
),
564 .remote_priv_sz
= sizeof(struct fcloop_rport
),
565 .lsrqst_priv_sz
= sizeof(struct fcloop_lsreq
),
566 .fcprqst_priv_sz
= sizeof(struct fcloop_fcpreq
),
569 struct nvmet_fc_target_template tgttemplate
= {
570 .targetport_delete
= fcloop_targetport_delete
,
571 .xmt_ls_rsp
= fcloop_xmt_ls_rsp
,
572 .fcp_op
= fcloop_fcp_op
,
573 .max_hw_queues
= FCLOOP_HW_QUEUES
,
574 .max_sgl_segments
= FCLOOP_SGL_SEGS
,
575 .max_dif_sgl_segments
= FCLOOP_SGL_SEGS
,
576 .dma_boundary
= FCLOOP_DMABOUND_4G
,
577 /* optional features */
578 .target_features
= NVMET_FCTGTFEAT_READDATA_RSP
|
579 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED
,
580 /* sizes of additional private data for data structures */
581 .target_priv_sz
= sizeof(struct fcloop_tport
),
585 fcloop_create_local_port(struct device
*dev
, struct device_attribute
*attr
,
586 const char *buf
, size_t count
)
588 struct nvme_fc_port_info pinfo
;
589 struct fcloop_ctrl_options
*opts
;
590 struct nvme_fc_local_port
*localport
;
591 struct fcloop_lport
*lport
;
594 opts
= kzalloc(sizeof(*opts
), GFP_KERNEL
);
598 ret
= fcloop_parse_options(opts
, buf
);
602 /* everything there ? */
603 if ((opts
->mask
& LPORT_OPTS
) != LPORT_OPTS
) {
608 pinfo
.node_name
= opts
->wwnn
;
609 pinfo
.port_name
= opts
->wwpn
;
610 pinfo
.port_role
= opts
->roles
;
611 pinfo
.port_id
= opts
->fcaddr
;
613 ret
= nvme_fc_register_localport(&pinfo
, &fctemplate
, NULL
, &localport
);
618 lport
= localport
->private;
619 lport
->localport
= localport
;
620 INIT_LIST_HEAD(&lport
->lport_list
);
622 spin_lock_irqsave(&fcloop_lock
, flags
);
623 list_add_tail(&lport
->lport_list
, &fcloop_lports
);
624 spin_unlock_irqrestore(&fcloop_lock
, flags
);
626 /* mark all of the input buffer consumed */
632 return ret
? ret
: count
;
637 __unlink_local_port(struct fcloop_lport
*lport
)
639 list_del(&lport
->lport_list
);
643 __wait_localport_unreg(struct fcloop_lport
*lport
)
647 init_completion(&lport
->unreg_done
);
649 ret
= nvme_fc_unregister_localport(lport
->localport
);
651 wait_for_completion(&lport
->unreg_done
);
658 fcloop_delete_local_port(struct device
*dev
, struct device_attribute
*attr
,
659 const char *buf
, size_t count
)
661 struct fcloop_lport
*tlport
, *lport
= NULL
;
662 u64 nodename
, portname
;
666 ret
= fcloop_parse_nm_options(dev
, &nodename
, &portname
, buf
);
670 spin_lock_irqsave(&fcloop_lock
, flags
);
672 list_for_each_entry(tlport
, &fcloop_lports
, lport_list
) {
673 if (tlport
->localport
->node_name
== nodename
&&
674 tlport
->localport
->port_name
== portname
) {
676 __unlink_local_port(lport
);
680 spin_unlock_irqrestore(&fcloop_lock
, flags
);
685 ret
= __wait_localport_unreg(lport
);
687 return ret
? ret
: count
;
691 fcloop_nport_free(struct kref
*ref
)
693 struct fcloop_nport
*nport
=
694 container_of(ref
, struct fcloop_nport
, ref
);
697 spin_lock_irqsave(&fcloop_lock
, flags
);
698 list_del(&nport
->nport_list
);
699 spin_unlock_irqrestore(&fcloop_lock
, flags
);
705 fcloop_nport_put(struct fcloop_nport
*nport
)
707 kref_put(&nport
->ref
, fcloop_nport_free
);
711 fcloop_nport_get(struct fcloop_nport
*nport
)
713 return kref_get_unless_zero(&nport
->ref
);
716 static struct fcloop_nport
*
717 fcloop_alloc_nport(const char *buf
, size_t count
, bool remoteport
)
719 struct fcloop_nport
*newnport
, *nport
= NULL
;
720 struct fcloop_lport
*tmplport
, *lport
= NULL
;
721 struct fcloop_ctrl_options
*opts
;
723 u32 opts_mask
= (remoteport
) ? RPORT_OPTS
: TGTPORT_OPTS
;
726 opts
= kzalloc(sizeof(*opts
), GFP_KERNEL
);
730 ret
= fcloop_parse_options(opts
, buf
);
734 /* everything there ? */
735 if ((opts
->mask
& opts_mask
) != opts_mask
) {
740 newnport
= kzalloc(sizeof(*newnport
), GFP_KERNEL
);
744 INIT_LIST_HEAD(&newnport
->nport_list
);
745 newnport
->node_name
= opts
->wwnn
;
746 newnport
->port_name
= opts
->wwpn
;
747 if (opts
->mask
& NVMF_OPT_ROLES
)
748 newnport
->port_role
= opts
->roles
;
749 if (opts
->mask
& NVMF_OPT_FCADDR
)
750 newnport
->port_id
= opts
->fcaddr
;
751 kref_init(&newnport
->ref
);
753 spin_lock_irqsave(&fcloop_lock
, flags
);
755 list_for_each_entry(tmplport
, &fcloop_lports
, lport_list
) {
756 if (tmplport
->localport
->node_name
== opts
->wwnn
&&
757 tmplport
->localport
->port_name
== opts
->wwpn
)
758 goto out_invalid_opts
;
760 if (tmplport
->localport
->node_name
== opts
->lpwwnn
&&
761 tmplport
->localport
->port_name
== opts
->lpwwpn
)
767 goto out_invalid_opts
;
768 newnport
->lport
= lport
;
771 list_for_each_entry(nport
, &fcloop_nports
, nport_list
) {
772 if (nport
->node_name
== opts
->wwnn
&&
773 nport
->port_name
== opts
->wwpn
) {
774 if ((remoteport
&& nport
->rport
) ||
775 (!remoteport
&& nport
->tport
)) {
777 goto out_invalid_opts
;
780 fcloop_nport_get(nport
);
782 spin_unlock_irqrestore(&fcloop_lock
, flags
);
785 nport
->lport
= lport
;
786 if (opts
->mask
& NVMF_OPT_ROLES
)
787 nport
->port_role
= opts
->roles
;
788 if (opts
->mask
& NVMF_OPT_FCADDR
)
789 nport
->port_id
= opts
->fcaddr
;
790 goto out_free_newnport
;
794 list_add_tail(&newnport
->nport_list
, &fcloop_nports
);
796 spin_unlock_irqrestore(&fcloop_lock
, flags
);
802 spin_unlock_irqrestore(&fcloop_lock
, flags
);
811 fcloop_create_remote_port(struct device
*dev
, struct device_attribute
*attr
,
812 const char *buf
, size_t count
)
814 struct nvme_fc_remote_port
*remoteport
;
815 struct fcloop_nport
*nport
;
816 struct fcloop_rport
*rport
;
817 struct nvme_fc_port_info pinfo
;
820 nport
= fcloop_alloc_nport(buf
, count
, true);
824 pinfo
.node_name
= nport
->node_name
;
825 pinfo
.port_name
= nport
->port_name
;
826 pinfo
.port_role
= nport
->port_role
;
827 pinfo
.port_id
= nport
->port_id
;
829 ret
= nvme_fc_register_remoteport(nport
->lport
->localport
,
830 &pinfo
, &remoteport
);
831 if (ret
|| !remoteport
) {
832 fcloop_nport_put(nport
);
837 rport
= remoteport
->private;
838 rport
->remoteport
= remoteport
;
839 rport
->targetport
= (nport
->tport
) ? nport
->tport
->targetport
: NULL
;
841 nport
->tport
->remoteport
= remoteport
;
842 nport
->tport
->lport
= nport
->lport
;
844 rport
->nport
= nport
;
845 rport
->lport
= nport
->lport
;
846 nport
->rport
= rport
;
848 return ret
? ret
: count
;
852 static struct fcloop_rport
*
853 __unlink_remote_port(struct fcloop_nport
*nport
)
855 struct fcloop_rport
*rport
= nport
->rport
;
857 if (rport
&& nport
->tport
)
858 nport
->tport
->remoteport
= NULL
;
865 __wait_remoteport_unreg(struct fcloop_nport
*nport
, struct fcloop_rport
*rport
)
872 init_completion(&nport
->rport_unreg_done
);
874 ret
= nvme_fc_unregister_remoteport(rport
->remoteport
);
878 wait_for_completion(&nport
->rport_unreg_done
);
880 fcloop_nport_put(nport
);
886 fcloop_delete_remote_port(struct device
*dev
, struct device_attribute
*attr
,
887 const char *buf
, size_t count
)
889 struct fcloop_nport
*nport
= NULL
, *tmpport
;
890 static struct fcloop_rport
*rport
;
891 u64 nodename
, portname
;
895 ret
= fcloop_parse_nm_options(dev
, &nodename
, &portname
, buf
);
899 spin_lock_irqsave(&fcloop_lock
, flags
);
901 list_for_each_entry(tmpport
, &fcloop_nports
, nport_list
) {
902 if (tmpport
->node_name
== nodename
&&
903 tmpport
->port_name
== portname
&& tmpport
->rport
) {
905 rport
= __unlink_remote_port(nport
);
910 spin_unlock_irqrestore(&fcloop_lock
, flags
);
915 ret
= __wait_remoteport_unreg(nport
, rport
);
917 return ret
? ret
: count
;
921 fcloop_create_target_port(struct device
*dev
, struct device_attribute
*attr
,
922 const char *buf
, size_t count
)
924 struct nvmet_fc_target_port
*targetport
;
925 struct fcloop_nport
*nport
;
926 struct fcloop_tport
*tport
;
927 struct nvmet_fc_port_info tinfo
;
930 nport
= fcloop_alloc_nport(buf
, count
, false);
934 tinfo
.node_name
= nport
->node_name
;
935 tinfo
.port_name
= nport
->port_name
;
936 tinfo
.port_id
= nport
->port_id
;
938 ret
= nvmet_fc_register_targetport(&tinfo
, &tgttemplate
, NULL
,
941 fcloop_nport_put(nport
);
946 tport
= targetport
->private;
947 tport
->targetport
= targetport
;
948 tport
->remoteport
= (nport
->rport
) ? nport
->rport
->remoteport
: NULL
;
950 nport
->rport
->targetport
= targetport
;
951 tport
->nport
= nport
;
952 tport
->lport
= nport
->lport
;
953 nport
->tport
= tport
;
955 return ret
? ret
: count
;
959 static struct fcloop_tport
*
960 __unlink_target_port(struct fcloop_nport
*nport
)
962 struct fcloop_tport
*tport
= nport
->tport
;
964 if (tport
&& nport
->rport
)
965 nport
->rport
->targetport
= NULL
;
972 __wait_targetport_unreg(struct fcloop_nport
*nport
, struct fcloop_tport
*tport
)
979 init_completion(&nport
->tport_unreg_done
);
981 ret
= nvmet_fc_unregister_targetport(tport
->targetport
);
985 wait_for_completion(&nport
->tport_unreg_done
);
987 fcloop_nport_put(nport
);
993 fcloop_delete_target_port(struct device
*dev
, struct device_attribute
*attr
,
994 const char *buf
, size_t count
)
996 struct fcloop_nport
*nport
= NULL
, *tmpport
;
997 struct fcloop_tport
*tport
;
998 u64 nodename
, portname
;
1002 ret
= fcloop_parse_nm_options(dev
, &nodename
, &portname
, buf
);
1006 spin_lock_irqsave(&fcloop_lock
, flags
);
1008 list_for_each_entry(tmpport
, &fcloop_nports
, nport_list
) {
1009 if (tmpport
->node_name
== nodename
&&
1010 tmpport
->port_name
== portname
&& tmpport
->tport
) {
1012 tport
= __unlink_target_port(nport
);
1017 spin_unlock_irqrestore(&fcloop_lock
, flags
);
1022 ret
= __wait_targetport_unreg(nport
, tport
);
1024 return ret
? ret
: count
;
1028 static DEVICE_ATTR(add_local_port
, 0200, NULL
, fcloop_create_local_port
);
1029 static DEVICE_ATTR(del_local_port
, 0200, NULL
, fcloop_delete_local_port
);
1030 static DEVICE_ATTR(add_remote_port
, 0200, NULL
, fcloop_create_remote_port
);
1031 static DEVICE_ATTR(del_remote_port
, 0200, NULL
, fcloop_delete_remote_port
);
1032 static DEVICE_ATTR(add_target_port
, 0200, NULL
, fcloop_create_target_port
);
1033 static DEVICE_ATTR(del_target_port
, 0200, NULL
, fcloop_delete_target_port
);
1035 static struct attribute
*fcloop_dev_attrs
[] = {
1036 &dev_attr_add_local_port
.attr
,
1037 &dev_attr_del_local_port
.attr
,
1038 &dev_attr_add_remote_port
.attr
,
1039 &dev_attr_del_remote_port
.attr
,
1040 &dev_attr_add_target_port
.attr
,
1041 &dev_attr_del_target_port
.attr
,
1045 static struct attribute_group fclopp_dev_attrs_group
= {
1046 .attrs
= fcloop_dev_attrs
,
1049 static const struct attribute_group
*fcloop_dev_attr_groups
[] = {
1050 &fclopp_dev_attrs_group
,
/* The fcloop class and its single "ctl" control device. */
static struct class *fcloop_class;
static struct device *fcloop_device;
1058 static int __init
fcloop_init(void)
1062 fcloop_class
= class_create(THIS_MODULE
, "fcloop");
1063 if (IS_ERR(fcloop_class
)) {
1064 pr_err("couldn't register class fcloop\n");
1065 ret
= PTR_ERR(fcloop_class
);
1069 fcloop_device
= device_create_with_groups(
1070 fcloop_class
, NULL
, MKDEV(0, 0), NULL
,
1071 fcloop_dev_attr_groups
, "ctl");
1072 if (IS_ERR(fcloop_device
)) {
1073 pr_err("couldn't create ctl device!\n");
1074 ret
= PTR_ERR(fcloop_device
);
1075 goto out_destroy_class
;
1078 get_device(fcloop_device
);
1083 class_destroy(fcloop_class
);
1087 static void __exit
fcloop_exit(void)
1089 struct fcloop_lport
*lport
;
1090 struct fcloop_nport
*nport
;
1091 struct fcloop_tport
*tport
;
1092 struct fcloop_rport
*rport
;
1093 unsigned long flags
;
1096 spin_lock_irqsave(&fcloop_lock
, flags
);
1099 nport
= list_first_entry_or_null(&fcloop_nports
,
1100 typeof(*nport
), nport_list
);
1104 tport
= __unlink_target_port(nport
);
1105 rport
= __unlink_remote_port(nport
);
1107 spin_unlock_irqrestore(&fcloop_lock
, flags
);
1109 ret
= __wait_targetport_unreg(nport
, tport
);
1111 pr_warn("%s: Failed deleting target port\n", __func__
);
1113 ret
= __wait_remoteport_unreg(nport
, rport
);
1115 pr_warn("%s: Failed deleting remote port\n", __func__
);
1117 spin_lock_irqsave(&fcloop_lock
, flags
);
1121 lport
= list_first_entry_or_null(&fcloop_lports
,
1122 typeof(*lport
), lport_list
);
1126 __unlink_local_port(lport
);
1128 spin_unlock_irqrestore(&fcloop_lock
, flags
);
1130 ret
= __wait_localport_unreg(lport
);
1132 pr_warn("%s: Failed deleting local port\n", __func__
);
1134 spin_lock_irqsave(&fcloop_lock
, flags
);
1137 spin_unlock_irqrestore(&fcloop_lock
, flags
);
1139 put_device(fcloop_device
);
1141 device_destroy(fcloop_class
, MKDEV(0, 0));
1142 class_destroy(fcloop_class
);
1145 module_init(fcloop_init
);
1146 module_exit(fcloop_exit
);
1148 MODULE_LICENSE("GPL v2");