/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};
static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};
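
/*
 * For illustration (the WWN values below are made-up examples): a
 * remote-port option string built from the tokens above would look
 * roughly like
 *
 *	wwnn=0x20000090fa942779,wwpn=0x20000090fa942779,roles=3,
 *	lpwwnn=0x10000090fa942779,lpwwpn=0x10000090fa942779
 */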
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}
#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};
struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	bool				active;
	bool				aborted;
	struct work_struct		work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	struct work_struct		iniwork;
};
static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}
/*
 * Transmit of LS RSP done (e.g. buffers all set). Call back up
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}
static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}
static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}
/*
 * FCP IO operation done by initiator abort.
 * Call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
{
	struct fcloop_ini_fcpreq *inireq =
		container_of(work, struct fcloop_ini_fcpreq, iniwork);

	inireq->fcpreq->done(inireq->fcpreq);
}
/*
 * FCP IO operation done by target completion.
 * Call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, work);
	struct fcloop_tport *tport = tfcp_req->tport;
	struct nvmefc_fcp_req *fcpreq;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	spin_unlock(&tfcp_req->reqlock);

	if (tport->remoteport && fcpreq) {
		fcpreq->status = tfcp_req->status;
		fcpreq->done(fcpreq);
	}

	kfree(tfcp_req);
}
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	int ret = 0;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);

	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
				 fcpreq->cmdaddr, fcpreq->cmdlen);

	return ret;
}
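
/*
 * Helper for fcloop_fcp_op() below: walk the target-side scatterlist
 * (data_sg) and the initiator-side scatterlist (io_sg) in lock step,
 * first skipping "offset" bytes into the initiator buffer and then
 * copying "length" bytes. NVMET_FCOP_WRITEDATA copies initiator ->
 * target; read ops copy target -> initiator.
 */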
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}
static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}
static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->work);
}
static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;

	if (!tfcp_req)
		/* abort has already been called */
		return;

	if (rport->targetport)
		nvmet_fc_rcv_fcp_abort(rport->targetport,
					&tfcp_req->tgt_fcp_req);

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	inireq->tfcp_req = NULL;
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	/* post the aborted io completion */
	fcpreq->status = -ECANCELED;
	schedule_work(&inireq->iniwork);
}
static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport *lport = localport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}
#define	FCLOOP_HW_QUEUES	4
#define FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF
static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};
static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= NVMET_FCTGTFEAT_CMD_IN_ISR |
				  NVMET_FCTGTFEAT_OPDONE_IN_ISR,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		unsigned long flags;

		/* success */
		lport = localport->private;
		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);

		/* mark all of the input buffer consumed */
		ret = count;
	}

out_free_opts:
	kfree(opts);
	return ret ? ret : count;
}
static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	return ret;
}
static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}
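
/*
 * Common helper for the add_remote_port/add_target_port writes below:
 * parse "buf" and either allocate a new fcloop_nport or take a reference
 * on an existing nport with the same wwnn/wwpn. For a remote port the
 * options must also name an already-registered local port via
 * lpwwnn/lpwwpn (RPORT_OPTS vs TGTPORT_OPTS).
 */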
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}
static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}
static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}
static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}
static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}
static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}
static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};
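
/*
 * The attributes above are exposed through the "ctl" device created in
 * fcloop_init(), so they should appear as
 * /sys/class/fcloop/ctl/{add,del}_{local,remote,target}_port. A loopback
 * topology can then be wired up from userspace with writes such as
 * (the WWN values are made-up examples):
 *
 *	echo "wwnn=0x10000090fa942779,wwpn=0x10000090fa942779" > \
 *		/sys/class/fcloop/ctl/add_local_port
 *	echo "wwnn=0x20000090fa942779,wwpn=0x20000090fa942779" > \
 *		/sys/class/fcloop/ctl/add_target_port
 *	echo "wwnn=0x20000090fa942779,wwpn=0x20000090fa942779,lpwwnn=0x10000090fa942779,lpwwpn=0x10000090fa942779" > \
 *		/sys/class/fcloop/ctl/add_remote_port
 */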
static struct class *fcloop_class;
static struct device *fcloop_device;
static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}
module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");