/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

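/*
 * Option flags accepted by the fcloop sysfs create/delete interfaces.
 * Each flag is OR-ed into fcloop_ctrl_options->mask as the
 * corresponding token is parsed.
 */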
enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

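/*
 * Parse a comma-separated option string (e.g. "wwnn=0x...,wwpn=0x...")
 * into *opts, recording each recognized token in opts->mask.  Returns 0
 * on success, -ENOMEM or -EINVAL on failure.
 */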
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}

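/*
 * Parse only the wwnn/wwpn pair used by the delete interfaces.  Both
 * names must be present; -EINVAL is returned if either is missing.
 */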
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}

#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define ALL_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN | NVMF_OPT_ROLES | \
			 NVMF_OPT_FCADDR | NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

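/*
 * fcloop wires an NVMe-FC host (local port + remote port) back to an
 * NVMe-FC target port in software.  An fcloop_nport pairs the rport
 * seen by the host with the tport seen by the target, both hanging
 * off the lport that was created first.
 */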
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	struct completion rport_unreg_done;
	struct completion tport_unreg_done;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	bool				active;
	bool				aborted;
	struct work_struct		work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req	*fcpreq;
	struct fcloop_fcpreq	*tfcp_req;
	struct work_struct	iniwork;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}

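/*
 * Queue setup is a no-op for the loopback transport: there is no
 * hardware queue to allocate, so the localport itself is handed back
 * as the opaque queue handle.
 */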
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

/*
 * Transmit of LS RSP done (e.g. buffers all set). Call back up
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}

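/*
 * Host-side LS request entry point.  Instead of putting an LS on the
 * wire, hand the request buffer straight to the target side via
 * nvmet_fc_rcv_ls_req().  If no target port is connected, fail the
 * request asynchronously with -ECONNREFUSED.
 */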
static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

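/*
 * Target-side LS response: copy the response payload back into the
 * initiator's buffer (truncating to the smaller of the two lengths),
 * release the target request, then complete the initiator side from
 * workqueue context.
 */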
static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}

/*
 * FCP IO operation done by initiator abort.
 * Call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
{
	struct fcloop_ini_fcpreq *inireq =
		container_of(work, struct fcloop_ini_fcpreq, iniwork);

	inireq->fcpreq->done(inireq->fcpreq);
}

/*
 * FCP IO operation done by target completion.
 * Call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, work);
	struct fcloop_tport *tport = tfcp_req->tport;
	struct nvmefc_fcp_req *fcpreq;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	spin_unlock(&tfcp_req->reqlock);

	if (tport->remoteport && fcpreq) {
		fcpreq->status = tfcp_req->status;
		fcpreq->done(fcpreq);
	}

	kfree(tfcp_req);
}

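/*
 * Host-side FCP I/O entry point.  Allocate the shared fcloop_fcpreq
 * that links the initiator and target halves of the exchange, then
 * deliver the command to the target with nvmet_fc_rcv_fcp_req().
 */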
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	int ret = 0;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);

	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
				 fcpreq->cmdaddr, fcpreq->cmdlen);

	return ret;
}

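/*
 * Copy "length" bytes between the target-side scatterlist (data_sg)
 * and the initiator-side scatterlist (io_sg), starting "offset" bytes
 * into the initiator list.  Direction follows the FCP op: WRITEDATA
 * copies from the initiator to the target; reads copy the other way.
 */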
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	/* advance the initiator scatterlist to the requested offset */
	while (offset) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	while (length) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

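/*
 * Target-side data-move/response handler.  Each op runs with
 * tfcp_req->active set so a concurrent abort can be detected; if the
 * initiator half has already detached (fcpreq == NULL), the op is
 * acknowledged without moving any data.
 */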
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	int active;

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock(&tfcp_req->reqlock);
	active = tfcp_req->active;
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->work);
}

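/*
 * LS aborts are a no-op for the loopback transport; the LS completes
 * normally via the workqueue path.
 */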
static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;

	if (!tfcp_req)
		/* abort has already been called */
		return;

	if (rport->targetport)
		nvmet_fc_rcv_fcp_abort(rport->targetport,
					&tfcp_req->tgt_fcp_req);

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	inireq->tfcp_req = NULL;
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	/* post the aborted io completion */
	fcpreq->status = -ECANCELED;
	schedule_work(&inireq->iniwork);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport *lport = localport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&rport->nport->rport_unreg_done);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&tport->nport->tport_unreg_done);
}

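/*
 * Host and target templates registered with the FC transports.  The
 * *_priv_sz fields make the transports allocate the fcloop per-object
 * private structures alongside their own.
 */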
#define	FCLOOP_HW_QUEUES	4
#define	FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= NVMET_FCTGTFEAT_CMD_IN_ISR |
				  NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED |
				  NVMET_FCTGTFEAT_OPDONE_IN_ISR,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};

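/*
 * sysfs 'store' handler for creating a local port.  With the class and
 * device names used in fcloop_init() below, this typically appears as
 * /sys/class/fcloop/ctl/add_local_port, e.g.:
 *
 *   echo "wwnn=0x10000090fa942150,wwpn=0x10000090fa942151" > \
 *       /sys/class/fcloop/ctl/add_local_port
 *
 * (example WWNs chosen arbitrarily)
 */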
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		unsigned long flags;

		/* success */
		lport = localport->private;
		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);

		/* mark all of the input buffer consumed */
		ret = count;
	}

out_free_opts:
	kfree(opts);
	return ret ? ret : count;
}

static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	return ret;
}

static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

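/*
 * nport lifetime is reference counted: the rport and tport halves
 * each hold a reference, and the nport is unlinked and freed when the
 * last reference is dropped.
 */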
static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

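/*
 * Parse the options for add_remote_port/add_target_port and return
 * the nport to attach to: either a newly allocated one, or an
 * existing one (with a reference taken) when the other half was
 * created first.  A remote port must also name an existing local port
 * via lpwwnn/lpwwpn.  Returns NULL on any failure.
 */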
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}

static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	int ret;

	if (!rport)
		return -EALREADY;

	init_completion(&nport->rport_unreg_done);

	ret = nvme_fc_unregister_remoteport(rport->remoteport);
	if (ret)
		return ret;

	wait_for_completion(&nport->rport_unreg_done);

	fcloop_nport_put(nport);

	return ret;
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __wait_remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}

static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	int ret;

	if (!tport)
		return -EALREADY;

	init_completion(&nport->tport_unreg_done);

	ret = nvmet_fc_unregister_targetport(tport->targetport);
	if (ret)
		return ret;

	wait_for_completion(&nport->tport_unreg_done);

	fcloop_nport_put(nport);

	return ret;
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __wait_targetport_unreg(nport, tport);

	return ret ? ret : count;
}

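/*
 * The control interface is a set of write-only sysfs attributes on a
 * single "ctl" device created under the "fcloop" class in
 * fcloop_init().
 */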
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;

static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __wait_remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");