drivers/nvme/target/fcloop.c
1/*
2 * Copyright (c) 2016 Avago Technologies. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful.
9 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13 * See the GNU General Public License for more details, a copy of which
14 * can be found in the file COPYING included with this package
15 */
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17#include <linux/module.h>
18#include <linux/parser.h>
19#include <uapi/scsi/fc/fc_fs.h>
20
21#include "../host/nvme.h"
22#include "../target/nvmet.h"
23#include <linux/nvme-fc-driver.h>
24#include <linux/nvme-fc.h>
25
26
27enum {
28 NVMF_OPT_ERR = 0,
29 NVMF_OPT_WWNN = 1 << 0,
30 NVMF_OPT_WWPN = 1 << 1,
31 NVMF_OPT_ROLES = 1 << 2,
32 NVMF_OPT_FCADDR = 1 << 3,
33 NVMF_OPT_LPWWNN = 1 << 4,
34 NVMF_OPT_LPWWPN = 1 << 5,
35};
36
37struct fcloop_ctrl_options {
38 int mask;
39 u64 wwnn;
40 u64 wwpn;
41 u32 roles;
42 u32 fcaddr;
43 u64 lpwwnn;
44 u64 lpwwpn;
45};
46
47static const match_table_t opt_tokens = {
48 { NVMF_OPT_WWNN, "wwnn=%s" },
49 { NVMF_OPT_WWPN, "wwpn=%s" },
50 { NVMF_OPT_ROLES, "roles=%d" },
51 { NVMF_OPT_FCADDR, "fcaddr=%x" },
52 { NVMF_OPT_LPWWNN, "lpwwnn=%s" },
53 { NVMF_OPT_LPWWPN, "lpwwpn=%s" },
54 { NVMF_OPT_ERR, NULL }
55};
56
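/*
 * Parse the comma-separated "key=value" string written to one of the
 * fcloop sysfs attributes (wwnn, wwpn, roles, fcaddr, lpwwnn, lpwwpn)
 * into a fcloop_ctrl_options structure. Unknown keys or malformed
 * values fail the write with -EINVAL.
 */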
57static int
58fcloop_parse_options(struct fcloop_ctrl_options *opts,
59 const char *buf)
60{
61 substring_t args[MAX_OPT_ARGS];
62 char *options, *o, *p;
63 int token, ret = 0;
64 u64 token64;
65
66 options = o = kstrdup(buf, GFP_KERNEL);
67 if (!options)
68 return -ENOMEM;
69
70 while ((p = strsep(&o, ",\n")) != NULL) {
71 if (!*p)
72 continue;
73
74 token = match_token(p, opt_tokens, args);
75 opts->mask |= token;
76 switch (token) {
77 case NVMF_OPT_WWNN:
78 if (match_u64(args, &token64)) {
79 ret = -EINVAL;
80 goto out_free_options;
81 }
82 opts->wwnn = token64;
83 break;
84 case NVMF_OPT_WWPN:
85 if (match_u64(args, &token64)) {
86 ret = -EINVAL;
87 goto out_free_options;
88 }
89 opts->wwpn = token64;
90 break;
91 case NVMF_OPT_ROLES:
92 if (match_int(args, &token)) {
93 ret = -EINVAL;
94 goto out_free_options;
95 }
96 opts->roles = token;
97 break;
98 case NVMF_OPT_FCADDR:
99 if (match_hex(args, &token)) {
100 ret = -EINVAL;
101 goto out_free_options;
102 }
103 opts->fcaddr = token;
104 break;
105 case NVMF_OPT_LPWWNN:
106 if (match_u64(args, &token64)) {
107 ret = -EINVAL;
108 goto out_free_options;
109 }
110 opts->lpwwnn = token64;
111 break;
112 case NVMF_OPT_LPWWPN:
113 if (match_u64(args, &token64)) {
114 ret = -EINVAL;
115 goto out_free_options;
116 }
117 opts->lpwwpn = token64;
118 break;
119 default:
120 pr_warn("unknown parameter or missing value '%s'\n", p);
121 ret = -EINVAL;
122 goto out_free_options;
123 }
124 }
125
126out_free_options:
127 kfree(options);
128 return ret;
129}
130
131
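/*
 * Parse only the wwnn/wwpn pair used by the "del_*_port" attributes.
 * Both names must be supplied; otherwise -EINVAL is returned.
 */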
132static int
133fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
134 const char *buf)
135{
136 substring_t args[MAX_OPT_ARGS];
137 char *options, *o, *p;
138 int token, ret = 0;
139 u64 token64;
140
141 *nname = -1;
142 *pname = -1;
143
144 options = o = kstrdup(buf, GFP_KERNEL);
145 if (!options)
146 return -ENOMEM;
147
148 while ((p = strsep(&o, ",\n")) != NULL) {
149 if (!*p)
150 continue;
151
152 token = match_token(p, opt_tokens, args);
153 switch (token) {
154 case NVMF_OPT_WWNN:
155 if (match_u64(args, &token64)) {
156 ret = -EINVAL;
157 goto out_free_options;
158 }
159 *nname = token64;
160 break;
161 case NVMF_OPT_WWPN:
162 if (match_u64(args, &token64)) {
163 ret = -EINVAL;
164 goto out_free_options;
165 }
166 *pname = token64;
167 break;
168 default:
169 pr_warn("unknown parameter or missing value '%s'\n", p);
170 ret = -EINVAL;
171 goto out_free_options;
172 }
173 }
174
175out_free_options:
176 kfree(options);
177
178 if (!ret) {
179 if (*nname == -1)
180 return -EINVAL;
181 if (*pname == -1)
182 return -EINVAL;
183 }
184
185 return ret;
186}
187
188
189#define LPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
190
191#define RPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | \
192 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
193
194#define TGTPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
195
196
197static DEFINE_SPINLOCK(fcloop_lock);
198static LIST_HEAD(fcloop_lports);
199static LIST_HEAD(fcloop_nports);
200
201struct fcloop_lport {
202 struct nvme_fc_local_port *localport;
203 struct list_head lport_list;
204 struct completion unreg_done;
205};
206
207struct fcloop_lport_priv {
208 struct fcloop_lport *lport;
209};
210
211struct fcloop_rport {
212 struct nvme_fc_remote_port *remoteport;
213 struct nvmet_fc_target_port *targetport;
214 struct fcloop_nport *nport;
215 struct fcloop_lport *lport;
216};
217
218struct fcloop_tport {
219 struct nvmet_fc_target_port *targetport;
220 struct nvme_fc_remote_port *remoteport;
221 struct fcloop_nport *nport;
222 struct fcloop_lport *lport;
223};
224
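/*
 * An nport pairs the remote-port (initiator) and target-port views of
 * one loopback port name; each attached side holds a reference on it
 * via "ref".
 */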
225struct fcloop_nport {
226 struct fcloop_rport *rport;
227 struct fcloop_tport *tport;
228 struct fcloop_lport *lport;
229 struct list_head nport_list;
230 struct kref ref;
231 u64 node_name;
232 u64 port_name;
233 u32 port_role;
234 u32 port_id;
235};
236
237struct fcloop_lsreq {
238 struct fcloop_tport *tport;
239 struct nvmefc_ls_req *lsreq;
240 struct work_struct work;
241 struct nvmefc_tgt_ls_req tgt_ls_req;
242 int status;
243};
244
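/*
 * Per-I/O state shared by the initiator and target sides of the
 * loopback. reqlock serializes updates to fcpreq/active/aborted
 * between the normal op/completion path and abort handling.
 */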
245struct fcloop_fcpreq {
246 struct fcloop_tport *tport;
247 struct nvmefc_fcp_req *fcpreq;
248 spinlock_t reqlock;
249 u16 status;
250 bool active;
251 bool aborted;
252 struct work_struct work;
253 struct nvmefc_tgt_fcp_req tgt_fcp_req;
254};
255
256struct fcloop_ini_fcpreq {
257 struct nvmefc_fcp_req *fcpreq;
258 struct fcloop_fcpreq *tfcp_req;
259 struct work_struct iniwork;
260};
261
262static inline struct fcloop_lsreq *
263tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
264{
265 return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
266}
267
268static inline struct fcloop_fcpreq *
269tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
270{
271 return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
272}
273
274
275static int
276fcloop_create_queue(struct nvme_fc_local_port *localport,
277 unsigned int qidx, u16 qsize,
278 void **handle)
279{
280 *handle = localport;
281 return 0;
282}
283
284static void
285fcloop_delete_queue(struct nvme_fc_local_port *localport,
286 unsigned int idx, void *handle)
287{
288}
289
290
291/*
292 * Transmit of LS RSP done (e.g. buffers all set). Call back up the
293 * initiator "done" flows.
294 */
295static void
296fcloop_tgt_lsrqst_done_work(struct work_struct *work)
297{
298 struct fcloop_lsreq *tls_req =
299 container_of(work, struct fcloop_lsreq, work);
300 struct fcloop_tport *tport = tls_req->tport;
301 struct nvmefc_ls_req *lsreq = tls_req->lsreq;
302
303 if (tport->remoteport)
304 lsreq->done(lsreq, tls_req->status);
305}
306
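/*
 * Initiator LS request entry point: if no target port is attached,
 * fail the request asynchronously with -ECONNREFUSED; otherwise hand
 * it to the target transport.
 */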
307static int
308fcloop_ls_req(struct nvme_fc_local_port *localport,
309 struct nvme_fc_remote_port *remoteport,
310 struct nvmefc_ls_req *lsreq)
311{
312 struct fcloop_lsreq *tls_req = lsreq->private;
313 struct fcloop_rport *rport = remoteport->private;
314 int ret = 0;
315
316 tls_req->lsreq = lsreq;
317 INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
318
319 if (!rport->targetport) {
320 tls_req->status = -ECONNREFUSED;
321 schedule_work(&tls_req->work);
322 return ret;
323 }
324
325 tls_req->status = 0;
326 tls_req->tport = rport->targetport->private;
327 ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
328 lsreq->rqstaddr, lsreq->rqstlen);
329
330 return ret;
331}
332
333static int
334fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
335 struct nvmefc_tgt_ls_req *tgt_lsreq)
336{
337 struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
338 struct nvmefc_ls_req *lsreq = tls_req->lsreq;
339
340 memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
341 ((lsreq->rsplen < tgt_lsreq->rsplen) ?
342 lsreq->rsplen : tgt_lsreq->rsplen));
343 tgt_lsreq->done(tgt_lsreq);
344
345 schedule_work(&tls_req->work);
346
347 return 0;
348}
349
350/*
351 * FCP I/O operation completed due to an initiator abort.
352 * Call back up the initiator "done" flows.
353 */
354static void
355fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
356{
357 struct fcloop_ini_fcpreq *inireq =
358 container_of(work, struct fcloop_ini_fcpreq, iniwork);
359
360 inireq->fcpreq->done(inireq->fcpreq);
361}
362
363/*
364 * FCP I/O operation completed by the target.
365 * Call back up the initiator "done" flows.
366 */
367static void
368fcloop_tgt_fcprqst_done_work(struct work_struct *work)
369{
370 struct fcloop_fcpreq *tfcp_req =
371 container_of(work, struct fcloop_fcpreq, work);
372 struct fcloop_tport *tport = tfcp_req->tport;
373 struct nvmefc_fcp_req *fcpreq;
374
375 spin_lock(&tfcp_req->reqlock);
376 fcpreq = tfcp_req->fcpreq;
377 tfcp_req->fcpreq = NULL;
378 spin_unlock(&tfcp_req->reqlock);
379
380 if (tport->remoteport && fcpreq) {
381 fcpreq->status = tfcp_req->status;
382 fcpreq->done(fcpreq);
383 }
384
385 kfree(tfcp_req);
386}
387
388
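/*
 * Initiator-side FCP request entry point: allocate the shared
 * fcloop_fcpreq context, link it to the initiator request, and hand
 * the command off to the target transport via nvmet_fc_rcv_fcp_req().
 */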
389static int
390fcloop_fcp_req(struct nvme_fc_local_port *localport,
391 struct nvme_fc_remote_port *remoteport,
392 void *hw_queue_handle,
393 struct nvmefc_fcp_req *fcpreq)
394{
395 struct fcloop_rport *rport = remoteport->private;
396 struct fcloop_ini_fcpreq *inireq = fcpreq->private;
397 struct fcloop_fcpreq *tfcp_req;
398 int ret = 0;
399
400 if (!rport->targetport)
401 return -ECONNREFUSED;
402
403 tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
404 if (!tfcp_req)
405 return -ENOMEM;
406
407 inireq->fcpreq = fcpreq;
408 inireq->tfcp_req = tfcp_req;
409 INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
410 tfcp_req->fcpreq = fcpreq;
411 tfcp_req->tport = rport->targetport->private;
412 spin_lock_init(&tfcp_req->reqlock);
413 INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
414
415 ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
416 fcpreq->cmdaddr, fcpreq->cmdlen);
417
418 return ret;
419}
420
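/*
 * Copy "length" bytes between the target-side scatterlist (data_sg)
 * and the initiator scatterlist (io_sg), skipping "offset" bytes of
 * io_sg first. NVMET_FCOP_WRITEDATA copies io_sg -> data_sg; the read
 * ops copy data_sg -> io_sg.
 */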
421static void
422fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
423 struct scatterlist *io_sg, u32 offset, u32 length)
424{
425 void *data_p, *io_p;
426 u32 data_len, io_len, tlen;
427
428 io_p = sg_virt(io_sg);
429 io_len = io_sg->length;
430
431 for ( ; offset; ) {
432 tlen = min_t(u32, offset, io_len);
433 offset -= tlen;
434 io_len -= tlen;
435 if (!io_len) {
436 io_sg = sg_next(io_sg);
437 io_p = sg_virt(io_sg);
438 io_len = io_sg->length;
439 } else
440 io_p += tlen;
441 }
442
443 data_p = sg_virt(data_sg);
444 data_len = data_sg->length;
445
446 for ( ; length; ) {
447 tlen = min_t(u32, io_len, data_len);
448 tlen = min_t(u32, tlen, length);
449
450 if (op == NVMET_FCOP_WRITEDATA)
451 memcpy(data_p, io_p, tlen);
452 else
453 memcpy(io_p, data_p, tlen);
454
455 length -= tlen;
456
457 io_len -= tlen;
458 if ((!io_len) && (length)) {
459 io_sg = sg_next(io_sg);
460 io_p = sg_virt(io_sg);
461 io_len = io_sg->length;
462 } else
463 io_p += tlen;
464
465 data_len -= tlen;
466 if ((!data_len) && (length)) {
467 data_sg = sg_next(data_sg);
468 data_p = sg_virt(data_sg);
469 data_len = data_sg->length;
470 } else
471 data_p += tlen;
472 }
473}
474
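/*
 * Target-side op handler. Snapshot fcpreq/active/aborted under
 * reqlock: a second op while one is active is rejected with -EALREADY,
 * an already-aborted request is completed with -ECANCELED, and a NULL
 * fcpreq (initiator abort) is treated as success without moving data.
 */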
475static int
476fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
477 struct nvmefc_tgt_fcp_req *tgt_fcpreq)
478{
479 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
480 struct nvmefc_fcp_req *fcpreq;
481 u32 rsplen = 0, xfrlen = 0;
482 int fcp_err = 0, active, aborted;
483 u8 op = tgt_fcpreq->op;
484
485 spin_lock(&tfcp_req->reqlock);
486 fcpreq = tfcp_req->fcpreq;
487 active = tfcp_req->active;
488 aborted = tfcp_req->aborted;
489 tfcp_req->active = true;
490 spin_unlock(&tfcp_req->reqlock);
491
492 if (unlikely(active))
493 /* illegal - call while i/o active */
494 return -EALREADY;
495
496 if (unlikely(aborted)) {
497 /* target transport has aborted i/o prior */
498 spin_lock(&tfcp_req->reqlock);
499 tfcp_req->active = false;
500 spin_unlock(&tfcp_req->reqlock);
501 tgt_fcpreq->transferred_length = 0;
502 tgt_fcpreq->fcp_error = -ECANCELED;
503 tgt_fcpreq->done(tgt_fcpreq);
504 return 0;
505 }
506
507 /*
508 * if fcpreq is NULL, the I/O has been aborted (from
509 * initiator side). For the target side, act as if all is well
510 * but don't actually move data.
511 */
512
513 switch (op) {
514 case NVMET_FCOP_WRITEDATA:
515 xfrlen = tgt_fcpreq->transfer_length;
516 if (fcpreq) {
517 fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
518 fcpreq->first_sgl, tgt_fcpreq->offset,
519 xfrlen);
520 fcpreq->transferred_length += xfrlen;
521 }
522 break;
523
524 case NVMET_FCOP_READDATA:
525 case NVMET_FCOP_READDATA_RSP:
526 xfrlen = tgt_fcpreq->transfer_length;
527 if (fcpreq) {
528 fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
529 fcpreq->first_sgl, tgt_fcpreq->offset,
530 xfrlen);
531 fcpreq->transferred_length += xfrlen;
532 }
533 if (op == NVMET_FCOP_READDATA)
534 break;
535
536 /* Fall-Thru to RSP handling */
537
538 case NVMET_FCOP_RSP:
539 if (fcpreq) {
540 rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
541 fcpreq->rsplen : tgt_fcpreq->rsplen);
542 memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
543 if (rsplen < tgt_fcpreq->rsplen)
544 fcp_err = -E2BIG;
545 fcpreq->rcv_rsplen = rsplen;
546 fcpreq->status = 0;
547 }
548 tfcp_req->status = 0;
549 break;
550
551 default:
552 fcp_err = -EINVAL;
553 break;
554 }
555
556 spin_lock(&tfcp_req->reqlock);
557 tfcp_req->active = false;
558 spin_unlock(&tfcp_req->reqlock);
559
560 tgt_fcpreq->transferred_length = xfrlen;
561 tgt_fcpreq->fcp_error = fcp_err;
562 tgt_fcpreq->done(tgt_fcpreq);
563
564 return 0;
565}
566
567static void
568fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
569 struct nvmefc_tgt_fcp_req *tgt_fcpreq)
570{
571 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
572
573 /*
574 * mark aborted only in case there were 2 threads in transport
575 * (one doing io, other doing abort) and only kills ops posted
576 * after the abort request
577 */
578 spin_lock(&tfcp_req->reqlock);
579 tfcp_req->aborted = true;
580 spin_unlock(&tfcp_req->reqlock);
581
582 tfcp_req->status = NVME_SC_INTERNAL;
583
584 /*
585 * nothing more to do. If io wasn't active, the transport should
586 * immediately call the req_release. If it was active, the op
587 * will complete, and the lldd should call req_release.
588 */
589}
590
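/*
 * Called by the target transport once it is done with the request;
 * the deferred work item completes the initiator request (if still
 * attached) and frees the shared context.
 */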
591static void
592fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
593 struct nvmefc_tgt_fcp_req *tgt_fcpreq)
594{
595 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
596
597 schedule_work(&tfcp_req->work);
598}
599
600static void
601fcloop_ls_abort(struct nvme_fc_local_port *localport,
602 struct nvme_fc_remote_port *remoteport,
603 struct nvmefc_ls_req *lsreq)
604{
605}
606
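/*
 * Initiator-side abort: detach the initiator request from the shared
 * context under reqlock, notify the target transport of the abort if
 * a target port is still attached, then complete the initiator
 * request with -ECANCELED from the iniwork item.
 */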
607static void
608fcloop_fcp_abort(struct nvme_fc_local_port *localport,
609 struct nvme_fc_remote_port *remoteport,
610 void *hw_queue_handle,
611 struct nvmefc_fcp_req *fcpreq)
612{
613 struct fcloop_rport *rport = remoteport->private;
614 struct fcloop_ini_fcpreq *inireq = fcpreq->private;
615 struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;
616
617 if (!tfcp_req)
618 /* abort has already been called */
619 goto finish;
620
621 /* break initiator/target relationship for io */
622 spin_lock(&tfcp_req->reqlock);
623 inireq->tfcp_req = NULL;
624 tfcp_req->fcpreq = NULL;
625 spin_unlock(&tfcp_req->reqlock);
626
627 if (rport->targetport)
628 nvmet_fc_rcv_fcp_abort(rport->targetport,
629 &tfcp_req->tgt_fcp_req);
630
631finish:
632 /* post the aborted io completion */
633 fcpreq->status = -ECANCELED;
634 schedule_work(&inireq->iniwork);
635}
636
637static void
638fcloop_nport_free(struct kref *ref)
639{
640 struct fcloop_nport *nport =
641 container_of(ref, struct fcloop_nport, ref);
642 unsigned long flags;
643
644 spin_lock_irqsave(&fcloop_lock, flags);
645 list_del(&nport->nport_list);
646 spin_unlock_irqrestore(&fcloop_lock, flags);
647
648 kfree(nport);
649}
650
651static void
652fcloop_nport_put(struct fcloop_nport *nport)
653{
654 kref_put(&nport->ref, fcloop_nport_free);
655}
656
657static int
658fcloop_nport_get(struct fcloop_nport *nport)
659{
660 return kref_get_unless_zero(&nport->ref);
661}
662
663static void
664fcloop_localport_delete(struct nvme_fc_local_port *localport)
665{
666 struct fcloop_lport_priv *lport_priv = localport->private;
667 struct fcloop_lport *lport = lport_priv->lport;
668
669 /* release any threads waiting for the unreg to complete */
670 complete(&lport->unreg_done);
671}
672
673static void
674fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
675{
676 struct fcloop_rport *rport = remoteport->private;
677
678 fcloop_nport_put(rport->nport);
679}
680
681static void
682fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
683{
684 struct fcloop_tport *tport = targetport->private;
685
686 fcloop_nport_put(tport->nport);
687}
688
689#define FCLOOP_HW_QUEUES 4
690#define FCLOOP_SGL_SEGS 256
691#define FCLOOP_DMABOUND_4G 0xFFFFFFFF
692
693static struct nvme_fc_port_template fctemplate = {
694 .localport_delete = fcloop_localport_delete,
695 .remoteport_delete = fcloop_remoteport_delete,
696 .create_queue = fcloop_create_queue,
697 .delete_queue = fcloop_delete_queue,
698 .ls_req = fcloop_ls_req,
699 .fcp_io = fcloop_fcp_req,
700 .ls_abort = fcloop_ls_abort,
701 .fcp_abort = fcloop_fcp_abort,
702 .max_hw_queues = FCLOOP_HW_QUEUES,
703 .max_sgl_segments = FCLOOP_SGL_SEGS,
704 .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
705 .dma_boundary = FCLOOP_DMABOUND_4G,
706 /* sizes of additional private data for data structures */
707 .local_priv_sz = sizeof(struct fcloop_lport_priv),
708 .remote_priv_sz = sizeof(struct fcloop_rport),
709 .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
710 .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
711};
712
713static struct nvmet_fc_target_template tgttemplate = {
714 .targetport_delete = fcloop_targetport_delete,
715 .xmt_ls_rsp = fcloop_xmt_ls_rsp,
716 .fcp_op = fcloop_fcp_op,
717 .fcp_abort = fcloop_tgt_fcp_abort,
718 .fcp_req_release = fcloop_fcp_req_release,
719 .max_hw_queues = FCLOOP_HW_QUEUES,
720 .max_sgl_segments = FCLOOP_SGL_SEGS,
721 .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
722 .dma_boundary = FCLOOP_DMABOUND_4G,
723 /* optional features */
724 .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
725 NVMET_FCTGTFEAT_OPDONE_IN_ISR,
726 /* sizes of additional private data for data structures */
727 .target_priv_sz = sizeof(struct fcloop_tport),
728};
729
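/*
 * sysfs "add_local_port" store handler. Only wwnn and wwpn are
 * required, e.g. (arbitrary example WWNs, assuming sysfs is mounted
 * at /sys):
 *
 *   echo "wwnn=0x1,wwpn=0x2" > /sys/class/fcloop/ctl/add_local_port
 */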
730static ssize_t
731fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
732 const char *buf, size_t count)
733{
734 struct nvme_fc_port_info pinfo;
735 struct fcloop_ctrl_options *opts;
736 struct nvme_fc_local_port *localport;
737 struct fcloop_lport *lport;
738 struct fcloop_lport_priv *lport_priv;
739 unsigned long flags;
740 int ret = -ENOMEM;
741
742 lport = kzalloc(sizeof(*lport), GFP_KERNEL);
743 if (!lport)
744 return -ENOMEM;
745
746 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
747 if (!opts)
748 goto out_free_lport;
749
750 ret = fcloop_parse_options(opts, buf);
751 if (ret)
752 goto out_free_opts;
753
754 /* everything there? */
755 if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
756 ret = -EINVAL;
757 goto out_free_opts;
758 }
759
760 memset(&pinfo, 0, sizeof(pinfo));
761 pinfo.node_name = opts->wwnn;
762 pinfo.port_name = opts->wwpn;
763 pinfo.port_role = opts->roles;
764 pinfo.port_id = opts->fcaddr;
765
766 ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
767 if (!ret) {
768 /* success */
769 lport_priv = localport->private;
770 lport_priv->lport = lport;
771
772 lport->localport = localport;
773 INIT_LIST_HEAD(&lport->lport_list);
774
775 spin_lock_irqsave(&fcloop_lock, flags);
776 list_add_tail(&lport->lport_list, &fcloop_lports);
777 spin_unlock_irqrestore(&fcloop_lock, flags);
778 }
779
780out_free_opts:
781 kfree(opts);
782out_free_lport:
783 /* free only if we're going to fail */
784 if (ret)
785 kfree(lport);
786
787 return ret ? ret : count;
788}
789
790
791static void
792__unlink_local_port(struct fcloop_lport *lport)
793{
794 list_del(&lport->lport_list);
795}
796
797static int
798__wait_localport_unreg(struct fcloop_lport *lport)
799{
800 int ret;
801
802 init_completion(&lport->unreg_done);
803
804 ret = nvme_fc_unregister_localport(lport->localport);
805
806 wait_for_completion(&lport->unreg_done);
807
808 kfree(lport);
809
810 return ret;
811}
812
813
814static ssize_t
815fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
816 const char *buf, size_t count)
817{
818 struct fcloop_lport *tlport, *lport = NULL;
819 u64 nodename, portname;
820 unsigned long flags;
821 int ret;
822
823 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
824 if (ret)
825 return ret;
826
827 spin_lock_irqsave(&fcloop_lock, flags);
828
829 list_for_each_entry(tlport, &fcloop_lports, lport_list) {
830 if (tlport->localport->node_name == nodename &&
831 tlport->localport->port_name == portname) {
832 lport = tlport;
833 __unlink_local_port(lport);
834 break;
835 }
836 }
837 spin_unlock_irqrestore(&fcloop_lock, flags);
838
839 if (!lport)
840 return -ENOENT;
841
842 ret = __wait_localport_unreg(lport);
843
844 return ret ? ret : count;
845}
846
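/*
 * Common allocator for remote and target ports: parse the options,
 * reject names that clash with a local port, and either create a new
 * fcloop_nport or take a reference on an existing one with the same
 * wwnn/wwpn. A remote port must also name an existing local port via
 * lpwwnn/lpwwpn.
 */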
847static struct fcloop_nport *
848fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
849{
850 struct fcloop_nport *newnport, *nport = NULL;
851 struct fcloop_lport *tmplport, *lport = NULL;
852 struct fcloop_ctrl_options *opts;
853 unsigned long flags;
854 u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
855 int ret;
856
857 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
858 if (!opts)
859 return NULL;
860
861 ret = fcloop_parse_options(opts, buf);
862 if (ret)
863 goto out_free_opts;
864
865 /* everything there? */
866 if ((opts->mask & opts_mask) != opts_mask) {
867 ret = -EINVAL;
868 goto out_free_opts;
869 }
870
871 newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
872 if (!newnport)
873 goto out_free_opts;
874
875 INIT_LIST_HEAD(&newnport->nport_list);
876 newnport->node_name = opts->wwnn;
877 newnport->port_name = opts->wwpn;
878 if (opts->mask & NVMF_OPT_ROLES)
879 newnport->port_role = opts->roles;
880 if (opts->mask & NVMF_OPT_FCADDR)
881 newnport->port_id = opts->fcaddr;
882 kref_init(&newnport->ref);
883
884 spin_lock_irqsave(&fcloop_lock, flags);
885
886 list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
887 if (tmplport->localport->node_name == opts->wwnn &&
888 tmplport->localport->port_name == opts->wwpn)
889 goto out_invalid_opts;
890
891 if (tmplport->localport->node_name == opts->lpwwnn &&
892 tmplport->localport->port_name == opts->lpwwpn)
893 lport = tmplport;
894 }
895
896 if (remoteport) {
897 if (!lport)
898 goto out_invalid_opts;
899 newnport->lport = lport;
900 }
901
902 list_for_each_entry(nport, &fcloop_nports, nport_list) {
903 if (nport->node_name == opts->wwnn &&
904 nport->port_name == opts->wwpn) {
905 if ((remoteport && nport->rport) ||
906 (!remoteport && nport->tport)) {
907 nport = NULL;
908 goto out_invalid_opts;
909 }
910
911 fcloop_nport_get(nport);
912
913 spin_unlock_irqrestore(&fcloop_lock, flags);
914
915 if (remoteport)
916 nport->lport = lport;
917 if (opts->mask & NVMF_OPT_ROLES)
918 nport->port_role = opts->roles;
919 if (opts->mask & NVMF_OPT_FCADDR)
920 nport->port_id = opts->fcaddr;
921 goto out_free_newnport;
922 }
923 }
924
925 list_add_tail(&newnport->nport_list, &fcloop_nports);
926
927 spin_unlock_irqrestore(&fcloop_lock, flags);
928
929 kfree(opts);
930 return newnport;
931
932out_invalid_opts:
933 spin_unlock_irqrestore(&fcloop_lock, flags);
934out_free_newnport:
935 kfree(newnport);
936out_free_opts:
937 kfree(opts);
938 return nport;
939}
940
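/*
 * sysfs "add_remote_port" store handler. lpwwnn/lpwwpn name the local
 * port to pair with, e.g. (arbitrary example WWNs):
 *
 *   echo "wwnn=0x3,wwpn=0x4,lpwwnn=0x1,lpwwpn=0x2" > \
 *       /sys/class/fcloop/ctl/add_remote_port
 */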
941static ssize_t
942fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
943 const char *buf, size_t count)
944{
945 struct nvme_fc_remote_port *remoteport;
946 struct fcloop_nport *nport;
947 struct fcloop_rport *rport;
948 struct nvme_fc_port_info pinfo;
949 int ret;
950
951 nport = fcloop_alloc_nport(buf, count, true);
952 if (!nport)
953 return -EIO;
954
955 memset(&pinfo, 0, sizeof(pinfo));
956 pinfo.node_name = nport->node_name;
957 pinfo.port_name = nport->port_name;
958 pinfo.port_role = nport->port_role;
959 pinfo.port_id = nport->port_id;
960
961 ret = nvme_fc_register_remoteport(nport->lport->localport,
962 &pinfo, &remoteport);
963 if (ret || !remoteport) {
964 fcloop_nport_put(nport);
965 return ret;
966 }
967
968 /* success */
969 rport = remoteport->private;
970 rport->remoteport = remoteport;
971 rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
972 if (nport->tport) {
973 nport->tport->remoteport = remoteport;
974 nport->tport->lport = nport->lport;
975 }
976 rport->nport = nport;
977 rport->lport = nport->lport;
978 nport->rport = rport;
979
980 return count;
981}
982
983
984static struct fcloop_rport *
985__unlink_remote_port(struct fcloop_nport *nport)
986{
987 struct fcloop_rport *rport = nport->rport;
988
989 if (rport && nport->tport)
990 nport->tport->remoteport = NULL;
991 nport->rport = NULL;
992
993 return rport;
994}
995
996static int
997__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
998{
999 if (!rport)
1000 return -EALREADY;
1001
1002 return nvme_fc_unregister_remoteport(rport->remoteport);
1003}
1004
1005static ssize_t
1006fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
1007 const char *buf, size_t count)
1008{
1009 struct fcloop_nport *nport = NULL, *tmpport;
1010 struct fcloop_rport *rport = NULL;
1011 u64 nodename, portname;
1012 unsigned long flags;
1013 int ret;
1014
1015 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1016 if (ret)
1017 return ret;
1018
1019 spin_lock_irqsave(&fcloop_lock, flags);
1020
1021 list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
1022 if (tmpport->node_name == nodename &&
1023 tmpport->port_name == portname && tmpport->rport) {
1024 nport = tmpport;
1025 rport = __unlink_remote_port(nport);
1026 break;
1027 }
1028 }
1029
1030 spin_unlock_irqrestore(&fcloop_lock, flags);
1031
1032 if (!nport)
1033 return -ENOENT;
1034
1035 ret = __remoteport_unreg(nport, rport);
1036
1037 return ret ? ret : count;
1038}
1039
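/*
 * sysfs "add_target_port" store handler; the wwnn/wwpn typically match
 * an existing remote port so the two sides get linked, e.g.:
 *
 *   echo "wwnn=0x3,wwpn=0x4" > /sys/class/fcloop/ctl/add_target_port
 */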
1040static ssize_t
1041fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
1042 const char *buf, size_t count)
1043{
1044 struct nvmet_fc_target_port *targetport;
1045 struct fcloop_nport *nport;
1046 struct fcloop_tport *tport;
1047 struct nvmet_fc_port_info tinfo;
1048 int ret;
1049
1050 nport = fcloop_alloc_nport(buf, count, false);
1051 if (!nport)
1052 return -EIO;
1053
1054 tinfo.node_name = nport->node_name;
1055 tinfo.port_name = nport->port_name;
1056 tinfo.port_id = nport->port_id;
1057
1058 ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
1059 &targetport);
1060 if (ret) {
1061 fcloop_nport_put(nport);
1062 return ret;
1063 }
1064
1065 /* success */
1066 tport = targetport->private;
1067 tport->targetport = targetport;
1068 tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
1069 if (nport->rport)
1070 nport->rport->targetport = targetport;
1071 tport->nport = nport;
1072 tport->lport = nport->lport;
1073 nport->tport = tport;
1074
1075 return count;
1076}
1077
1078
1079static struct fcloop_tport *
1080__unlink_target_port(struct fcloop_nport *nport)
1081{
1082 struct fcloop_tport *tport = nport->tport;
1083
1084 if (tport && nport->rport)
1085 nport->rport->targetport = NULL;
1086 nport->tport = NULL;
1087
1088 return tport;
1089}
1090
1091static int
1092__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
1093{
1094 if (!tport)
1095 return -EALREADY;
1096
1097 return nvmet_fc_unregister_targetport(tport->targetport);
1098}
1099
1100static ssize_t
1101fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
1102 const char *buf, size_t count)
1103{
1104 struct fcloop_nport *nport = NULL, *tmpport;
1105 struct fcloop_tport *tport = NULL;
1106 u64 nodename, portname;
1107 unsigned long flags;
1108 int ret;
1109
1110 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1111 if (ret)
1112 return ret;
1113
1114 spin_lock_irqsave(&fcloop_lock, flags);
1115
1116 list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
1117 if (tmpport->node_name == nodename &&
1118 tmpport->port_name == portname && tmpport->tport) {
1119 nport = tmpport;
1120 tport = __unlink_target_port(nport);
1121 break;
1122 }
1123 }
1124
1125 spin_unlock_irqrestore(&fcloop_lock, flags);
1126
1127 if (!nport)
1128 return -ENOENT;
1129
1130 ret = __targetport_unreg(nport, tport);
1131
1132 return ret ? ret : count;
1133}
1134
1135
1136static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
1137static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
1138static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
1139static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
1140static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
1141static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
1142
1143static struct attribute *fcloop_dev_attrs[] = {
1144 &dev_attr_add_local_port.attr,
1145 &dev_attr_del_local_port.attr,
1146 &dev_attr_add_remote_port.attr,
1147 &dev_attr_del_remote_port.attr,
1148 &dev_attr_add_target_port.attr,
1149 &dev_attr_del_target_port.attr,
1150 NULL
1151};
1152
1153static struct attribute_group fclopp_dev_attrs_group = {
1154 .attrs = fcloop_dev_attrs,
1155};
1156
1157static const struct attribute_group *fcloop_dev_attr_groups[] = {
1158 &fclopp_dev_attrs_group,
1159 NULL,
1160};
1161
1162static struct class *fcloop_class;
1163static struct device *fcloop_device;
1164
1165
1166static int __init fcloop_init(void)
1167{
1168 int ret;
1169
1170 fcloop_class = class_create(THIS_MODULE, "fcloop");
1171 if (IS_ERR(fcloop_class)) {
1172 pr_err("couldn't register class fcloop\n");
1173 ret = PTR_ERR(fcloop_class);
1174 return ret;
1175 }
1176
1177 fcloop_device = device_create_with_groups(
1178 fcloop_class, NULL, MKDEV(0, 0), NULL,
1179 fcloop_dev_attr_groups, "ctl");
1180 if (IS_ERR(fcloop_device)) {
1181 pr_err("couldn't create ctl device!\n");
1182 ret = PTR_ERR(fcloop_device);
1183 goto out_destroy_class;
1184 }
1185
1186 get_device(fcloop_device);
1187
1188 return 0;
1189
1190out_destroy_class:
1191 class_destroy(fcloop_class);
1192 return ret;
1193}
1194
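/*
 * Module unload: tear down all nports (target then remote side) and
 * all local ports, dropping fcloop_lock around the unregister calls
 * since they can block waiting for the ports to go away.
 */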
1195static void __exit fcloop_exit(void)
1196{
1197 struct fcloop_lport *lport;
1198 struct fcloop_nport *nport;
1199 struct fcloop_tport *tport;
1200 struct fcloop_rport *rport;
1201 unsigned long flags;
1202 int ret;
1203
1204 spin_lock_irqsave(&fcloop_lock, flags);
1205
1206 for (;;) {
1207 nport = list_first_entry_or_null(&fcloop_nports,
1208 typeof(*nport), nport_list);
1209 if (!nport)
1210 break;
1211
1212 tport = __unlink_target_port(nport);
1213 rport = __unlink_remote_port(nport);
1214
1215 spin_unlock_irqrestore(&fcloop_lock, flags);
1216
1217 ret = __targetport_unreg(nport, tport);
1218 if (ret)
1219 pr_warn("%s: Failed deleting target port\n", __func__);
1220
1221 ret = __remoteport_unreg(nport, rport);
1222 if (ret)
1223 pr_warn("%s: Failed deleting remote port\n", __func__);
1224
1225 spin_lock_irqsave(&fcloop_lock, flags);
1226 }
1227
1228 for (;;) {
1229 lport = list_first_entry_or_null(&fcloop_lports,
1230 typeof(*lport), lport_list);
1231 if (!lport)
1232 break;
1233
1234 __unlink_local_port(lport);
1235
1236 spin_unlock_irqrestore(&fcloop_lock, flags);
1237
1238 ret = __wait_localport_unreg(lport);
1239 if (ret)
1240 pr_warn("%s: Failed deleting local port\n", __func__);
1241
1242 spin_lock_irqsave(&fcloop_lock, flags);
1243 }
1244
1245 spin_unlock_irqrestore(&fcloop_lock, flags);
1246
1247 put_device(fcloop_device);
1248
1249 device_destroy(fcloop_class, MKDEV(0, 0));
1250 class_destroy(fcloop_class);
1251}
1252
1253module_init(fcloop_init);
1254module_exit(fcloop_exit);
1255
1256MODULE_LICENSE("GPL v2");