1/*
2 * Copyright (c) 2016 Avago Technologies. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful.
9 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13 * See the GNU General Public License for more details, a copy of which
14 * can be found in the file COPYING included with this package
15 *
16 */
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/blk-mq.h>
21#include <linux/parser.h>
22#include <linux/random.h>
23#include <uapi/scsi/fc/fc_fs.h>
24#include <uapi/scsi/fc/fc_els.h>
25
26#include "nvmet.h"
27#include <linux/nvme-fc-driver.h>
28#include <linux/nvme-fc.h>
29
30
31/* *************************** Data Structures/Defines ****************** */
32
33
34#define NVMET_LS_CTX_COUNT 4
35
36/* for this implementation, assume small single frame rqst/rsp */
37#define NVME_FC_MAX_LS_BUFFER_SIZE 2048
38
39struct nvmet_fc_tgtport;
40struct nvmet_fc_tgt_assoc;
41
42struct nvmet_fc_ls_iod {
43 struct nvmefc_tgt_ls_req *lsreq;
44 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
45
46 struct list_head ls_list; /* tgtport->ls_list */
47
48 struct nvmet_fc_tgtport *tgtport;
49 struct nvmet_fc_tgt_assoc *assoc;
50
51 u8 *rqstbuf;
52 u8 *rspbuf;
53 u16 rqstdatalen;
54 dma_addr_t rspdma;
55
56 struct scatterlist sg[2];
57
58 struct work_struct work;
59} __aligned(sizeof(unsigned long long));
60
61#define NVMET_FC_MAX_KB_PER_XFR 256
62
63enum nvmet_fcp_datadir {
64 NVMET_FCP_NODATA,
65 NVMET_FCP_WRITE,
66 NVMET_FCP_READ,
67 NVMET_FCP_ABORTED,
68};
69
70struct nvmet_fc_fcp_iod {
71 struct nvmefc_tgt_fcp_req *fcpreq;
72
73 struct nvme_fc_cmd_iu cmdiubuf;
74 struct nvme_fc_ersp_iu rspiubuf;
75 dma_addr_t rspdma;
76 struct scatterlist *data_sg;
77 struct scatterlist *next_sg;
78 int data_sg_cnt;
79 u32 next_sg_offset;
80 u32 total_length;
81 u32 offset;
82 enum nvmet_fcp_datadir io_dir;
83 bool active;
84 bool abort;
85 bool aborted;
86 bool writedataactive;
87 spinlock_t flock;
88
89 struct nvmet_req req;
90 struct work_struct work;
 91	struct work_struct	done_work;
92
93 struct nvmet_fc_tgtport *tgtport;
94 struct nvmet_fc_tgt_queue *queue;
95
96 struct list_head fcp_list; /* tgtport->fcp_list */
97};
98
99struct nvmet_fc_tgtport {
100
101 struct nvmet_fc_target_port fc_target_port;
102
103 struct list_head tgt_list; /* nvmet_fc_target_list */
104 struct device *dev; /* dev for dma mapping */
105 struct nvmet_fc_target_template *ops;
106
107 struct nvmet_fc_ls_iod *iod;
108 spinlock_t lock;
109 struct list_head ls_list;
110 struct list_head ls_busylist;
111 struct list_head assoc_list;
112 struct ida assoc_cnt;
113 struct nvmet_port *port;
114 struct kref ref;
115};
116
117struct nvmet_fc_tgt_queue {
118 bool ninetypercent;
119 u16 qid;
120 u16 sqsize;
121 u16 ersp_ratio;
 122	__le16			sqhd;
123 int cpu;
124 atomic_t connected;
125 atomic_t sqtail;
126 atomic_t zrspcnt;
127 atomic_t rsn;
128 spinlock_t qlock;
129 struct nvmet_port *port;
130 struct nvmet_cq nvme_cq;
131 struct nvmet_sq nvme_sq;
132 struct nvmet_fc_tgt_assoc *assoc;
133 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
134 struct list_head fod_list;
135 struct workqueue_struct *work_q;
136 struct kref ref;
137} __aligned(sizeof(unsigned long long));
138
139struct nvmet_fc_tgt_assoc {
140 u64 association_id;
141 u32 a_id;
142 struct nvmet_fc_tgtport *tgtport;
143 struct list_head a_list;
144 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES];
145 struct kref ref;
146};
147
148
149static inline int
150nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
151{
152 return (iodptr - iodptr->tgtport->iod);
153}
154
155static inline int
156nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
157{
158 return (fodptr - fodptr->queue->fod);
159}
160
161
162/*
163 * Association and Connection IDs:
164 *
165 * Association ID will have random number in upper 6 bytes and zero
166 * in lower 2 bytes
167 *
168 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
169 *
170 * note: Association ID = Connection ID for queue 0
171 */
172#define BYTES_FOR_QID sizeof(u16)
173#define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
174#define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
175
176static inline u64
177nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
178{
179 return (assoc->association_id | qid);
180}
181
182static inline u64
183nvmet_fc_getassociationid(u64 connectionid)
184{
185 return connectionid & ~NVMET_FC_QUEUEID_MASK;
186}
187
188static inline u16
189nvmet_fc_getqueueid(u64 connectionid)
190{
191 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
192}
193
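/*
 * Illustration (not part of the driver): with a random association id of
 * 0x123456789abc0000 (low two bytes forced to zero), the admin queue
 * (qid 0) uses connection id 0x123456789abc0000 and I/O queue qid 3 uses
 * 0x123456789abc0003; nvmet_fc_getassociationid() and nvmet_fc_getqueueid()
 * simply split those two fields back apart.
 */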
194static inline struct nvmet_fc_tgtport *
195targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
196{
197 return container_of(targetport, struct nvmet_fc_tgtport,
198 fc_target_port);
199}
200
201static inline struct nvmet_fc_fcp_iod *
202nvmet_req_to_fod(struct nvmet_req *nvme_req)
203{
204 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
205}
206
207
208/* *************************** Globals **************************** */
209
210
211static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
212
213static LIST_HEAD(nvmet_fc_target_list);
214static DEFINE_IDA(nvmet_fc_tgtport_cnt);
215
216
217static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
218static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
 219static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
220static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
221static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
222static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
223static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
224static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
225static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
226
227
228/* *********************** FC-NVME DMA Handling **************************** */
229
230/*
231 * The fcloop device passes in a NULL device pointer. Real LLD's will
232 * pass in a valid device pointer. If NULL is passed to the dma mapping
233 * routines, depending on the platform, it may or may not succeed, and
234 * may crash.
235 *
236 * As such:
 237 * Wrap all the dma routines and check the dev pointer.
 238 *
 239 * For simple mappings (return just a dma address), we'll noop them,
 240 * returning a dma address of 0.
241 *
242 * On more complex mappings (dma_map_sg), a pseudo routine fills
243 * in the scatter list, setting all dma addresses to 0.
244 */
245
246static inline dma_addr_t
247fc_dma_map_single(struct device *dev, void *ptr, size_t size,
248 enum dma_data_direction dir)
249{
250 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
251}
252
253static inline int
254fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
255{
256 return dev ? dma_mapping_error(dev, dma_addr) : 0;
257}
258
259static inline void
260fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
261 enum dma_data_direction dir)
262{
263 if (dev)
264 dma_unmap_single(dev, addr, size, dir);
265}
266
267static inline void
268fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
269 enum dma_data_direction dir)
270{
271 if (dev)
272 dma_sync_single_for_cpu(dev, addr, size, dir);
273}
274
275static inline void
276fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
277 enum dma_data_direction dir)
278{
279 if (dev)
280 dma_sync_single_for_device(dev, addr, size, dir);
281}
282
283/* pseudo dma_map_sg call */
284static int
285fc_map_sg(struct scatterlist *sg, int nents)
286{
287 struct scatterlist *s;
288 int i;
289
290 WARN_ON(nents == 0 || sg[0].length == 0);
291
292 for_each_sg(sg, s, nents, i) {
293 s->dma_address = 0L;
294#ifdef CONFIG_NEED_SG_DMA_LENGTH
295 s->dma_length = s->length;
296#endif
297 }
298 return nents;
299}
300
301static inline int
302fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
303 enum dma_data_direction dir)
304{
305 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
306}
307
308static inline void
309fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
310 enum dma_data_direction dir)
311{
312 if (dev)
313 dma_unmap_sg(dev, sg, nents, dir);
314}
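
/*
 * Behavior sketch (hypothetical caller): an fcloop-style LLDD that passes
 * dev == NULL gets a dma address of 0 back from fc_dma_map_single() and an
 * sg list whose dma_address fields are all 0 from fc_dma_map_sg(), while a
 * real LLDD with a valid dev goes through the normal dma_map_*() paths
 * unchanged.
 */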
315
316
317/* *********************** FC-NVME Port Management ************************ */
318
319
320static int
321nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
322{
323 struct nvmet_fc_ls_iod *iod;
324 int i;
325
326 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
327 GFP_KERNEL);
328 if (!iod)
329 return -ENOMEM;
330
331 tgtport->iod = iod;
332
333 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
334 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
335 iod->tgtport = tgtport;
336 list_add_tail(&iod->ls_list, &tgtport->ls_list);
337
338 iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
339 GFP_KERNEL);
340 if (!iod->rqstbuf)
341 goto out_fail;
342
343 iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
344
345 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
346 NVME_FC_MAX_LS_BUFFER_SIZE,
347 DMA_TO_DEVICE);
348 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
349 goto out_fail;
350 }
351
352 return 0;
353
354out_fail:
355 kfree(iod->rqstbuf);
356 list_del(&iod->ls_list);
357 for (iod--, i--; i >= 0; iod--, i--) {
358 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
359 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
360 kfree(iod->rqstbuf);
361 list_del(&iod->ls_list);
362 }
363
364 kfree(iod);
365
366 return -EFAULT;
367}
368
369static void
370nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
371{
372 struct nvmet_fc_ls_iod *iod = tgtport->iod;
373 int i;
374
375 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
376 fc_dma_unmap_single(tgtport->dev,
377 iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
378 DMA_TO_DEVICE);
379 kfree(iod->rqstbuf);
380 list_del(&iod->ls_list);
381 }
382 kfree(tgtport->iod);
383}
384
385static struct nvmet_fc_ls_iod *
386nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
387{
 388	struct nvmet_fc_ls_iod *iod;
389 unsigned long flags;
390
391 spin_lock_irqsave(&tgtport->lock, flags);
392 iod = list_first_entry_or_null(&tgtport->ls_list,
393 struct nvmet_fc_ls_iod, ls_list);
394 if (iod)
395 list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
396 spin_unlock_irqrestore(&tgtport->lock, flags);
397 return iod;
398}
399
400
401static void
402nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
403 struct nvmet_fc_ls_iod *iod)
404{
405 unsigned long flags;
406
407 spin_lock_irqsave(&tgtport->lock, flags);
408 list_move(&iod->ls_list, &tgtport->ls_list);
409 spin_unlock_irqrestore(&tgtport->lock, flags);
410}
411
412static void
413nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
414 struct nvmet_fc_tgt_queue *queue)
415{
416 struct nvmet_fc_fcp_iod *fod = queue->fod;
417 int i;
418
419 for (i = 0; i < queue->sqsize; fod++, i++) {
420 INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
 421		INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
422 fod->tgtport = tgtport;
423 fod->queue = queue;
424 fod->active = false;
425 fod->abort = false;
426 fod->aborted = false;
427 fod->fcpreq = NULL;
428 list_add_tail(&fod->fcp_list, &queue->fod_list);
429 spin_lock_init(&fod->flock);
430
431 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
432 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
433 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
434 list_del(&fod->fcp_list);
435 for (fod--, i--; i >= 0; fod--, i--) {
436 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
437 sizeof(fod->rspiubuf),
438 DMA_TO_DEVICE);
439 fod->rspdma = 0L;
440 list_del(&fod->fcp_list);
441 }
442
443 return;
444 }
445 }
446}
447
448static void
449nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
450 struct nvmet_fc_tgt_queue *queue)
451{
452 struct nvmet_fc_fcp_iod *fod = queue->fod;
453 int i;
454
455 for (i = 0; i < queue->sqsize; fod++, i++) {
456 if (fod->rspdma)
457 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
458 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
459 }
460}
461
462static struct nvmet_fc_fcp_iod *
463nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
464{
 465	struct nvmet_fc_fcp_iod *fod;
466 unsigned long flags;
467
468 spin_lock_irqsave(&queue->qlock, flags);
469 fod = list_first_entry_or_null(&queue->fod_list,
470 struct nvmet_fc_fcp_iod, fcp_list);
471 if (fod) {
472 list_del(&fod->fcp_list);
473 fod->active = true;
474 /*
475 * no queue reference is taken, as it was taken by the
476 * queue lookup just prior to the allocation. The iod
477 * will "inherit" that reference.
478 */
479 }
480 spin_unlock_irqrestore(&queue->qlock, flags);
481 return fod;
482}
483
484
485static void
486nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
487 struct nvmet_fc_fcp_iod *fod)
488{
489 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
490 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
491 unsigned long flags;
492
493 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
494 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
495
496 fcpreq->nvmet_fc_private = NULL;
497
498 spin_lock_irqsave(&queue->qlock, flags);
499 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
500 fod->active = false;
501 fod->abort = false;
502 fod->aborted = false;
503 fod->writedataactive = false;
504 fod->fcpreq = NULL;
505 spin_unlock_irqrestore(&queue->qlock, flags);
506
507 /*
508 * release the reference taken at queue lookup and fod allocation
509 */
510 nvmet_fc_tgt_q_put(queue);
511
512 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
513}
514
515static int
516nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
517{
518 int cpu, idx, cnt;
519
 520	if (tgtport->ops->max_hw_queues == 1)
521 return WORK_CPU_UNBOUND;
522
523 /* Simple cpu selection based on qid modulo active cpu count */
524 idx = !qid ? 0 : (qid - 1) % num_active_cpus();
525
526 /* find the n'th active cpu */
527 for (cpu = 0, cnt = 0; ; ) {
528 if (cpu_active(cpu)) {
529 if (cnt == idx)
530 break;
531 cnt++;
532 }
533 cpu = (cpu + 1) % num_possible_cpus();
534 }
535
536 return cpu;
537}
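
/*
 * Worked example (illustrative, assuming cpus 0-7 are all active): qid 0
 * and qid 1 both map to idx 0 -> cpu 0, qid 4 maps to idx 3 -> cpu 3, and
 * qid 9 wraps back around to idx 0. An LLDD reporting max_hw_queues == 1
 * always gets WORK_CPU_UNBOUND instead.
 */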
538
539static struct nvmet_fc_tgt_queue *
540nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
541 u16 qid, u16 sqsize)
542{
543 struct nvmet_fc_tgt_queue *queue;
544 unsigned long flags;
545 int ret;
546
547 if (qid >= NVMET_NR_QUEUES)
548 return NULL;
549
550 queue = kzalloc((sizeof(*queue) +
551 (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
552 GFP_KERNEL);
553 if (!queue)
554 return NULL;
555
556 if (!nvmet_fc_tgt_a_get(assoc))
557 goto out_free_queue;
558
559 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
560 assoc->tgtport->fc_target_port.port_num,
561 assoc->a_id, qid);
562 if (!queue->work_q)
563 goto out_a_put;
564
565 queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
566 queue->qid = qid;
567 queue->sqsize = sqsize;
568 queue->assoc = assoc;
569 queue->port = assoc->tgtport->port;
570 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
571 INIT_LIST_HEAD(&queue->fod_list);
572 atomic_set(&queue->connected, 0);
573 atomic_set(&queue->sqtail, 0);
574 atomic_set(&queue->rsn, 1);
575 atomic_set(&queue->zrspcnt, 0);
576 spin_lock_init(&queue->qlock);
577 kref_init(&queue->ref);
578
579 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
580
581 ret = nvmet_sq_init(&queue->nvme_sq);
582 if (ret)
583 goto out_fail_iodlist;
584
585 WARN_ON(assoc->queues[qid]);
586 spin_lock_irqsave(&assoc->tgtport->lock, flags);
587 assoc->queues[qid] = queue;
588 spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
589
590 return queue;
591
592out_fail_iodlist:
593 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
594 destroy_workqueue(queue->work_q);
595out_a_put:
596 nvmet_fc_tgt_a_put(assoc);
597out_free_queue:
598 kfree(queue);
599 return NULL;
600}
601
602
603static void
604nvmet_fc_tgt_queue_free(struct kref *ref)
605{
606 struct nvmet_fc_tgt_queue *queue =
607 container_of(ref, struct nvmet_fc_tgt_queue, ref);
608 unsigned long flags;
609
610 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
611 queue->assoc->queues[queue->qid] = NULL;
612 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
613
614 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
615
616 nvmet_fc_tgt_a_put(queue->assoc);
617
618 destroy_workqueue(queue->work_q);
619
620 kfree(queue);
621}
622
623static void
624nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
625{
626 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
627}
628
629static int
630nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
631{
632 return kref_get_unless_zero(&queue->ref);
633}
634
635
636static void
637nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
638{
 639	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
640 struct nvmet_fc_fcp_iod *fod = queue->fod;
641 unsigned long flags;
 642	int i, writedataactive;
643 bool disconnect;
644
645 disconnect = atomic_xchg(&queue->connected, 0);
646
647 spin_lock_irqsave(&queue->qlock, flags);
 648	/* abort outstanding io's */
649 for (i = 0; i < queue->sqsize; fod++, i++) {
650 if (fod->active) {
651 spin_lock(&fod->flock);
652 fod->abort = true;
 653			writedataactive = fod->writedataactive;
 654			spin_unlock(&fod->flock);
655 /*
656 * only call lldd abort routine if waiting for
657 * writedata. other outstanding ops should finish
658 * on their own.
659 */
660 if (writedataactive) {
661 spin_lock(&fod->flock);
662 fod->aborted = true;
663 spin_unlock(&fod->flock);
664 tgtport->ops->fcp_abort(
665 &tgtport->fc_target_port, fod->fcpreq);
666 }
667 }
668 }
669 spin_unlock_irqrestore(&queue->qlock, flags);
670
671 flush_workqueue(queue->work_q);
672
673 if (disconnect)
674 nvmet_sq_destroy(&queue->nvme_sq);
675
676 nvmet_fc_tgt_q_put(queue);
677}
678
679static struct nvmet_fc_tgt_queue *
680nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
681 u64 connection_id)
682{
683 struct nvmet_fc_tgt_assoc *assoc;
684 struct nvmet_fc_tgt_queue *queue;
685 u64 association_id = nvmet_fc_getassociationid(connection_id);
686 u16 qid = nvmet_fc_getqueueid(connection_id);
687 unsigned long flags;
688
689 spin_lock_irqsave(&tgtport->lock, flags);
690 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
691 if (association_id == assoc->association_id) {
692 queue = assoc->queues[qid];
693 if (queue &&
694 (!atomic_read(&queue->connected) ||
695 !nvmet_fc_tgt_q_get(queue)))
696 queue = NULL;
697 spin_unlock_irqrestore(&tgtport->lock, flags);
698 return queue;
699 }
700 }
701 spin_unlock_irqrestore(&tgtport->lock, flags);
702 return NULL;
703}
704
705static struct nvmet_fc_tgt_assoc *
706nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
707{
708 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
709 unsigned long flags;
710 u64 ran;
711 int idx;
712 bool needrandom = true;
713
714 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
715 if (!assoc)
716 return NULL;
717
718 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
719 if (idx < 0)
720 goto out_free_assoc;
721
722 if (!nvmet_fc_tgtport_get(tgtport))
723 goto out_ida_put;
724
725 assoc->tgtport = tgtport;
726 assoc->a_id = idx;
727 INIT_LIST_HEAD(&assoc->a_list);
728 kref_init(&assoc->ref);
729
730 while (needrandom) {
731 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
732 ran = ran << BYTES_FOR_QID_SHIFT;
733
734 spin_lock_irqsave(&tgtport->lock, flags);
735 needrandom = false;
736 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
737 if (ran == tmpassoc->association_id) {
738 needrandom = true;
739 break;
740 }
741 if (!needrandom) {
742 assoc->association_id = ran;
743 list_add_tail(&assoc->a_list, &tgtport->assoc_list);
744 }
745 spin_unlock_irqrestore(&tgtport->lock, flags);
746 }
747
748 return assoc;
749
750out_ida_put:
751 ida_simple_remove(&tgtport->assoc_cnt, idx);
752out_free_assoc:
753 kfree(assoc);
754 return NULL;
755}
756
757static void
758nvmet_fc_target_assoc_free(struct kref *ref)
759{
760 struct nvmet_fc_tgt_assoc *assoc =
761 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
762 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
763 unsigned long flags;
764
765 spin_lock_irqsave(&tgtport->lock, flags);
766 list_del(&assoc->a_list);
767 spin_unlock_irqrestore(&tgtport->lock, flags);
768 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
769 kfree(assoc);
770 nvmet_fc_tgtport_put(tgtport);
771}
772
773static void
774nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
775{
776 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
777}
778
779static int
780nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
781{
782 return kref_get_unless_zero(&assoc->ref);
783}
784
785static void
786nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
787{
788 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
789 struct nvmet_fc_tgt_queue *queue;
790 unsigned long flags;
791 int i;
792
793 spin_lock_irqsave(&tgtport->lock, flags);
794 for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
795 queue = assoc->queues[i];
796 if (queue) {
797 if (!nvmet_fc_tgt_q_get(queue))
798 continue;
799 spin_unlock_irqrestore(&tgtport->lock, flags);
800 nvmet_fc_delete_target_queue(queue);
801 nvmet_fc_tgt_q_put(queue);
802 spin_lock_irqsave(&tgtport->lock, flags);
803 }
804 }
805 spin_unlock_irqrestore(&tgtport->lock, flags);
806
807 nvmet_fc_tgt_a_put(assoc);
808}
809
810static struct nvmet_fc_tgt_assoc *
811nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
812 u64 association_id)
813{
814 struct nvmet_fc_tgt_assoc *assoc;
815 struct nvmet_fc_tgt_assoc *ret = NULL;
816 unsigned long flags;
817
818 spin_lock_irqsave(&tgtport->lock, flags);
819 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
820 if (association_id == assoc->association_id) {
821 ret = assoc;
822 nvmet_fc_tgt_a_get(assoc);
823 break;
824 }
825 }
826 spin_unlock_irqrestore(&tgtport->lock, flags);
827
828 return ret;
829}
830
831
832/**
 833 * nvmet_fc_register_targetport - transport entry point called by an
 834 *                         LLDD to register the existence of a local
 835 *                         NVME subsystem FC port.
836 * @pinfo: pointer to information about the port to be registered
837 * @template: LLDD entrypoints and operational parameters for the port
838 * @dev: physical hardware device node port corresponds to. Will be
839 * used for DMA mappings
 840 * @portptr: pointer to a target port pointer. Upon success, the routine
 841 *           will allocate a nvmet_fc_target_port structure and place its
 842 *           address in the target port pointer. Upon failure, the target
 843 *           port pointer will be set to NULL.
844 *
845 * Returns:
846 * a completion status. Must be 0 upon success; a negative errno
847 * (ex: -ENXIO) upon failure.
848 */
849int
850nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
851 struct nvmet_fc_target_template *template,
852 struct device *dev,
853 struct nvmet_fc_target_port **portptr)
854{
855 struct nvmet_fc_tgtport *newrec;
856 unsigned long flags;
857 int ret, idx;
858
859 if (!template->xmt_ls_rsp || !template->fcp_op ||
 860	    !template->fcp_abort ||
 861	    !template->fcp_req_release || !template->targetport_delete ||
862 !template->max_hw_queues || !template->max_sgl_segments ||
863 !template->max_dif_sgl_segments || !template->dma_boundary) {
864 ret = -EINVAL;
865 goto out_regtgt_failed;
866 }
867
868 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
869 GFP_KERNEL);
870 if (!newrec) {
871 ret = -ENOMEM;
872 goto out_regtgt_failed;
873 }
874
875 idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
876 if (idx < 0) {
877 ret = -ENOSPC;
878 goto out_fail_kfree;
879 }
880
881 if (!get_device(dev) && dev) {
882 ret = -ENODEV;
883 goto out_ida_put;
884 }
885
886 newrec->fc_target_port.node_name = pinfo->node_name;
887 newrec->fc_target_port.port_name = pinfo->port_name;
888 newrec->fc_target_port.private = &newrec[1];
889 newrec->fc_target_port.port_id = pinfo->port_id;
890 newrec->fc_target_port.port_num = idx;
891 INIT_LIST_HEAD(&newrec->tgt_list);
892 newrec->dev = dev;
893 newrec->ops = template;
894 spin_lock_init(&newrec->lock);
895 INIT_LIST_HEAD(&newrec->ls_list);
896 INIT_LIST_HEAD(&newrec->ls_busylist);
897 INIT_LIST_HEAD(&newrec->assoc_list);
898 kref_init(&newrec->ref);
899 ida_init(&newrec->assoc_cnt);
900
901 ret = nvmet_fc_alloc_ls_iodlist(newrec);
902 if (ret) {
903 ret = -ENOMEM;
904 goto out_free_newrec;
905 }
906
907 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
908 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
909 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
910
911 *portptr = &newrec->fc_target_port;
912 return 0;
913
914out_free_newrec:
915 put_device(dev);
916out_ida_put:
917 ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
918out_fail_kfree:
919 kfree(newrec);
920out_regtgt_failed:
921 *portptr = NULL;
922 return ret;
923}
924EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
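
/*
 * Rough registration sketch for an LLDD; the names below are hypothetical
 * and only the ops/limits validated at the top of
 * nvmet_fc_register_targetport() are shown:
 *
 *	static struct nvmet_fc_target_template lldd_tgt_template = {
 *		.targetport_delete	= lldd_targetport_delete,
 *		.xmt_ls_rsp		= lldd_xmt_ls_rsp,
 *		.fcp_op			= lldd_fcp_op,
 *		.fcp_abort		= lldd_fcp_abort,
 *		.fcp_req_release	= lldd_fcp_req_release,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.target_priv_sz		= sizeof(struct lldd_tgt_priv),
 *	};
 *
 *	ret = nvmet_fc_register_targetport(&pinfo, &lldd_tgt_template,
 *					   &pdev->dev, &lldd->targetport);
 */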
925
926
927static void
928nvmet_fc_free_tgtport(struct kref *ref)
929{
930 struct nvmet_fc_tgtport *tgtport =
931 container_of(ref, struct nvmet_fc_tgtport, ref);
932 struct device *dev = tgtport->dev;
933 unsigned long flags;
934
935 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
936 list_del(&tgtport->tgt_list);
937 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
938
939 nvmet_fc_free_ls_iodlist(tgtport);
940
941 /* let the LLDD know we've finished tearing it down */
942 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
943
944 ida_simple_remove(&nvmet_fc_tgtport_cnt,
945 tgtport->fc_target_port.port_num);
946
947 ida_destroy(&tgtport->assoc_cnt);
948
949 kfree(tgtport);
950
951 put_device(dev);
952}
953
954static void
955nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
956{
957 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
958}
959
960static int
961nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
962{
963 return kref_get_unless_zero(&tgtport->ref);
964}
965
966static void
967__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
968{
969 struct nvmet_fc_tgt_assoc *assoc, *next;
970 unsigned long flags;
971
972 spin_lock_irqsave(&tgtport->lock, flags);
973 list_for_each_entry_safe(assoc, next,
974 &tgtport->assoc_list, a_list) {
975 if (!nvmet_fc_tgt_a_get(assoc))
976 continue;
977 spin_unlock_irqrestore(&tgtport->lock, flags);
978 nvmet_fc_delete_target_assoc(assoc);
979 nvmet_fc_tgt_a_put(assoc);
980 spin_lock_irqsave(&tgtport->lock, flags);
981 }
982 spin_unlock_irqrestore(&tgtport->lock, flags);
983}
984
985/*
986 * nvmet layer has called to terminate an association
987 */
988static void
989nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
990{
991 struct nvmet_fc_tgtport *tgtport, *next;
992 struct nvmet_fc_tgt_assoc *assoc;
993 struct nvmet_fc_tgt_queue *queue;
994 unsigned long flags;
995 bool found_ctrl = false;
996
997 /* this is a bit ugly, but don't want to make locks layered */
998 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
999 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1000 tgt_list) {
1001 if (!nvmet_fc_tgtport_get(tgtport))
1002 continue;
1003 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1004
1005 spin_lock_irqsave(&tgtport->lock, flags);
1006 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1007 queue = assoc->queues[0];
1008 if (queue && queue->nvme_sq.ctrl == ctrl) {
1009 if (nvmet_fc_tgt_a_get(assoc))
1010 found_ctrl = true;
1011 break;
1012 }
1013 }
1014 spin_unlock_irqrestore(&tgtport->lock, flags);
1015
1016 nvmet_fc_tgtport_put(tgtport);
1017
1018 if (found_ctrl) {
1019 nvmet_fc_delete_target_assoc(assoc);
1020 nvmet_fc_tgt_a_put(assoc);
1021 return;
1022 }
1023
1024 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1025 }
1026 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1027}
1028
1029/**
 1030 * nvmet_fc_unregister_targetport - transport entry point called by an
 1031 *                         LLDD to deregister/remove a previously
 1032 *                         registered local NVME subsystem FC port.
 1033 * @target_port: pointer to the (registered) target port that is to be
1034 * deregistered.
1035 *
1036 * Returns:
1037 * a completion status. Must be 0 upon success; a negative errno
1038 * (ex: -ENXIO) upon failure.
1039 */
1040int
1041nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1042{
1043 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1044
1045 /* terminate any outstanding associations */
1046 __nvmet_fc_free_assocs(tgtport);
1047
1048 nvmet_fc_tgtport_put(tgtport);
1049
1050 return 0;
1051}
1052EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1053
1054
1055/* *********************** FC-NVME LS Handling **************************** */
1056
1057
1058static void
 1059nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
1060{
1061 struct fcnvme_ls_acc_hdr *acc = buf;
1062
1063 acc->w0.ls_cmd = ls_cmd;
1064 acc->desc_list_len = desc_len;
1065 acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1066 acc->rqst.desc_len =
1067 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1068 acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1069}
1070
1071static int
1072nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1073 u8 reason, u8 explanation, u8 vendor)
1074{
1075 struct fcnvme_ls_rjt *rjt = buf;
1076
1077 nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1078 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1079 ls_cmd);
1080 rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1081 rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1082 rjt->rjt.reason_code = reason;
1083 rjt->rjt.reason_explanation = explanation;
1084 rjt->rjt.vendor = vendor;
1085
1086 return sizeof(struct fcnvme_ls_rjt);
1087}
1088
1089/* Validation Error indexes into the string table below */
1090enum {
1091 VERR_NO_ERROR = 0,
1092 VERR_CR_ASSOC_LEN = 1,
1093 VERR_CR_ASSOC_RQST_LEN = 2,
1094 VERR_CR_ASSOC_CMD = 3,
1095 VERR_CR_ASSOC_CMD_LEN = 4,
1096 VERR_ERSP_RATIO = 5,
1097 VERR_ASSOC_ALLOC_FAIL = 6,
1098 VERR_QUEUE_ALLOC_FAIL = 7,
1099 VERR_CR_CONN_LEN = 8,
1100 VERR_CR_CONN_RQST_LEN = 9,
1101 VERR_ASSOC_ID = 10,
1102 VERR_ASSOC_ID_LEN = 11,
1103 VERR_NO_ASSOC = 12,
1104 VERR_CONN_ID = 13,
1105 VERR_CONN_ID_LEN = 14,
1106 VERR_NO_CONN = 15,
1107 VERR_CR_CONN_CMD = 16,
1108 VERR_CR_CONN_CMD_LEN = 17,
1109 VERR_DISCONN_LEN = 18,
1110 VERR_DISCONN_RQST_LEN = 19,
1111 VERR_DISCONN_CMD = 20,
1112 VERR_DISCONN_CMD_LEN = 21,
1113 VERR_DISCONN_SCOPE = 22,
1114 VERR_RS_LEN = 23,
1115 VERR_RS_RQST_LEN = 24,
1116 VERR_RS_CMD = 25,
1117 VERR_RS_CMD_LEN = 26,
1118 VERR_RS_RCTL = 27,
1119 VERR_RS_RO = 28,
1120};
1121
1122static char *validation_errors[] = {
1123 "OK",
1124 "Bad CR_ASSOC Length",
1125 "Bad CR_ASSOC Rqst Length",
1126 "Not CR_ASSOC Cmd",
1127 "Bad CR_ASSOC Cmd Length",
1128 "Bad Ersp Ratio",
1129 "Association Allocation Failed",
1130 "Queue Allocation Failed",
1131 "Bad CR_CONN Length",
1132 "Bad CR_CONN Rqst Length",
1133 "Not Association ID",
1134 "Bad Association ID Length",
1135 "No Association",
1136 "Not Connection ID",
1137 "Bad Connection ID Length",
1138 "No Connection",
1139 "Not CR_CONN Cmd",
1140 "Bad CR_CONN Cmd Length",
1141 "Bad DISCONN Length",
1142 "Bad DISCONN Rqst Length",
1143 "Not DISCONN Cmd",
1144 "Bad DISCONN Cmd Length",
1145 "Bad Disconnect Scope",
1146 "Bad RS Length",
1147 "Bad RS Rqst Length",
1148 "Not RS Cmd",
1149 "Bad RS Cmd Length",
1150 "Bad RS R_CTL",
1151 "Bad RS Relative Offset",
1152};
1153
1154static void
1155nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1156 struct nvmet_fc_ls_iod *iod)
1157{
1158 struct fcnvme_ls_cr_assoc_rqst *rqst =
1159 (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1160 struct fcnvme_ls_cr_assoc_acc *acc =
1161 (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1162 struct nvmet_fc_tgt_queue *queue;
1163 int ret = 0;
1164
1165 memset(acc, 0, sizeof(*acc));
1166
1167 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst))
1168 ret = VERR_CR_ASSOC_LEN;
1169 else if (rqst->desc_list_len !=
1170 fcnvme_lsdesc_len(
1171 sizeof(struct fcnvme_ls_cr_assoc_rqst)))
1172 ret = VERR_CR_ASSOC_RQST_LEN;
1173 else if (rqst->assoc_cmd.desc_tag !=
1174 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1175 ret = VERR_CR_ASSOC_CMD;
1176 else if (rqst->assoc_cmd.desc_len !=
1177 fcnvme_lsdesc_len(
1178 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)))
1179 ret = VERR_CR_ASSOC_CMD_LEN;
1180 else if (!rqst->assoc_cmd.ersp_ratio ||
1181 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1182 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1183 ret = VERR_ERSP_RATIO;
1184
1185 else {
1186 /* new association w/ admin queue */
1187 iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1188 if (!iod->assoc)
1189 ret = VERR_ASSOC_ALLOC_FAIL;
1190 else {
1191 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1192 be16_to_cpu(rqst->assoc_cmd.sqsize));
1193 if (!queue)
1194 ret = VERR_QUEUE_ALLOC_FAIL;
1195 }
1196 }
1197
1198 if (ret) {
1199 dev_err(tgtport->dev,
1200 "Create Association LS failed: %s\n",
1201 validation_errors[ret]);
1202 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1203 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1204 FCNVME_RJT_RC_LOGIC,
1205 FCNVME_RJT_EXP_NONE, 0);
1206 return;
1207 }
1208
1209 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1210 atomic_set(&queue->connected, 1);
1211 queue->sqhd = 0; /* best place to init value */
1212
1213 /* format a response */
1214
1215 iod->lsreq->rsplen = sizeof(*acc);
1216
1217 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1218 fcnvme_lsdesc_len(
1219 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1220 FCNVME_LS_CREATE_ASSOCIATION);
1221 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1222 acc->associd.desc_len =
1223 fcnvme_lsdesc_len(
1224 sizeof(struct fcnvme_lsdesc_assoc_id));
1225 acc->associd.association_id =
1226 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1227 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1228 acc->connectid.desc_len =
1229 fcnvme_lsdesc_len(
1230 sizeof(struct fcnvme_lsdesc_conn_id));
1231 acc->connectid.connection_id = acc->associd.association_id;
1232}
1233
1234static void
1235nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1236 struct nvmet_fc_ls_iod *iod)
1237{
1238 struct fcnvme_ls_cr_conn_rqst *rqst =
1239 (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1240 struct fcnvme_ls_cr_conn_acc *acc =
1241 (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1242 struct nvmet_fc_tgt_queue *queue;
1243 int ret = 0;
1244
1245 memset(acc, 0, sizeof(*acc));
1246
1247 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1248 ret = VERR_CR_CONN_LEN;
1249 else if (rqst->desc_list_len !=
1250 fcnvme_lsdesc_len(
1251 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1252 ret = VERR_CR_CONN_RQST_LEN;
1253 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1254 ret = VERR_ASSOC_ID;
1255 else if (rqst->associd.desc_len !=
1256 fcnvme_lsdesc_len(
1257 sizeof(struct fcnvme_lsdesc_assoc_id)))
1258 ret = VERR_ASSOC_ID_LEN;
1259 else if (rqst->connect_cmd.desc_tag !=
1260 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1261 ret = VERR_CR_CONN_CMD;
1262 else if (rqst->connect_cmd.desc_len !=
1263 fcnvme_lsdesc_len(
1264 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1265 ret = VERR_CR_CONN_CMD_LEN;
1266 else if (!rqst->connect_cmd.ersp_ratio ||
1267 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1268 be16_to_cpu(rqst->connect_cmd.sqsize)))
1269 ret = VERR_ERSP_RATIO;
1270
1271 else {
1272 /* new io queue */
1273 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1274 be64_to_cpu(rqst->associd.association_id));
1275 if (!iod->assoc)
1276 ret = VERR_NO_ASSOC;
1277 else {
1278 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1279 be16_to_cpu(rqst->connect_cmd.qid),
1280 be16_to_cpu(rqst->connect_cmd.sqsize));
1281 if (!queue)
1282 ret = VERR_QUEUE_ALLOC_FAIL;
1283
1284 /* release get taken in nvmet_fc_find_target_assoc */
1285 nvmet_fc_tgt_a_put(iod->assoc);
1286 }
1287 }
1288
1289 if (ret) {
1290 dev_err(tgtport->dev,
1291 "Create Connection LS failed: %s\n",
1292 validation_errors[ret]);
1293 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1294 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1295 (ret == VERR_NO_ASSOC) ?
1296 FCNVME_RJT_RC_INV_ASSOC :
1297 FCNVME_RJT_RC_LOGIC,
1298 FCNVME_RJT_EXP_NONE, 0);
1299 return;
1300 }
1301
1302 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1303 atomic_set(&queue->connected, 1);
1304 queue->sqhd = 0; /* best place to init value */
1305
1306 /* format a response */
1307
1308 iod->lsreq->rsplen = sizeof(*acc);
1309
1310 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1311 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1312 FCNVME_LS_CREATE_CONNECTION);
1313 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1314 acc->connectid.desc_len =
1315 fcnvme_lsdesc_len(
1316 sizeof(struct fcnvme_lsdesc_conn_id));
1317 acc->connectid.connection_id =
1318 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1319 be16_to_cpu(rqst->connect_cmd.qid)));
1320}
1321
1322static void
1323nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1324 struct nvmet_fc_ls_iod *iod)
1325{
1326 struct fcnvme_ls_disconnect_rqst *rqst =
1327 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1328 struct fcnvme_ls_disconnect_acc *acc =
1329 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
 1330	struct nvmet_fc_tgt_queue *queue = NULL;
1331 struct nvmet_fc_tgt_assoc *assoc;
1332 int ret = 0;
1333 bool del_assoc = false;
1334
1335 memset(acc, 0, sizeof(*acc));
1336
1337 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1338 ret = VERR_DISCONN_LEN;
1339 else if (rqst->desc_list_len !=
1340 fcnvme_lsdesc_len(
1341 sizeof(struct fcnvme_ls_disconnect_rqst)))
1342 ret = VERR_DISCONN_RQST_LEN;
1343 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1344 ret = VERR_ASSOC_ID;
1345 else if (rqst->associd.desc_len !=
1346 fcnvme_lsdesc_len(
1347 sizeof(struct fcnvme_lsdesc_assoc_id)))
1348 ret = VERR_ASSOC_ID_LEN;
1349 else if (rqst->discon_cmd.desc_tag !=
1350 cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1351 ret = VERR_DISCONN_CMD;
1352 else if (rqst->discon_cmd.desc_len !=
1353 fcnvme_lsdesc_len(
1354 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1355 ret = VERR_DISCONN_CMD_LEN;
1356 else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1357 (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1358 ret = VERR_DISCONN_SCOPE;
1359 else {
1360 /* match an active association */
1361 assoc = nvmet_fc_find_target_assoc(tgtport,
1362 be64_to_cpu(rqst->associd.association_id));
1363 iod->assoc = assoc;
1364 if (assoc) {
1365 if (rqst->discon_cmd.scope ==
1366 FCNVME_DISCONN_CONNECTION) {
1367 queue = nvmet_fc_find_target_queue(tgtport,
1368 be64_to_cpu(
1369 rqst->discon_cmd.id));
1370 if (!queue) {
1371 nvmet_fc_tgt_a_put(assoc);
1372 ret = VERR_NO_CONN;
1373 }
1374 }
1375 } else
1376 ret = VERR_NO_ASSOC;
1377 }
1378
1379 if (ret) {
1380 dev_err(tgtport->dev,
1381 "Disconnect LS failed: %s\n",
1382 validation_errors[ret]);
1383 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1384 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1385 (ret == VERR_NO_ASSOC) ?
1386 FCNVME_RJT_RC_INV_ASSOC :
1387 (ret == VERR_NO_CONN) ?
1388 FCNVME_RJT_RC_INV_CONN :
1389 FCNVME_RJT_RC_LOGIC,
1390 FCNVME_RJT_EXP_NONE, 0);
1391 return;
1392 }
1393
1394 /* format a response */
1395
1396 iod->lsreq->rsplen = sizeof(*acc);
1397
1398 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1399 fcnvme_lsdesc_len(
1400 sizeof(struct fcnvme_ls_disconnect_acc)),
1401 FCNVME_LS_DISCONNECT);
1402
1403
1404 /* are we to delete a Connection ID (queue) */
1405 if (queue) {
1406 int qid = queue->qid;
 1407
 1408		nvmet_fc_delete_target_queue(queue);
 1409
 1410		/* release the get taken by find_target_queue */
 1411		nvmet_fc_tgt_q_put(queue);
 1412
1413 /* tear association down if io queue terminated */
1414 if (!qid)
1415 del_assoc = true;
1416 }
1417
1418 /* release get taken in nvmet_fc_find_target_assoc */
1419 nvmet_fc_tgt_a_put(iod->assoc);
1420
1421 if (del_assoc)
1422 nvmet_fc_delete_target_assoc(iod->assoc);
1423}
1424
1425
1426/* *********************** NVME Ctrl Routines **************************** */
1427
1428
1429static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1430
1431static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1432
1433static void
1434nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1435{
1436 struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1437 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1438
1439 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1440 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1441 nvmet_fc_free_ls_iod(tgtport, iod);
1442 nvmet_fc_tgtport_put(tgtport);
1443}
1444
1445static void
1446nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1447 struct nvmet_fc_ls_iod *iod)
1448{
1449 int ret;
1450
1451 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1452 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1453
1454 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1455 if (ret)
1456 nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1457}
1458
1459/*
1460 * Actual processing routine for received FC-NVME LS Requests from the LLD
1461 */
1462static void
1463nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1464 struct nvmet_fc_ls_iod *iod)
1465{
1466 struct fcnvme_ls_rqst_w0 *w0 =
1467 (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1468
1469 iod->lsreq->nvmet_fc_private = iod;
1470 iod->lsreq->rspbuf = iod->rspbuf;
1471 iod->lsreq->rspdma = iod->rspdma;
1472 iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
 1473	/* Be preventative: handlers will later set a valid length */
1474 iod->lsreq->rsplen = 0;
1475
1476 iod->assoc = NULL;
1477
1478 /*
1479 * handlers:
1480 * parse request input, execute the request, and format the
1481 * LS response
1482 */
1483 switch (w0->ls_cmd) {
1484 case FCNVME_LS_CREATE_ASSOCIATION:
1485 /* Creates Association and initial Admin Queue/Connection */
1486 nvmet_fc_ls_create_association(tgtport, iod);
1487 break;
1488 case FCNVME_LS_CREATE_CONNECTION:
1489 /* Creates an IO Queue/Connection */
1490 nvmet_fc_ls_create_connection(tgtport, iod);
1491 break;
1492 case FCNVME_LS_DISCONNECT:
1493 /* Terminate a Queue/Connection or the Association */
1494 nvmet_fc_ls_disconnect(tgtport, iod);
1495 break;
1496 default:
1497 iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1498 NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
 1499				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1500 }
1501
1502 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1503}
1504
1505/*
1506 * Actual processing routine for received FC-NVME LS Requests from the LLD
1507 */
1508static void
1509nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1510{
1511 struct nvmet_fc_ls_iod *iod =
1512 container_of(work, struct nvmet_fc_ls_iod, work);
1513 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1514
1515 nvmet_fc_handle_ls_rqst(tgtport, iod);
1516}
1517
1518
1519/**
1520 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1521 * upon the reception of a NVME LS request.
1522 *
1523 * The nvmet-fc layer will copy payload to an internal structure for
1524 * processing. As such, upon completion of the routine, the LLDD may
1525 * immediately free/reuse the LS request buffer passed in the call.
1526 *
1527 * If this routine returns error, the LLDD should abort the exchange.
1528 *
 1529 * @target_port: pointer to the (registered) target port the LS was
1530 * received on.
1531 * @lsreq: pointer to a lsreq request structure to be used to reference
1532 * the exchange corresponding to the LS.
1533 * @lsreqbuf: pointer to the buffer containing the LS Request
1534 * @lsreqbuf_len: length, in bytes, of the received LS request
1535 */
1536int
1537nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1538 struct nvmefc_tgt_ls_req *lsreq,
1539 void *lsreqbuf, u32 lsreqbuf_len)
1540{
1541 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1542 struct nvmet_fc_ls_iod *iod;
1543
1544 if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1545 return -E2BIG;
1546
1547 if (!nvmet_fc_tgtport_get(tgtport))
1548 return -ESHUTDOWN;
1549
1550 iod = nvmet_fc_alloc_ls_iod(tgtport);
1551 if (!iod) {
1552 nvmet_fc_tgtport_put(tgtport);
1553 return -ENOENT;
1554 }
1555
1556 iod->lsreq = lsreq;
1557 iod->fcpreq = NULL;
1558 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1559 iod->rqstdatalen = lsreqbuf_len;
1560
1561 schedule_work(&iod->work);
1562
1563 return 0;
1564}
1565EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
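
/*
 * Illustrative LLDD call path (helper names are hypothetical): on receipt
 * of an FC-NVME LS frame the driver would typically do
 *
 *	ret = nvmet_fc_rcv_ls_req(lldd->targetport, &lldd_ls_ctx->ls_req,
 *				  rx_buf, rx_len);
 *	if (ret)
 *		lldd_abort_exchange(lldd, lldd_ls_ctx->exchange);
 *
 * and, because the payload is copied above, rx_buf may be reused as soon
 * as the call returns.
 */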
1566
1567
1568/*
1569 * **********************
1570 * Start of FCP handling
1571 * **********************
1572 */
1573
1574static int
1575nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1576{
1577 struct scatterlist *sg;
1578 struct page *page;
1579 unsigned int nent;
1580 u32 page_len, length;
1581 int i = 0;
1582
1583 length = fod->total_length;
1584 nent = DIV_ROUND_UP(length, PAGE_SIZE);
1585 sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
1586 if (!sg)
1587 goto out;
1588
1589 sg_init_table(sg, nent);
1590
1591 while (length) {
1592 page_len = min_t(u32, length, PAGE_SIZE);
1593
1594 page = alloc_page(GFP_KERNEL);
1595 if (!page)
1596 goto out_free_pages;
1597
1598 sg_set_page(&sg[i], page, page_len, 0);
1599 length -= page_len;
1600 i++;
1601 }
1602
1603 fod->data_sg = sg;
1604 fod->data_sg_cnt = nent;
1605 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1606 ((fod->io_dir == NVMET_FCP_WRITE) ?
1607 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1608 /* note: write from initiator perspective */
1609
1610 return 0;
1611
1612out_free_pages:
1613 while (i > 0) {
1614 i--;
1615 __free_page(sg_page(&sg[i]));
1616 }
1617 kfree(sg);
1618 fod->data_sg = NULL;
1619 fod->data_sg_cnt = 0;
1620out:
1621 return NVME_SC_INTERNAL;
1622}
1623
1624static void
1625nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1626{
1627 struct scatterlist *sg;
1628 int count;
1629
1630 if (!fod->data_sg || !fod->data_sg_cnt)
1631 return;
1632
1633 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1634 ((fod->io_dir == NVMET_FCP_WRITE) ?
1635 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1636 for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
1637 __free_page(sg_page(sg));
1638 kfree(fod->data_sg);
1639 fod->data_sg = NULL;
1640 fod->data_sg_cnt = 0;
1641}
1642
1643
1644static bool
1645queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1646{
1647 u32 sqtail, used;
1648
1649 /* egad, this is ugly. And sqtail is just a best guess */
1650 sqtail = atomic_read(&q->sqtail) % q->sqsize;
1651
1652 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1653 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
1654}
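
/*
 * Worked example (hypothetical numbers): with sqsize 32, sqhd 4 and a
 * sampled sqtail of 2, used = 2 + 32 - 4 = 30 entries; 30 * 10 >= 31 * 9
 * (300 >= 279), so the queue is treated as 90% full and an ersp will be
 * forced.
 */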
1655
1656/*
1657 * Prep RSP payload.
1658 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1659 */
1660static void
1661nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1662 struct nvmet_fc_fcp_iod *fod)
1663{
1664 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1665 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1666 struct nvme_completion *cqe = &ersp->cqe;
1667 u32 *cqewd = (u32 *)cqe;
1668 bool send_ersp = false;
1669 u32 rsn, rspcnt, xfr_length;
1670
1671 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1672 xfr_length = fod->total_length;
1673 else
1674 xfr_length = fod->offset;
1675
1676 /*
1677 * check to see if we can send a 0's rsp.
1678 * Note: to send a 0's response, the NVME-FC host transport will
1679 * recreate the CQE. The host transport knows: sq id, SQHD (last
1680 * seen in an ersp), and command_id. Thus it will create a
1681 * zero-filled CQE with those known fields filled in. Transport
1682 * must send an ersp for any condition where the cqe won't match
1683 * this.
1684 *
1685 * Here are the FC-NVME mandated cases where we must send an ersp:
1686 * every N responses, where N=ersp_ratio
1687 * force fabric commands to send ersp's (not in FC-NVME but good
1688 * practice)
1689 * normal cmds: any time status is non-zero, or status is zero
1690 * but words 0 or 1 are non-zero.
1691 * the SQ is 90% or more full
1692 * the cmd is a fused command
1693 * transferred data length not equal to cmd iu length
1694 */
1695 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1696 if (!(rspcnt % fod->queue->ersp_ratio) ||
1697 sqe->opcode == nvme_fabrics_command ||
1698 xfr_length != fod->total_length ||
1699 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1700 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
 1701	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
1702 send_ersp = true;
1703
1704 /* re-set the fields */
1705 fod->fcpreq->rspaddr = ersp;
1706 fod->fcpreq->rspdma = fod->rspdma;
1707
1708 if (!send_ersp) {
1709 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1710 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1711 } else {
1712 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1713 rsn = atomic_inc_return(&fod->queue->rsn);
1714 ersp->rsn = cpu_to_be32(rsn);
1715 ersp->xfrd_len = cpu_to_be32(xfr_length);
1716 fod->fcpreq->rsplen = sizeof(*ersp);
1717 }
1718
1719 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1720 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1721}
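
/*
 * Example (illustrative): with ersp_ratio 8, only every 8th completion on
 * an otherwise quiet queue carries a full ersp; the rest go out as
 * NVME_FC_SIZEOF_ZEROS_RSP responses, unless one of the conditions above
 * (fabrics command, non-zero status or cqe words, fused command, short
 * transfer, or a 90%-full SQ) forces a full ersp anyway.
 */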
1722
1723static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1724
1725static void
1726nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
1727 struct nvmet_fc_fcp_iod *fod)
1728{
1729 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1730
1731 /* data no longer needed */
1732 nvmet_fc_free_tgt_pgs(fod);
1733
1734 /*
1735 * if an ABTS was received or we issued the fcp_abort early
1736 * don't call abort routine again.
1737 */
1738 /* no need to take lock - lock was taken earlier to get here */
1739 if (!fod->aborted)
1740 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
1741
1742 nvmet_fc_free_fcp_iod(fod->queue, fod);
1743}
1744
1745static void
1746nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1747 struct nvmet_fc_fcp_iod *fod)
1748{
1749 int ret;
1750
1751 fod->fcpreq->op = NVMET_FCOP_RSP;
1752 fod->fcpreq->timeout = 0;
1753
1754 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1755
1756 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1757 if (ret)
 1758		nvmet_fc_abort_op(tgtport, fod);
1759}
1760
1761static void
1762nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1763 struct nvmet_fc_fcp_iod *fod, u8 op)
1764{
1765 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1766 struct scatterlist *sg, *datasg;
 1767	unsigned long flags;
1768 u32 tlen, sg_off;
1769 int ret;
1770
1771 fcpreq->op = op;
1772 fcpreq->offset = fod->offset;
1773 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1774 tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
1775 (fod->total_length - fod->offset));
1776 tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
1777 tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
1778 * PAGE_SIZE);
1779 fcpreq->transfer_length = tlen;
1780 fcpreq->transferred_length = 0;
1781 fcpreq->fcp_error = 0;
1782 fcpreq->rsplen = 0;
1783
1784 fcpreq->sg_cnt = 0;
1785
1786 datasg = fod->next_sg;
1787 sg_off = fod->next_sg_offset;
1788
1789 for (sg = fcpreq->sg ; tlen; sg++) {
1790 *sg = *datasg;
1791 if (sg_off) {
1792 sg->offset += sg_off;
1793 sg->length -= sg_off;
1794 sg->dma_address += sg_off;
1795 sg_off = 0;
1796 }
1797 if (tlen < sg->length) {
1798 sg->length = tlen;
1799 fod->next_sg = datasg;
1800 fod->next_sg_offset += tlen;
1801 } else if (tlen == sg->length) {
1802 fod->next_sg_offset = 0;
1803 fod->next_sg = sg_next(datasg);
1804 } else {
1805 fod->next_sg_offset = 0;
1806 datasg = sg_next(datasg);
1807 }
1808 tlen -= sg->length;
1809 fcpreq->sg_cnt++;
1810 }
1811
1812 /*
1813 * If the last READDATA request: check if LLDD supports
1814 * combined xfr with response.
1815 */
1816 if ((op == NVMET_FCOP_READDATA) &&
1817 ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
1818 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1819 fcpreq->op = NVMET_FCOP_READDATA_RSP;
1820 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1821 }
1822
1823 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1824 if (ret) {
1825 /*
1826 * should be ok to set w/o lock as its in the thread of
1827 * execution (not an async timer routine) and doesn't
1828 * contend with any clearing action
1829 */
1830 fod->abort = true;
1831
1832 if (op == NVMET_FCOP_WRITEDATA) {
1833 spin_lock_irqsave(&fod->flock, flags);
1834 fod->writedataactive = false;
1835 spin_unlock_irqrestore(&fod->flock, flags);
1836 nvmet_req_complete(&fod->req,
1837 NVME_SC_FC_TRANSPORT_ERROR);
 1838		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1839 fcpreq->fcp_error = ret;
1840 fcpreq->transferred_length = 0;
1841 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1842 }
1843 }
1844}
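
/*
 * Illustration (hypothetical sizes): for a 1 MB host write with
 * NVMET_FC_MAX_KB_PER_XFR at 256 and an LLDD whose max_sgl_segments
 * permits it, this routine is invoked four times, each issuing a 256 KB
 * NVMET_FCOP_WRITEDATA op; the WRITEDATA completion handler
 * (nvmet_fc_fod_op_done) advances fod->offset and requests the next chunk
 * until the full length has been moved.
 */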
1845
1846static inline bool
1847__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
1848{
1849 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1850 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1851
1852 /* if in the middle of an io and we need to tear down */
1853 if (abort) {
1854 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
1855 nvmet_req_complete(&fod->req,
1856 NVME_SC_FC_TRANSPORT_ERROR);
1857 return true;
1858 }
1859
1860 nvmet_fc_abort_op(tgtport, fod);
1861 return true;
1862 }
1863
1864 return false;
1865}
1866
1867/*
1868 * actual done handler for FCP operations when completed by the lldd
1869 */
 1870static void
 1871nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
 1872{
 1873	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1874 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1875 unsigned long flags;
1876 bool abort;
1877
1878 spin_lock_irqsave(&fod->flock, flags);
1879 abort = fod->abort;
 1880	fod->writedataactive = false;
1881 spin_unlock_irqrestore(&fod->flock, flags);
1882
1883 switch (fcpreq->op) {
1884
1885 case NVMET_FCOP_WRITEDATA:
1886 if (__nvmet_fc_fod_op_abort(fod, abort))
1887 return;
 1888		if (fcpreq->fcp_error ||
 1889		    fcpreq->transferred_length != fcpreq->transfer_length) {
1890 spin_lock(&fod->flock);
1891 fod->abort = true;
1892 spin_unlock(&fod->flock);
1893
1894 nvmet_req_complete(&fod->req,
1895 NVME_SC_FC_TRANSPORT_ERROR);
1896 return;
1897 }
1898
1899 fod->offset += fcpreq->transferred_length;
1900 if (fod->offset != fod->total_length) {
1901 spin_lock_irqsave(&fod->flock, flags);
1902 fod->writedataactive = true;
1903 spin_unlock_irqrestore(&fod->flock, flags);
1904
1905 /* transfer the next chunk */
1906 nvmet_fc_transfer_fcp_data(tgtport, fod,
1907 NVMET_FCOP_WRITEDATA);
1908 return;
1909 }
1910
1911 /* data transfer complete, resume with nvmet layer */
1912
1913 fod->req.execute(&fod->req);
1914
1915 break;
1916
1917 case NVMET_FCOP_READDATA:
1918 case NVMET_FCOP_READDATA_RSP:
1919 if (__nvmet_fc_fod_op_abort(fod, abort))
1920 return;
 1921		if (fcpreq->fcp_error ||
 1922		    fcpreq->transferred_length != fcpreq->transfer_length) {
 1923			nvmet_fc_abort_op(tgtport, fod);
1924 return;
1925 }
1926
1927 /* success */
1928
1929 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
1930 /* data no longer needed */
1931 nvmet_fc_free_tgt_pgs(fod);
1932 nvmet_fc_free_fcp_iod(fod->queue, fod);
1933 return;
1934 }
1935
1936 fod->offset += fcpreq->transferred_length;
1937 if (fod->offset != fod->total_length) {
1938 /* transfer the next chunk */
1939 nvmet_fc_transfer_fcp_data(tgtport, fod,
1940 NVMET_FCOP_READDATA);
1941 return;
1942 }
1943
1944 /* data transfer complete, send response */
1945
1946 /* data no longer needed */
1947 nvmet_fc_free_tgt_pgs(fod);
1948
1949 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
1950
1951 break;
1952
1953 case NVMET_FCOP_RSP:
a97ec51b
JS
1954 if (__nvmet_fc_fod_op_abort(fod, abort))
1955 return;
c5343203
JS
1956 nvmet_fc_free_fcp_iod(fod->queue, fod);
1957 break;
1958
1959 default:
c5343203
JS
1960 break;
1961 }
1962}
1963
39498fae
JS
1964static void
1965nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
1966{
1967 struct nvmet_fc_fcp_iod *fod =
1968 container_of(work, struct nvmet_fc_fcp_iod, done_work);
1969
1970 nvmet_fc_fod_op_done(fod);
1971}
1972
1973static void
1974nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
1975{
1976 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
1977 struct nvmet_fc_tgt_queue *queue = fod->queue;
1978
1979 if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
1980 /* context switch so completion is not in ISR context */
1981 queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
1982 else
1983 nvmet_fc_fod_op_done(fod);
1984}
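/*
 * Editor's sketch (illustrative only, not part of this file): the deferral
 * above (and the analogous NVMET_FCTGTFEAT_CMD_IN_ISR deferral in
 * nvmet_fc_rcv_fcp_req() further below) is selected by the LLDD's
 * target_features bits.  A hypothetical LLDD template could request both;
 * "example_lldd_fcp_op" and the queue count are assumptions, while
 * .fcp_op/.max_hw_queues/.target_features are the fields this file uses.
 */
#if 0
static struct nvmet_fc_target_template example_lldd_tgt_template = {
	.fcp_op		 = example_lldd_fcp_op,
	.max_hw_queues	 = 4,
	/*
	 * completions and CMD IUs arrive in hard-IRQ context on this
	 * hypothetical hardware, so ask nvmet-fc to bounce both to a
	 * per-queue workqueue
	 */
	.target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
			   NVMET_FCTGTFEAT_OPDONE_IN_ISR,
};
#endif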
1985
c5343203
JS
1986/*
1987 * actual completion handler after execution by the nvmet layer
1988 */
1989static void
1990__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
1991 struct nvmet_fc_fcp_iod *fod, int status)
1992{
1993 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1994 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
1995 unsigned long flags;
1996 bool abort;
1997
1998 spin_lock_irqsave(&fod->flock, flags);
1999 abort = fod->abort;
2000 spin_unlock_irqrestore(&fod->flock, flags);
2001
2002 /* if we have a CQE, snoop the last sq_head value */
2003 if (!status)
2004 fod->queue->sqhd = cqe->sq_head;
2005
2006 if (abort) {
a97ec51b 2007 nvmet_fc_abort_op(tgtport, fod);
c5343203
JS
2008 return;
2009 }
2010
2011 /* if an error handling the cmd post initial parsing */
2012 if (status) {
2013 /* fudge up a failed CQE status for our transport error */
2014 memset(cqe, 0, sizeof(*cqe));
2015 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
2016 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2017 cqe->command_id = sqe->command_id;
2018 cqe->status = cpu_to_le16(status);
2019 } else {
2020
2021 /*
2022		 * try to push the data even if the nvme completion (CQE)
2023		 * status is non-zero. There may be a status where data was
2024		 * still intended to be moved.
2025 */
2026 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2027 /* push the data over before sending rsp */
2028 nvmet_fc_transfer_fcp_data(tgtport, fod,
2029 NVMET_FCOP_READDATA);
2030 return;
2031 }
2032
2033 /* writes & no data - fall thru */
2034 }
2035
2036 /* data no longer needed */
2037 nvmet_fc_free_tgt_pgs(fod);
2038
2039 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2040}
2041
2042
2043static void
2044nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2045{
2046 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2047 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2048
2049 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2050}
2051
2052
2053/*
2054 * Actual processing routine for a received FC-NVME FCP command from the LLD
2055 */
edba98dd 2056static void
c5343203
JS
2057nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2058 struct nvmet_fc_fcp_iod *fod)
2059{
2060 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2061 int ret;
2062
2063 /*
2064	 * Fused commands are currently not supported in the Linux
2065	 * implementation.
2066	 *
2067	 * As such, the FC transport implementation does not inspect the
2068	 * fused flags, nor does it hold off delivery to the upper layer
2069	 * until both commands of a fused pair have arrived (ordered by csn).
2070 */
2071
2072 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2073
2074 fod->total_length = be32_to_cpu(cmdiu->data_len);
2075 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2076 fod->io_dir = NVMET_FCP_WRITE;
2077 if (!nvme_is_write(&cmdiu->sqe))
2078 goto transport_error;
2079 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2080 fod->io_dir = NVMET_FCP_READ;
2081 if (nvme_is_write(&cmdiu->sqe))
2082 goto transport_error;
2083 } else {
2084 fod->io_dir = NVMET_FCP_NODATA;
2085 if (fod->total_length)
2086 goto transport_error;
2087 }
2088
2089 fod->req.cmd = &fod->cmdiubuf.sqe;
2090 fod->req.rsp = &fod->rspiubuf.cqe;
2091 fod->req.port = fod->queue->port;
2092
2093 /* ensure nvmet handlers will set cmd handler callback */
2094 fod->req.execute = NULL;
2095
2096 /* clear any response payload */
2097 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2098
2099 ret = nvmet_req_init(&fod->req,
2100 &fod->queue->nvme_cq,
2101 &fod->queue->nvme_sq,
2102 &nvmet_fc_tgt_fcp_ops);
a97ec51b
JS
2103 if (!ret) { /* bad SQE content or invalid ctrl state */
2104 nvmet_fc_abort_op(tgtport, fod);
c5343203
JS
2105 return;
2106 }
2107
2108 /* keep a running counter of tail position */
2109 atomic_inc(&fod->queue->sqtail);
2110
2111 fod->data_sg = NULL;
2112 fod->data_sg_cnt = 0;
2113 if (fod->total_length) {
2114 ret = nvmet_fc_alloc_tgt_pgs(fod);
2115 if (ret) {
2116 nvmet_req_complete(&fod->req, ret);
2117 return;
2118 }
2119 }
2120 fod->req.sg = fod->data_sg;
2121 fod->req.sg_cnt = fod->data_sg_cnt;
2122 fod->offset = 0;
2123 fod->next_sg = fod->data_sg;
2124 fod->next_sg_offset = 0;
2125
2126 if (fod->io_dir == NVMET_FCP_WRITE) {
2127 /* pull the data over before invoking nvmet layer */
2128 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2129 return;
2130 }
2131
2132 /*
2133 * Reads or no data:
2134 *
2135	 * can invoke the nvmet layer now. If read data, cmd completion will
2136 * push the data
2137 */
2138
2139 fod->req.execute(&fod->req);
2140
2141 return;
2142
2143transport_error:
a97ec51b 2144 nvmet_fc_abort_op(tgtport, fod);
c5343203
JS
2145}
2146
2147/*
2148 * Deferred (workqueue) processing of a received FC-NVME FCP command from the LLD
2149 */
2150static void
2151nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2152{
2153 struct nvmet_fc_fcp_iod *fod =
2154 container_of(work, struct nvmet_fc_fcp_iod, work);
2155 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2156
2157 nvmet_fc_handle_fcp_rqst(tgtport, fod);
2158}
2159
2160/**
2161 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2162 * upon the reception of an NVME FCP CMD IU.
2163 *
2164 * Pass an FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2165 * layer for processing.
2166 *
2167 * The nvmet-fc layer will copy cmd payload to an internal structure for
2168 * processing. As such, upon completion of the routine, the LLDD may
2169 * immediately free/reuse the CMD IU buffer passed in the call.
2170 *
2171 * If this routine returns error, the lldd should abort the exchange (an illustrative LLDD-side sketch follows this function).
2172 *
2173 * @target_port: pointer to the (registered) target port the FCP CMD IU
19b58d94 2174 * was received on.
c5343203
JS
2175 * @fcpreq:     pointer to a fcpreq request structure to be used to reference
2176 *              the FCP exchange for this command.
2177 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
2178 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2179 */
2180int
2181nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2182 struct nvmefc_tgt_fcp_req *fcpreq,
2183 void *cmdiubuf, u32 cmdiubuf_len)
2184{
2185 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2186 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2187 struct nvmet_fc_tgt_queue *queue;
2188 struct nvmet_fc_fcp_iod *fod;
2189
2190 /* validate iu, so the connection id can be used to find the queue */
2191 if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2192 (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2193 (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2194 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2195 return -EIO;
2196
c5343203
JS
2197 queue = nvmet_fc_find_target_queue(tgtport,
2198 be64_to_cpu(cmdiu->connection_id));
2199 if (!queue)
2200 return -ENOTCONN;
2201
2202 /*
2203 * note: reference taken by find_target_queue
2204 * After successful fod allocation, the fod will inherit the
2205 * ownership of that reference and will remove the reference
2206 * when the fod is freed.
2207 */
2208
2209 fod = nvmet_fc_alloc_fcp_iod(queue);
2210 if (!fod) {
2211 /* release the queue lookup reference */
2212 nvmet_fc_tgt_q_put(queue);
2213 return -ENOENT;
2214 }
2215
2216 fcpreq->nvmet_fc_private = fod;
2217 fod->fcpreq = fcpreq;
2218 /*
2219	 * put all admin cmds on hw queue id 0. All io commands are
2220	 * distributed across the hw queues by queue id on a modulo basis
2221 */
2222 fcpreq->hwqid = queue->qid ?
2223 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
2224 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2225
39498fae
JS
2226 if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
2227 queue_work_on(queue->cpu, queue->work_q, &fod->work);
2228 else
2229 nvmet_fc_handle_fcp_rqst(tgtport, fod);
c5343203
JS
2230
2231 return 0;
2232}
2233EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
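/*
 * Editor's sketch (illustrative only, not part of this file): how an LLDD
 * receive path might hand a CMD IU to nvmet_fc_rcv_fcp_req() while honoring
 * the contract documented above.  "struct example_hw_exchange" and the
 * example_lldd_*() helpers are hypothetical LLDD-side names; only
 * nvmet_fc_rcv_fcp_req() is a real nvmet-fc entry point here.
 */
#if 0
static void
example_lldd_recv_fcp_cmd(struct nvmet_fc_target_port *tport,
			  struct example_hw_exchange *xchg)
{
	int ret;

	/* the nvmefc_tgt_fcp_req is embedded in the LLDD's exchange context */
	ret = nvmet_fc_rcv_fcp_req(tport, &xchg->tgt_fcp_req,
				   xchg->cmd_iu_buf, xchg->cmd_iu_len);

	/*
	 * nvmet-fc copied the CMD IU, so the receive buffer can be reposted
	 * immediately, regardless of the return value
	 */
	example_lldd_repost_rcv_buffer(xchg);

	if (ret)
		/* per the comment above: on error, abort the FC exchange */
		example_lldd_abort_exchange(xchg);
}
#endif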
2234
a97ec51b
JS
2235/**
2236 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2237 * upon the reception of an ABTS for an FCP command
2238 *
2239 * Notify the transport that an ABTS has been received for an FCP command
2240 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2241 * LLDD believes the command is still being worked on
2242 * (template_ops->fcp_req_release() has not been called).
2243 *
2244 * The transport will wait for any outstanding work (an op to the LLDD,
2245 * which the lldd should complete with error due to the ABTS; or the
2246 * completion from the nvmet layer of the nvme command), then will
2247 * stop processing and call the fcp_req_release() callback to
2248 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
2249 * to the ABTS either after return from this function (assuming any
2250 * outstanding op work has been terminated) or upon the callback being
2251 * called. An illustrative LLDD-side sketch follows this function.
2252 *
2253 * @target_port: pointer to the (registered) target port the FCP CMD IU
2254 * was received on.
2255 * @fcpreq: pointer to the fcpreq request structure that corresponds
2256 * to the exchange that received the ABTS.
2257 */
2258void
2259nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2260 struct nvmefc_tgt_fcp_req *fcpreq)
2261{
2262 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2263 struct nvmet_fc_tgt_queue *queue;
2264 unsigned long flags;
2265
2266 if (!fod || fod->fcpreq != fcpreq)
2267 /* job appears to have already completed, ignore abort */
2268 return;
2269
2270 queue = fod->queue;
2271
2272 spin_lock_irqsave(&queue->qlock, flags);
2273 if (fod->active) {
2274 /*
2275 * mark as abort. The abort handler, invoked upon completion
2276 * of any work, will detect the aborted status and do the
2277 * callback.
2278 */
2279 spin_lock(&fod->flock);
2280 fod->abort = true;
2281 fod->aborted = true;
2282 spin_unlock(&fod->flock);
2283 }
2284 spin_unlock_irqrestore(&queue->qlock, flags);
2285}
2286EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
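/*
 * Editor's sketch (illustrative only, not part of this file): one way an
 * LLDD might wire up the ABTS handshake described above.  The
 * example_hw_exchange fields and example_lldd_*() helpers are hypothetical;
 * nvmet_fc_rcv_fcp_abort() and the template's fcp_req_release() callback
 * are the real interfaces involved.
 */
#if 0
static void
example_lldd_recv_abts(struct nvmet_fc_target_port *tport,
		       struct example_hw_exchange *xchg)
{
	/* tell nvmet-fc the command is being torn down */
	nvmet_fc_rcv_fcp_abort(tport, &xchg->tgt_fcp_req);

	/* fail any data/rsp op nvmet-fc still has outstanding on the hw */
	example_lldd_terminate_hw_io(xchg);

	/*
	 * hold the BA_ACC until fcp_req_release() hands the i/o context
	 * back, as the comment above permits
	 */
	xchg->send_ba_acc_on_release = true;
}
#endif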
2287
c5343203
JS
2288enum {
2289 FCT_TRADDR_ERR = 0,
2290 FCT_TRADDR_WWNN = 1 << 0,
2291 FCT_TRADDR_WWPN = 1 << 1,
2292};
2293
2294struct nvmet_fc_traddr {
2295 u64 nn;
2296 u64 pn;
2297};
2298
2299static const match_table_t traddr_opt_tokens = {
2300 { FCT_TRADDR_WWNN, "nn-%s" },
2301 { FCT_TRADDR_WWPN, "pn-%s" },
2302 { FCT_TRADDR_ERR, NULL }
2303};
2304
2305static int
2306nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
2307{
2308 substring_t args[MAX_OPT_ARGS];
2309 char *options, *o, *p;
2310 int token, ret = 0;
2311 u64 token64;
2312
2313 options = o = kstrdup(buf, GFP_KERNEL);
2314 if (!options)
2315 return -ENOMEM;
2316
43631357 2317 while ((p = strsep(&o, ":\n")) != NULL) {
c5343203
JS
2318 if (!*p)
2319 continue;
2320
2321 token = match_token(p, traddr_opt_tokens, args);
2322 switch (token) {
2323 case FCT_TRADDR_WWNN:
2324 if (match_u64(args, &token64)) {
2325 ret = -EINVAL;
2326 goto out;
2327 }
2328 traddr->nn = token64;
2329 break;
2330 case FCT_TRADDR_WWPN:
2331 if (match_u64(args, &token64)) {
2332 ret = -EINVAL;
2333 goto out;
2334 }
2335 traddr->pn = token64;
2336 break;
2337 default:
2338 pr_warn("unknown traddr token or missing value '%s'\n",
2339 p);
2340 ret = -EINVAL;
2341 goto out;
2342 }
2343 }
2344
2345out:
2346 kfree(options);
2347 return ret;
2348}
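/*
 * Editor's note (illustrative only): the parser above expects a traddr of
 * the form "nn-<wwnn>:pn-<wwpn>", i.e. the string an admin writes to the
 * nvmet port's addr_traddr attribute.  The WWN values below are made up;
 * the accepted numeric syntax is whatever match_u64() accepts.
 */
#if 0
static void example_traddr_usage(void)
{
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	char example[] = "nn-0x20000090fae0b5f4:pn-0x10000090fae0b5f4";

	if (!nvmet_fc_parse_traddr(&traddr, example))
		pr_info("wwnn=0x%llx wwpn=0x%llx\n",
			(unsigned long long)traddr.nn,
			(unsigned long long)traddr.pn);
}
#endif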
2349
2350static int
2351nvmet_fc_add_port(struct nvmet_port *port)
2352{
2353 struct nvmet_fc_tgtport *tgtport;
2354 struct nvmet_fc_traddr traddr = { 0L, 0L };
2355 unsigned long flags;
2356 int ret;
2357
2358 /* validate the address info */
2359 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2360 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2361 return -EINVAL;
2362
2363 /* map the traddr address info to a target port */
2364
2365 ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
2366 if (ret)
2367 return ret;
2368
2369 ret = -ENXIO;
2370 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2371 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2372 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2373 (tgtport->fc_target_port.port_name == traddr.pn)) {
2374			/* an FC port can map to only one nvmet port id */
2375 if (!tgtport->port) {
2376 tgtport->port = port;
2377 port->priv = tgtport;
568ad51e 2378 nvmet_fc_tgtport_get(tgtport);
c5343203
JS
2379 ret = 0;
2380 } else
2381 ret = -EALREADY;
2382 break;
2383 }
2384 }
2385 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2386 return ret;
2387}
2388
2389static void
2390nvmet_fc_remove_port(struct nvmet_port *port)
2391{
2392 struct nvmet_fc_tgtport *tgtport = port->priv;
2393 unsigned long flags;
2394
2395 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2396 if (tgtport->port == port) {
2397 nvmet_fc_tgtport_put(tgtport);
2398 tgtport->port = NULL;
2399 }
2400 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2401}
2402
2403static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2404 .owner = THIS_MODULE,
2405 .type = NVMF_TRTYPE_FC,
2406 .msdbd = 1,
2407 .add_port = nvmet_fc_add_port,
2408 .remove_port = nvmet_fc_remove_port,
2409 .queue_response = nvmet_fc_fcp_nvme_cmd_done,
2410 .delete_ctrl = nvmet_fc_delete_ctrl,
2411};
2412
2413static int __init nvmet_fc_init_module(void)
2414{
2415 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2416}
2417
2418static void __exit nvmet_fc_exit_module(void)
2419{
2420	/* sanity check - all targetports should be removed */
2421 if (!list_empty(&nvmet_fc_target_list))
2422 pr_warn("%s: targetport list not empty\n", __func__);
2423
2424 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2425
2426 ida_destroy(&nvmet_fc_tgtport_cnt);
2427}
2428
2429module_init(nvmet_fc_init_module);
2430module_exit(nvmet_fc_exit_module);
2431
2432MODULE_LICENSE("GPL v2");