1/*
2 * Copyright (c) 2016 Avago Technologies. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful.
9 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13 * See the GNU General Public License for more details, a copy of which
14 * can be found in the file COPYING included with this package
15 *
16 */
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/blk-mq.h>
21#include <linux/parser.h>
22#include <linux/random.h>
23#include <uapi/scsi/fc/fc_fs.h>
24#include <uapi/scsi/fc/fc_els.h>
25
26#include "nvmet.h"
27#include <linux/nvme-fc-driver.h>
28#include <linux/nvme-fc.h>
29
30
31/* *************************** Data Structures/Defines ****************** */
32
33
34#define NVMET_LS_CTX_COUNT 4
35
36/* for this implementation, assume small single frame rqst/rsp */
37#define NVME_FC_MAX_LS_BUFFER_SIZE 2048
38
39struct nvmet_fc_tgtport;
40struct nvmet_fc_tgt_assoc;
41
42struct nvmet_fc_ls_iod {
43 struct nvmefc_tgt_ls_req *lsreq;
44 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
45
46 struct list_head ls_list; /* tgtport->ls_list */
47
48 struct nvmet_fc_tgtport *tgtport;
49 struct nvmet_fc_tgt_assoc *assoc;
50
51 u8 *rqstbuf;
52 u8 *rspbuf;
53 u16 rqstdatalen;
54 dma_addr_t rspdma;
55
56 struct scatterlist sg[2];
57
58 struct work_struct work;
59} __aligned(sizeof(unsigned long long));
60
61#define NVMET_FC_MAX_KB_PER_XFR 256
62
63enum nvmet_fcp_datadir {
64 NVMET_FCP_NODATA,
65 NVMET_FCP_WRITE,
66 NVMET_FCP_READ,
67 NVMET_FCP_ABORTED,
68};
69
70struct nvmet_fc_fcp_iod {
71 struct nvmefc_tgt_fcp_req *fcpreq;
72
73 struct nvme_fc_cmd_iu cmdiubuf;
74 struct nvme_fc_ersp_iu rspiubuf;
75 dma_addr_t rspdma;
76 struct scatterlist *data_sg;
77 struct scatterlist *next_sg;
78 int data_sg_cnt;
79 u32 next_sg_offset;
80 u32 total_length;
81 u32 offset;
82 enum nvmet_fcp_datadir io_dir;
83 bool active;
84 bool abort;
85 spinlock_t flock;
86
87 struct nvmet_req req;
88 struct work_struct work;
89
90 struct nvmet_fc_tgtport *tgtport;
91 struct nvmet_fc_tgt_queue *queue;
92
93 struct list_head fcp_list; /* tgtport->fcp_list */
94};
95
96struct nvmet_fc_tgtport {
97
98 struct nvmet_fc_target_port fc_target_port;
99
100 struct list_head tgt_list; /* nvmet_fc_target_list */
101 struct device *dev; /* dev for dma mapping */
102 struct nvmet_fc_target_template *ops;
103
104 struct nvmet_fc_ls_iod *iod;
105 spinlock_t lock;
106 struct list_head ls_list;
107 struct list_head ls_busylist;
108 struct list_head assoc_list;
109 struct ida assoc_cnt;
110 struct nvmet_port *port;
111 struct kref ref;
112};
113
114struct nvmet_fc_tgt_queue {
115 bool ninetypercent;
116 u16 qid;
117 u16 sqsize;
118 u16 ersp_ratio;
119 u16 sqhd;
120 int cpu;
121 atomic_t connected;
122 atomic_t sqtail;
123 atomic_t zrspcnt;
124 atomic_t rsn;
125 spinlock_t qlock;
126 struct nvmet_port *port;
127 struct nvmet_cq nvme_cq;
128 struct nvmet_sq nvme_sq;
129 struct nvmet_fc_tgt_assoc *assoc;
130 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
131 struct list_head fod_list;
132 struct workqueue_struct *work_q;
133 struct kref ref;
134} __aligned(sizeof(unsigned long long));
135
136struct nvmet_fc_tgt_assoc {
137 u64 association_id;
138 u32 a_id;
139 struct nvmet_fc_tgtport *tgtport;
140 struct list_head a_list;
141 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES];
142 struct kref ref;
143};
144
145
146static inline int
147nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
148{
149 return (iodptr - iodptr->tgtport->iod);
150}
151
152static inline int
153nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
154{
155 return (fodptr - fodptr->queue->fod);
156}
157
158
159/*
160 * Association and Connection IDs:
161 *
162 * Association ID will have random number in upper 6 bytes and zero
163 * in lower 2 bytes
164 *
165 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
166 *
167 * note: Association ID = Connection ID for queue 0
168 */
169#define BYTES_FOR_QID sizeof(u16)
170#define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
171#define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
172
173static inline u64
174nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
175{
176 return (assoc->association_id | qid);
177}
178
179static inline u64
180nvmet_fc_getassociationid(u64 connectionid)
181{
182 return connectionid & ~NVMET_FC_QUEUEID_MASK;
183}
184
185static inline u16
186nvmet_fc_getqueueid(u64 connectionid)
187{
188 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
189}
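/*
 * Illustrative example (not part of the driver): given an association_id
 * of 0x1122334455660000, nvmet_fc_makeconnid(assoc, 3) produces the
 * connection ID 0x1122334455660003; nvmet_fc_getassociationid() then
 * recovers 0x1122334455660000 and nvmet_fc_getqueueid() recovers qid 3.
 * For queue 0 (the admin queue) the connection ID equals the
 * association ID, matching the note above.
 */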
190
191static inline struct nvmet_fc_tgtport *
192targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
193{
194 return container_of(targetport, struct nvmet_fc_tgtport,
195 fc_target_port);
196}
197
198static inline struct nvmet_fc_fcp_iod *
199nvmet_req_to_fod(struct nvmet_req *nvme_req)
200{
201 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
202}
203
204
205/* *************************** Globals **************************** */
206
207
208static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
209
210static LIST_HEAD(nvmet_fc_target_list);
211static DEFINE_IDA(nvmet_fc_tgtport_cnt);
212
213
214static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
215static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
216static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
217static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
218static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
219static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
220static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
221static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
222
223
224/* *********************** FC-NVME DMA Handling **************************** */
225
226/*
227 * The fcloop device passes in a NULL device pointer. Real LLD's will
228 * pass in a valid device pointer. If NULL is passed to the dma mapping
229 * routines, depending on the platform, it may or may not succeed, and
230 * may crash.
231 *
232 * As such:
233 * Wrap all the dma routines and check the dev pointer.
234 *
235 * For simple mappings (those that return just a dma address), we'll
236 * noop them, returning a dma address of 0.
237 *
238 * On more complex mappings (dma_map_sg), a pseudo routine fills
239 * in the scatter list, setting all dma addresses to 0.
240 */
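/*
 * Example (illustrative only): fcloop registers its target port with a
 * NULL dev, so fc_dma_map_single() below returns a dma address of 0 and
 * fc_dma_mapping_error() reports no error; a real LLDD passing a valid
 * struct device gets the normal dma_map_single()/dma_mapping_error()
 * behavior.
 */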
241
242static inline dma_addr_t
243fc_dma_map_single(struct device *dev, void *ptr, size_t size,
244 enum dma_data_direction dir)
245{
246 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
247}
248
249static inline int
250fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
251{
252 return dev ? dma_mapping_error(dev, dma_addr) : 0;
253}
254
255static inline void
256fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
257 enum dma_data_direction dir)
258{
259 if (dev)
260 dma_unmap_single(dev, addr, size, dir);
261}
262
263static inline void
264fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
265 enum dma_data_direction dir)
266{
267 if (dev)
268 dma_sync_single_for_cpu(dev, addr, size, dir);
269}
270
271static inline void
272fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
273 enum dma_data_direction dir)
274{
275 if (dev)
276 dma_sync_single_for_device(dev, addr, size, dir);
277}
278
279/* pseudo dma_map_sg call */
280static int
281fc_map_sg(struct scatterlist *sg, int nents)
282{
283 struct scatterlist *s;
284 int i;
285
286 WARN_ON(nents == 0 || sg[0].length == 0);
287
288 for_each_sg(sg, s, nents, i) {
289 s->dma_address = 0L;
290#ifdef CONFIG_NEED_SG_DMA_LENGTH
291 s->dma_length = s->length;
292#endif
293 }
294 return nents;
295}
296
297static inline int
298fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
299 enum dma_data_direction dir)
300{
301 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
302}
303
304static inline void
305fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
306 enum dma_data_direction dir)
307{
308 if (dev)
309 dma_unmap_sg(dev, sg, nents, dir);
310}
311
312
313/* *********************** FC-NVME Port Management ************************ */
314
315
316static int
317nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
318{
319 struct nvmet_fc_ls_iod *iod;
320 int i;
321
322 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
323 GFP_KERNEL);
324 if (!iod)
325 return -ENOMEM;
326
327 tgtport->iod = iod;
328
329 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
330 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
331 iod->tgtport = tgtport;
332 list_add_tail(&iod->ls_list, &tgtport->ls_list);
333
334 iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
335 GFP_KERNEL);
336 if (!iod->rqstbuf)
337 goto out_fail;
338
339 iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
340
341 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
342 NVME_FC_MAX_LS_BUFFER_SIZE,
343 DMA_TO_DEVICE);
344 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
345 goto out_fail;
346 }
347
348 return 0;
349
350out_fail:
351 kfree(iod->rqstbuf);
352 list_del(&iod->ls_list);
353 for (iod--, i--; i >= 0; iod--, i--) {
354 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
355 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
356 kfree(iod->rqstbuf);
357 list_del(&iod->ls_list);
358 }
359
360 kfree(iod);
361
362 return -EFAULT;
363}
364
365static void
366nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
367{
368 struct nvmet_fc_ls_iod *iod = tgtport->iod;
369 int i;
370
371 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
372 fc_dma_unmap_single(tgtport->dev,
373 iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
374 DMA_TO_DEVICE);
375 kfree(iod->rqstbuf);
376 list_del(&iod->ls_list);
377 }
378 kfree(tgtport->iod);
379}
380
381static struct nvmet_fc_ls_iod *
382nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
383{
384 static struct nvmet_fc_ls_iod *iod;
385 unsigned long flags;
386
387 spin_lock_irqsave(&tgtport->lock, flags);
388 iod = list_first_entry_or_null(&tgtport->ls_list,
389 struct nvmet_fc_ls_iod, ls_list);
390 if (iod)
391 list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
392 spin_unlock_irqrestore(&tgtport->lock, flags);
393 return iod;
394}
395
396
397static void
398nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
399 struct nvmet_fc_ls_iod *iod)
400{
401 unsigned long flags;
402
403 spin_lock_irqsave(&tgtport->lock, flags);
404 list_move(&iod->ls_list, &tgtport->ls_list);
405 spin_unlock_irqrestore(&tgtport->lock, flags);
406}
407
408static void
409nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
410 struct nvmet_fc_tgt_queue *queue)
411{
412 struct nvmet_fc_fcp_iod *fod = queue->fod;
413 int i;
414
415 for (i = 0; i < queue->sqsize; fod++, i++) {
416 INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
417 fod->tgtport = tgtport;
418 fod->queue = queue;
419 fod->active = false;
420 list_add_tail(&fod->fcp_list, &queue->fod_list);
421 spin_lock_init(&fod->flock);
422
423 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
424 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
425 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
426 list_del(&fod->fcp_list);
427 for (fod--, i--; i >= 0; fod--, i--) {
428 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
429 sizeof(fod->rspiubuf),
430 DMA_TO_DEVICE);
431 fod->rspdma = 0L;
432 list_del(&fod->fcp_list);
433 }
434
435 return;
436 }
437 }
438}
439
440static void
441nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
442 struct nvmet_fc_tgt_queue *queue)
443{
444 struct nvmet_fc_fcp_iod *fod = queue->fod;
445 int i;
446
447 for (i = 0; i < queue->sqsize; fod++, i++) {
448 if (fod->rspdma)
449 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
450 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
451 }
452}
453
454static struct nvmet_fc_fcp_iod *
455nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
456{
457 static struct nvmet_fc_fcp_iod *fod;
458 unsigned long flags;
459
460 spin_lock_irqsave(&queue->qlock, flags);
461 fod = list_first_entry_or_null(&queue->fod_list,
462 struct nvmet_fc_fcp_iod, fcp_list);
463 if (fod) {
464 list_del(&fod->fcp_list);
465 fod->active = true;
466 fod->abort = false;
467 /*
468 * no queue reference is taken, as it was taken by the
469 * queue lookup just prior to the allocation. The iod
470 * will "inherit" that reference.
471 */
472 }
473 spin_unlock_irqrestore(&queue->qlock, flags);
474 return fod;
475}
476
477
478static void
479nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
480 struct nvmet_fc_fcp_iod *fod)
481{
482 unsigned long flags;
483
484 spin_lock_irqsave(&queue->qlock, flags);
485 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
486 fod->active = false;
487 spin_unlock_irqrestore(&queue->qlock, flags);
488
489 /*
490 * release the reference taken at queue lookup and fod allocation
491 */
492 nvmet_fc_tgt_q_put(queue);
493}
494
495static int
496nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
497{
498 int cpu, idx, cnt;
499
500 if (!(tgtport->ops->target_features &
501 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
502 tgtport->ops->max_hw_queues == 1)
503 return WORK_CPU_UNBOUND;
504
505 /* Simple cpu selection based on qid modulo active cpu count */
506 idx = !qid ? 0 : (qid - 1) % num_active_cpus();
507
508 /* find the n'th active cpu */
509 for (cpu = 0, cnt = 0; ; ) {
510 if (cpu_active(cpu)) {
511 if (cnt == idx)
512 break;
513 cnt++;
514 }
515 cpu = (cpu + 1) % num_possible_cpus();
516 }
517
518 return cpu;
519}
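/*
 * Worked example (illustrative only): if the LLDD sets
 * NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED with more than one hardware queue
 * and four CPUs are active (0-3), qid 0 and qid 1 both select the first
 * active CPU, qid 2 the second, and qid 5 wraps back to the first
 * ((5 - 1) % 4 == 0). Otherwise WORK_CPU_UNBOUND is returned and the
 * workqueue is free to pick any CPU.
 */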
520
521static struct nvmet_fc_tgt_queue *
522nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
523 u16 qid, u16 sqsize)
524{
525 struct nvmet_fc_tgt_queue *queue;
526 unsigned long flags;
527 int ret;
528
529 if (qid >= NVMET_NR_QUEUES)
530 return NULL;
531
532 queue = kzalloc((sizeof(*queue) +
533 (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
534 GFP_KERNEL);
535 if (!queue)
536 return NULL;
537
538 if (!nvmet_fc_tgt_a_get(assoc))
539 goto out_free_queue;
540
541 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
542 assoc->tgtport->fc_target_port.port_num,
543 assoc->a_id, qid);
544 if (!queue->work_q)
545 goto out_a_put;
546
547 queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
548 queue->qid = qid;
549 queue->sqsize = sqsize;
550 queue->assoc = assoc;
551 queue->port = assoc->tgtport->port;
552 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
553 INIT_LIST_HEAD(&queue->fod_list);
554 atomic_set(&queue->connected, 0);
555 atomic_set(&queue->sqtail, 0);
556 atomic_set(&queue->rsn, 1);
557 atomic_set(&queue->zrspcnt, 0);
558 spin_lock_init(&queue->qlock);
559 kref_init(&queue->ref);
560
561 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
562
563 ret = nvmet_sq_init(&queue->nvme_sq);
564 if (ret)
565 goto out_fail_iodlist;
566
567 WARN_ON(assoc->queues[qid]);
568 spin_lock_irqsave(&assoc->tgtport->lock, flags);
569 assoc->queues[qid] = queue;
570 spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
571
572 return queue;
573
574out_fail_iodlist:
575 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
576 destroy_workqueue(queue->work_q);
577out_a_put:
578 nvmet_fc_tgt_a_put(assoc);
579out_free_queue:
580 kfree(queue);
581 return NULL;
582}
583
584
585static void
586nvmet_fc_tgt_queue_free(struct kref *ref)
587{
588 struct nvmet_fc_tgt_queue *queue =
589 container_of(ref, struct nvmet_fc_tgt_queue, ref);
590 unsigned long flags;
591
592 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
593 queue->assoc->queues[queue->qid] = NULL;
594 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
595
596 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
597
598 nvmet_fc_tgt_a_put(queue->assoc);
599
600 destroy_workqueue(queue->work_q);
601
602 kfree(queue);
603}
604
605static void
606nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
607{
608 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
609}
610
611static int
612nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
613{
614 return kref_get_unless_zero(&queue->ref);
615}
616
617
618static void
619nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
620 struct nvmefc_tgt_fcp_req *fcpreq)
621{
622 int ret;
623
624 fcpreq->op = NVMET_FCOP_ABORT;
625 fcpreq->offset = 0;
626 fcpreq->timeout = 0;
627 fcpreq->transfer_length = 0;
628 fcpreq->transferred_length = 0;
629 fcpreq->fcp_error = 0;
630 fcpreq->sg_cnt = 0;
631
632 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fcpreq);
633 if (ret)
634 /* should never reach here !! */
635 WARN_ON(1);
636}
637
638
639static void
640nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
641{
642 struct nvmet_fc_fcp_iod *fod = queue->fod;
643 unsigned long flags;
644 int i;
645 bool disconnect;
646
647 disconnect = atomic_xchg(&queue->connected, 0);
648
649 spin_lock_irqsave(&queue->qlock, flags);
650 /* abort outstanding io's */
651 for (i = 0; i < queue->sqsize; fod++, i++) {
652 if (fod->active) {
653 spin_lock(&fod->flock);
654 fod->abort = true;
655 spin_unlock(&fod->flock);
656 }
657 }
658 spin_unlock_irqrestore(&queue->qlock, flags);
659
660 flush_workqueue(queue->work_q);
661
662 if (disconnect)
663 nvmet_sq_destroy(&queue->nvme_sq);
664
665 nvmet_fc_tgt_q_put(queue);
666}
667
668static struct nvmet_fc_tgt_queue *
669nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
670 u64 connection_id)
671{
672 struct nvmet_fc_tgt_assoc *assoc;
673 struct nvmet_fc_tgt_queue *queue;
674 u64 association_id = nvmet_fc_getassociationid(connection_id);
675 u16 qid = nvmet_fc_getqueueid(connection_id);
676 unsigned long flags;
677
678 spin_lock_irqsave(&tgtport->lock, flags);
679 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
680 if (association_id == assoc->association_id) {
681 queue = assoc->queues[qid];
682 if (queue &&
683 (!atomic_read(&queue->connected) ||
684 !nvmet_fc_tgt_q_get(queue)))
685 queue = NULL;
686 spin_unlock_irqrestore(&tgtport->lock, flags);
687 return queue;
688 }
689 }
690 spin_unlock_irqrestore(&tgtport->lock, flags);
691 return NULL;
692}
693
694static struct nvmet_fc_tgt_assoc *
695nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
696{
697 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
698 unsigned long flags;
699 u64 ran;
700 int idx;
701 bool needrandom = true;
702
703 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
704 if (!assoc)
705 return NULL;
706
707 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
708 if (idx < 0)
709 goto out_free_assoc;
710
711 if (!nvmet_fc_tgtport_get(tgtport))
712 goto out_ida_put;
713
714 assoc->tgtport = tgtport;
715 assoc->a_id = idx;
716 INIT_LIST_HEAD(&assoc->a_list);
717 kref_init(&assoc->ref);
718
719 while (needrandom) {
720 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
721 ran = ran << BYTES_FOR_QID_SHIFT;
722
723 spin_lock_irqsave(&tgtport->lock, flags);
724 needrandom = false;
725 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
726 if (ran == tmpassoc->association_id) {
727 needrandom = true;
728 break;
729 }
730 if (!needrandom) {
731 assoc->association_id = ran;
732 list_add_tail(&assoc->a_list, &tgtport->assoc_list);
733 }
734 spin_unlock_irqrestore(&tgtport->lock, flags);
735 }
736
737 return assoc;
738
739out_ida_put:
740 ida_simple_remove(&tgtport->assoc_cnt, idx);
741out_free_assoc:
742 kfree(assoc);
743 return NULL;
744}
745
746static void
747nvmet_fc_target_assoc_free(struct kref *ref)
748{
749 struct nvmet_fc_tgt_assoc *assoc =
750 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
751 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
752 unsigned long flags;
753
754 spin_lock_irqsave(&tgtport->lock, flags);
755 list_del(&assoc->a_list);
756 spin_unlock_irqrestore(&tgtport->lock, flags);
757 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
758 kfree(assoc);
759 nvmet_fc_tgtport_put(tgtport);
760}
761
762static void
763nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
764{
765 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
766}
767
768static int
769nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
770{
771 return kref_get_unless_zero(&assoc->ref);
772}
773
774static void
775nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
776{
777 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
778 struct nvmet_fc_tgt_queue *queue;
779 unsigned long flags;
780 int i;
781
782 spin_lock_irqsave(&tgtport->lock, flags);
783 for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
784 queue = assoc->queues[i];
785 if (queue) {
786 if (!nvmet_fc_tgt_q_get(queue))
787 continue;
788 spin_unlock_irqrestore(&tgtport->lock, flags);
789 nvmet_fc_delete_target_queue(queue);
790 nvmet_fc_tgt_q_put(queue);
791 spin_lock_irqsave(&tgtport->lock, flags);
792 }
793 }
794 spin_unlock_irqrestore(&tgtport->lock, flags);
795
796 nvmet_fc_tgt_a_put(assoc);
797}
798
799static struct nvmet_fc_tgt_assoc *
800nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
801 u64 association_id)
802{
803 struct nvmet_fc_tgt_assoc *assoc;
804 struct nvmet_fc_tgt_assoc *ret = NULL;
805 unsigned long flags;
806
807 spin_lock_irqsave(&tgtport->lock, flags);
808 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
809 if (association_id == assoc->association_id) {
810 ret = assoc;
811 nvmet_fc_tgt_a_get(assoc);
812 break;
813 }
814 }
815 spin_unlock_irqrestore(&tgtport->lock, flags);
816
817 return ret;
818}
819
820
821/**
822 * nvmet_fc_register_targetport - transport entry point called by an
823 * LLDD to register the existence of a local
824 * NVME subsystem FC port.
825 * @pinfo: pointer to information about the port to be registered
826 * @template: LLDD entrypoints and operational parameters for the port
827 * @dev: physical hardware device node port corresponds to. Will be
828 * used for DMA mappings
829 * @portptr: pointer to a local port pointer. Upon success, the routine
830 * will allocate a nvmet_fc_target_port structure and place its
831 * address in the local port pointer. Upon failure, local port
832 * pointer will be set to NULL.
833 *
834 * Returns:
835 * a completion status. Must be 0 upon success; a negative errno
836 * (ex: -ENXIO) upon failure.
837 */
838int
839nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
840 struct nvmet_fc_target_template *template,
841 struct device *dev,
842 struct nvmet_fc_target_port **portptr)
843{
844 struct nvmet_fc_tgtport *newrec;
845 unsigned long flags;
846 int ret, idx;
847
848 if (!template->xmt_ls_rsp || !template->fcp_op ||
849 !template->targetport_delete ||
850 !template->max_hw_queues || !template->max_sgl_segments ||
851 !template->max_dif_sgl_segments || !template->dma_boundary) {
852 ret = -EINVAL;
853 goto out_regtgt_failed;
854 }
855
856 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
857 GFP_KERNEL);
858 if (!newrec) {
859 ret = -ENOMEM;
860 goto out_regtgt_failed;
861 }
862
863 idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
864 if (idx < 0) {
865 ret = -ENOSPC;
866 goto out_fail_kfree;
867 }
868
869 if (!get_device(dev) && dev) {
870 ret = -ENODEV;
871 goto out_ida_put;
872 }
873
874 newrec->fc_target_port.node_name = pinfo->node_name;
875 newrec->fc_target_port.port_name = pinfo->port_name;
876 newrec->fc_target_port.private = &newrec[1];
877 newrec->fc_target_port.port_id = pinfo->port_id;
878 newrec->fc_target_port.port_num = idx;
879 INIT_LIST_HEAD(&newrec->tgt_list);
880 newrec->dev = dev;
881 newrec->ops = template;
882 spin_lock_init(&newrec->lock);
883 INIT_LIST_HEAD(&newrec->ls_list);
884 INIT_LIST_HEAD(&newrec->ls_busylist);
885 INIT_LIST_HEAD(&newrec->assoc_list);
886 kref_init(&newrec->ref);
887 ida_init(&newrec->assoc_cnt);
888
889 ret = nvmet_fc_alloc_ls_iodlist(newrec);
890 if (ret) {
891 ret = -ENOMEM;
892 goto out_free_newrec;
893 }
894
895 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
896 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
897 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
898
899 *portptr = &newrec->fc_target_port;
900 return 0;
901
902out_free_newrec:
903 put_device(dev);
904out_ida_put:
905 ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
906out_fail_kfree:
907 kfree(newrec);
908out_regtgt_failed:
909 *portptr = NULL;
910 return ret;
911}
912EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
913
914
915static void
916nvmet_fc_free_tgtport(struct kref *ref)
917{
918 struct nvmet_fc_tgtport *tgtport =
919 container_of(ref, struct nvmet_fc_tgtport, ref);
920 struct device *dev = tgtport->dev;
921 unsigned long flags;
922
923 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
924 list_del(&tgtport->tgt_list);
925 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
926
927 nvmet_fc_free_ls_iodlist(tgtport);
928
929 /* let the LLDD know we've finished tearing it down */
930 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
931
932 ida_simple_remove(&nvmet_fc_tgtport_cnt,
933 tgtport->fc_target_port.port_num);
934
935 ida_destroy(&tgtport->assoc_cnt);
936
937 kfree(tgtport);
938
939 put_device(dev);
940}
941
942static void
943nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
944{
945 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
946}
947
948static int
949nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
950{
951 return kref_get_unless_zero(&tgtport->ref);
952}
953
954static void
955__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
956{
957 struct nvmet_fc_tgt_assoc *assoc, *next;
958 unsigned long flags;
959
960 spin_lock_irqsave(&tgtport->lock, flags);
961 list_for_each_entry_safe(assoc, next,
962 &tgtport->assoc_list, a_list) {
963 if (!nvmet_fc_tgt_a_get(assoc))
964 continue;
965 spin_unlock_irqrestore(&tgtport->lock, flags);
966 nvmet_fc_delete_target_assoc(assoc);
967 nvmet_fc_tgt_a_put(assoc);
968 spin_lock_irqsave(&tgtport->lock, flags);
969 }
970 spin_unlock_irqrestore(&tgtport->lock, flags);
971}
972
973/*
974 * nvmet layer has called to terminate an association
975 */
976static void
977nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
978{
979 struct nvmet_fc_tgtport *tgtport, *next;
980 struct nvmet_fc_tgt_assoc *assoc;
981 struct nvmet_fc_tgt_queue *queue;
982 unsigned long flags;
983 bool found_ctrl = false;
984
985 /* this is a bit ugly, but don't want to make locks layered */
986 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
987 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
988 tgt_list) {
989 if (!nvmet_fc_tgtport_get(tgtport))
990 continue;
991 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
992
993 spin_lock_irqsave(&tgtport->lock, flags);
994 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
995 queue = assoc->queues[0];
996 if (queue && queue->nvme_sq.ctrl == ctrl) {
997 if (nvmet_fc_tgt_a_get(assoc))
998 found_ctrl = true;
999 break;
1000 }
1001 }
1002 spin_unlock_irqrestore(&tgtport->lock, flags);
1003
1004 nvmet_fc_tgtport_put(tgtport);
1005
1006 if (found_ctrl) {
1007 nvmet_fc_delete_target_assoc(assoc);
1008 nvmet_fc_tgt_a_put(assoc);
1009 return;
1010 }
1011
1012 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1013 }
1014 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1015}
1016
1017/**
1018 * nvmet_fc_unregister_targetport - transport entry point called by an
1019 * LLDD to deregister/remove a previously
1020 * registered local NVME subsystem FC port.
1021 * @target_port: pointer to the (registered) target port that is to be
1022 * deregistered.
1023 *
1024 * Returns:
1025 * a completion status. Must be 0 upon success; a negative errno
1026 * (ex: -ENXIO) upon failure.
1027 */
1028int
1029nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1030{
1031 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1032
1033 /* terminate any outstanding associations */
1034 __nvmet_fc_free_assocs(tgtport);
1035
1036 nvmet_fc_tgtport_put(tgtport);
1037
1038 return 0;
1039}
1040EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1041
1042
1043/* *********************** FC-NVME LS Handling **************************** */
1044
1045
1046static void
1047nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, u32 desc_len, u8 rqst_ls_cmd)
1048{
1049 struct fcnvme_ls_acc_hdr *acc = buf;
1050
1051 acc->w0.ls_cmd = ls_cmd;
1052 acc->desc_list_len = desc_len;
1053 acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1054 acc->rqst.desc_len =
1055 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1056 acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1057}
1058
1059static int
1060nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1061 u8 reason, u8 explanation, u8 vendor)
1062{
1063 struct fcnvme_ls_rjt *rjt = buf;
1064
1065 nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1066 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1067 ls_cmd);
1068 rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1069 rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1070 rjt->rjt.reason_code = reason;
1071 rjt->rjt.reason_explanation = explanation;
1072 rjt->rjt.vendor = vendor;
1073
1074 return sizeof(struct fcnvme_ls_rjt);
1075}
1076
1077/* Validation Error indexes into the string table below */
1078enum {
1079 VERR_NO_ERROR = 0,
1080 VERR_CR_ASSOC_LEN = 1,
1081 VERR_CR_ASSOC_RQST_LEN = 2,
1082 VERR_CR_ASSOC_CMD = 3,
1083 VERR_CR_ASSOC_CMD_LEN = 4,
1084 VERR_ERSP_RATIO = 5,
1085 VERR_ASSOC_ALLOC_FAIL = 6,
1086 VERR_QUEUE_ALLOC_FAIL = 7,
1087 VERR_CR_CONN_LEN = 8,
1088 VERR_CR_CONN_RQST_LEN = 9,
1089 VERR_ASSOC_ID = 10,
1090 VERR_ASSOC_ID_LEN = 11,
1091 VERR_NO_ASSOC = 12,
1092 VERR_CONN_ID = 13,
1093 VERR_CONN_ID_LEN = 14,
1094 VERR_NO_CONN = 15,
1095 VERR_CR_CONN_CMD = 16,
1096 VERR_CR_CONN_CMD_LEN = 17,
1097 VERR_DISCONN_LEN = 18,
1098 VERR_DISCONN_RQST_LEN = 19,
1099 VERR_DISCONN_CMD = 20,
1100 VERR_DISCONN_CMD_LEN = 21,
1101 VERR_DISCONN_SCOPE = 22,
1102 VERR_RS_LEN = 23,
1103 VERR_RS_RQST_LEN = 24,
1104 VERR_RS_CMD = 25,
1105 VERR_RS_CMD_LEN = 26,
1106 VERR_RS_RCTL = 27,
1107 VERR_RS_RO = 28,
1108};
1109
1110static char *validation_errors[] = {
1111 "OK",
1112 "Bad CR_ASSOC Length",
1113 "Bad CR_ASSOC Rqst Length",
1114 "Not CR_ASSOC Cmd",
1115 "Bad CR_ASSOC Cmd Length",
1116 "Bad Ersp Ratio",
1117 "Association Allocation Failed",
1118 "Queue Allocation Failed",
1119 "Bad CR_CONN Length",
1120 "Bad CR_CONN Rqst Length",
1121 "Not Association ID",
1122 "Bad Association ID Length",
1123 "No Association",
1124 "Not Connection ID",
1125 "Bad Connection ID Length",
1126 "No Connection",
1127 "Not CR_CONN Cmd",
1128 "Bad CR_CONN Cmd Length",
1129 "Bad DISCONN Length",
1130 "Bad DISCONN Rqst Length",
1131 "Not DISCONN Cmd",
1132 "Bad DISCONN Cmd Length",
1133 "Bad Disconnect Scope",
1134 "Bad RS Length",
1135 "Bad RS Rqst Length",
1136 "Not RS Cmd",
1137 "Bad RS Cmd Length",
1138 "Bad RS R_CTL",
1139 "Bad RS Relative Offset",
1140};
1141
1142static void
1143nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1144 struct nvmet_fc_ls_iod *iod)
1145{
1146 struct fcnvme_ls_cr_assoc_rqst *rqst =
1147 (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1148 struct fcnvme_ls_cr_assoc_acc *acc =
1149 (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1150 struct nvmet_fc_tgt_queue *queue;
1151 int ret = 0;
1152
1153 memset(acc, 0, sizeof(*acc));
1154
1155 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst))
1156 ret = VERR_CR_ASSOC_LEN;
1157 else if (rqst->desc_list_len !=
1158 fcnvme_lsdesc_len(
1159 sizeof(struct fcnvme_ls_cr_assoc_rqst)))
1160 ret = VERR_CR_ASSOC_RQST_LEN;
1161 else if (rqst->assoc_cmd.desc_tag !=
1162 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1163 ret = VERR_CR_ASSOC_CMD;
1164 else if (rqst->assoc_cmd.desc_len !=
1165 fcnvme_lsdesc_len(
1166 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)))
1167 ret = VERR_CR_ASSOC_CMD_LEN;
1168 else if (!rqst->assoc_cmd.ersp_ratio ||
1169 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1170 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1171 ret = VERR_ERSP_RATIO;
1172
1173 else {
1174 /* new association w/ admin queue */
1175 iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1176 if (!iod->assoc)
1177 ret = VERR_ASSOC_ALLOC_FAIL;
1178 else {
1179 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1180 be16_to_cpu(rqst->assoc_cmd.sqsize));
1181 if (!queue)
1182 ret = VERR_QUEUE_ALLOC_FAIL;
1183 }
1184 }
1185
1186 if (ret) {
1187 dev_err(tgtport->dev,
1188 "Create Association LS failed: %s\n",
1189 validation_errors[ret]);
1190 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1191 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1192 FCNVME_RJT_RC_LOGIC,
1193 FCNVME_RJT_EXP_NONE, 0);
1194 return;
1195 }
1196
1197 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1198 atomic_set(&queue->connected, 1);
1199 queue->sqhd = 0; /* best place to init value */
1200
1201 /* format a response */
1202
1203 iod->lsreq->rsplen = sizeof(*acc);
1204
1205 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1206 fcnvme_lsdesc_len(
1207 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1208 FCNVME_LS_CREATE_ASSOCIATION);
1209 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1210 acc->associd.desc_len =
1211 fcnvme_lsdesc_len(
1212 sizeof(struct fcnvme_lsdesc_assoc_id));
1213 acc->associd.association_id =
1214 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1215 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1216 acc->connectid.desc_len =
1217 fcnvme_lsdesc_len(
1218 sizeof(struct fcnvme_lsdesc_conn_id));
1219 acc->connectid.connection_id = acc->associd.association_id;
1220}
1221
1222static void
1223nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1224 struct nvmet_fc_ls_iod *iod)
1225{
1226 struct fcnvme_ls_cr_conn_rqst *rqst =
1227 (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1228 struct fcnvme_ls_cr_conn_acc *acc =
1229 (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1230 struct nvmet_fc_tgt_queue *queue;
1231 int ret = 0;
1232
1233 memset(acc, 0, sizeof(*acc));
1234
1235 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1236 ret = VERR_CR_CONN_LEN;
1237 else if (rqst->desc_list_len !=
1238 fcnvme_lsdesc_len(
1239 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1240 ret = VERR_CR_CONN_RQST_LEN;
1241 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1242 ret = VERR_ASSOC_ID;
1243 else if (rqst->associd.desc_len !=
1244 fcnvme_lsdesc_len(
1245 sizeof(struct fcnvme_lsdesc_assoc_id)))
1246 ret = VERR_ASSOC_ID_LEN;
1247 else if (rqst->connect_cmd.desc_tag !=
1248 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1249 ret = VERR_CR_CONN_CMD;
1250 else if (rqst->connect_cmd.desc_len !=
1251 fcnvme_lsdesc_len(
1252 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1253 ret = VERR_CR_CONN_CMD_LEN;
1254 else if (!rqst->connect_cmd.ersp_ratio ||
1255 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1256 be16_to_cpu(rqst->connect_cmd.sqsize)))
1257 ret = VERR_ERSP_RATIO;
1258
1259 else {
1260 /* new io queue */
1261 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1262 be64_to_cpu(rqst->associd.association_id));
1263 if (!iod->assoc)
1264 ret = VERR_NO_ASSOC;
1265 else {
1266 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1267 be16_to_cpu(rqst->connect_cmd.qid),
1268 be16_to_cpu(rqst->connect_cmd.sqsize));
1269 if (!queue)
1270 ret = VERR_QUEUE_ALLOC_FAIL;
1271
1272 /* release get taken in nvmet_fc_find_target_assoc */
1273 nvmet_fc_tgt_a_put(iod->assoc);
1274 }
1275 }
1276
1277 if (ret) {
1278 dev_err(tgtport->dev,
1279 "Create Connection LS failed: %s\n",
1280 validation_errors[ret]);
1281 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1282 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1283 (ret == VERR_NO_ASSOC) ?
1284 FCNVME_RJT_RC_INV_ASSOC :
1285 FCNVME_RJT_RC_LOGIC,
1286 FCNVME_RJT_EXP_NONE, 0);
1287 return;
1288 }
1289
1290 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1291 atomic_set(&queue->connected, 1);
1292 queue->sqhd = 0; /* best place to init value */
1293
1294 /* format a response */
1295
1296 iod->lsreq->rsplen = sizeof(*acc);
1297
1298 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1299 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1300 FCNVME_LS_CREATE_CONNECTION);
1301 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1302 acc->connectid.desc_len =
1303 fcnvme_lsdesc_len(
1304 sizeof(struct fcnvme_lsdesc_conn_id));
1305 acc->connectid.connection_id =
1306 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1307 be16_to_cpu(rqst->connect_cmd.qid)));
1308}
1309
1310static void
1311nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1312 struct nvmet_fc_ls_iod *iod)
1313{
1314 struct fcnvme_ls_disconnect_rqst *rqst =
1315 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1316 struct fcnvme_ls_disconnect_acc *acc =
1317 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1318 struct nvmet_fc_tgt_queue *queue = NULL;
1319 struct nvmet_fc_tgt_assoc *assoc;
1320 int ret = 0;
1321 bool del_assoc = false;
1322
1323 memset(acc, 0, sizeof(*acc));
1324
1325 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1326 ret = VERR_DISCONN_LEN;
1327 else if (rqst->desc_list_len !=
1328 fcnvme_lsdesc_len(
1329 sizeof(struct fcnvme_ls_disconnect_rqst)))
1330 ret = VERR_DISCONN_RQST_LEN;
1331 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1332 ret = VERR_ASSOC_ID;
1333 else if (rqst->associd.desc_len !=
1334 fcnvme_lsdesc_len(
1335 sizeof(struct fcnvme_lsdesc_assoc_id)))
1336 ret = VERR_ASSOC_ID_LEN;
1337 else if (rqst->discon_cmd.desc_tag !=
1338 cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1339 ret = VERR_DISCONN_CMD;
1340 else if (rqst->discon_cmd.desc_len !=
1341 fcnvme_lsdesc_len(
1342 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1343 ret = VERR_DISCONN_CMD_LEN;
1344 else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1345 (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1346 ret = VERR_DISCONN_SCOPE;
1347 else {
1348 /* match an active association */
1349 assoc = nvmet_fc_find_target_assoc(tgtport,
1350 be64_to_cpu(rqst->associd.association_id));
1351 iod->assoc = assoc;
1352 if (assoc) {
1353 if (rqst->discon_cmd.scope ==
1354 FCNVME_DISCONN_CONNECTION) {
1355 queue = nvmet_fc_find_target_queue(tgtport,
1356 be64_to_cpu(
1357 rqst->discon_cmd.id));
1358 if (!queue) {
1359 nvmet_fc_tgt_a_put(assoc);
1360 ret = VERR_NO_CONN;
1361 }
1362 }
1363 } else
1364 ret = VERR_NO_ASSOC;
1365 }
1366
1367 if (ret) {
1368 dev_err(tgtport->dev,
1369 "Disconnect LS failed: %s\n",
1370 validation_errors[ret]);
1371 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1372 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1373 (ret == VERR_NO_ASSOC) ?
1374 FCNVME_RJT_RC_INV_ASSOC :
1375 (ret == VERR_NO_CONN) ?
1376 FCNVME_RJT_RC_INV_CONN :
1377 FCNVME_RJT_RC_LOGIC,
1378 FCNVME_RJT_EXP_NONE, 0);
1379 return;
1380 }
1381
1382 /* format a response */
1383
1384 iod->lsreq->rsplen = sizeof(*acc);
1385
1386 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1387 fcnvme_lsdesc_len(
1388 sizeof(struct fcnvme_ls_disconnect_acc)),
1389 FCNVME_LS_DISCONNECT);
1390
1391
1392 /* are we to delete a Connection ID (queue) */
1393 if (queue) {
1394 int qid = queue->qid;
1395
1396 nvmet_fc_delete_target_queue(queue);
1397
1398 /* release the get taken by find_target_queue */
1399 nvmet_fc_tgt_q_put(queue);
1400
1401 /* tear association down if io queue terminated */
1402 if (!qid)
1403 del_assoc = true;
1404 }
1405
1406 /* release get taken in nvmet_fc_find_target_assoc */
1407 nvmet_fc_tgt_a_put(iod->assoc);
1408
1409 if (del_assoc)
1410 nvmet_fc_delete_target_assoc(iod->assoc);
1411}
1412
1413
1414/* *********************** NVME Ctrl Routines **************************** */
1415
1416
1417static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1418
1419static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1420
1421static void
1422nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1423{
1424 struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1425 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1426
1427 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1428 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1429 nvmet_fc_free_ls_iod(tgtport, iod);
1430 nvmet_fc_tgtport_put(tgtport);
1431}
1432
1433static void
1434nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1435 struct nvmet_fc_ls_iod *iod)
1436{
1437 int ret;
1438
1439 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1440 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1441
1442 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1443 if (ret)
1444 nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1445}
1446
1447/*
1448 * Actual processing routine for received FC-NVME LS Requests from the LLD
1449 */
1450static void
1451nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1452 struct nvmet_fc_ls_iod *iod)
1453{
1454 struct fcnvme_ls_rqst_w0 *w0 =
1455 (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1456
1457 iod->lsreq->nvmet_fc_private = iod;
1458 iod->lsreq->rspbuf = iod->rspbuf;
1459 iod->lsreq->rspdma = iod->rspdma;
1460 iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1461 /* Be preventive: handlers will later set this to a valid length */
1462 iod->lsreq->rsplen = 0;
1463
1464 iod->assoc = NULL;
1465
1466 /*
1467 * handlers:
1468 * parse request input, execute the request, and format the
1469 * LS response
1470 */
1471 switch (w0->ls_cmd) {
1472 case FCNVME_LS_CREATE_ASSOCIATION:
1473 /* Creates Association and initial Admin Queue/Connection */
1474 nvmet_fc_ls_create_association(tgtport, iod);
1475 break;
1476 case FCNVME_LS_CREATE_CONNECTION:
1477 /* Creates an IO Queue/Connection */
1478 nvmet_fc_ls_create_connection(tgtport, iod);
1479 break;
1480 case FCNVME_LS_DISCONNECT:
1481 /* Terminate a Queue/Connection or the Association */
1482 nvmet_fc_ls_disconnect(tgtport, iod);
1483 break;
1484 default:
1485 iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1486 NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1487 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1488 }
1489
1490 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1491}
1492
1493/*
1494 * Actual processing routine for received FC-NVME LS Requests from the LLD
1495 */
1496static void
1497nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1498{
1499 struct nvmet_fc_ls_iod *iod =
1500 container_of(work, struct nvmet_fc_ls_iod, work);
1501 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1502
1503 nvmet_fc_handle_ls_rqst(tgtport, iod);
1504}
1505
1506
1507/**
1508 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1509 * upon the reception of an NVME LS request.
1510 *
1511 * The nvmet-fc layer will copy payload to an internal structure for
1512 * processing. As such, upon completion of the routine, the LLDD may
1513 * immediately free/reuse the LS request buffer passed in the call.
1514 *
1515 * If this routine returns an error, the LLDD should abort the exchange.
1516 *
1517 * @target_port: pointer to the (registered) target port the LS was
1518 * received on.
1519 * @lsreq: pointer to a lsreq request structure to be used to reference
1520 * the exchange corresponding to the LS.
1521 * @lsreqbuf: pointer to the buffer containing the LS Request
1522 * @lsreqbuf_len: length, in bytes, of the received LS request
1523 */
1524int
1525nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1526 struct nvmefc_tgt_ls_req *lsreq,
1527 void *lsreqbuf, u32 lsreqbuf_len)
1528{
1529 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1530 struct nvmet_fc_ls_iod *iod;
1531
1532 if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1533 return -E2BIG;
1534
1535 if (!nvmet_fc_tgtport_get(tgtport))
1536 return -ESHUTDOWN;
1537
1538 iod = nvmet_fc_alloc_ls_iod(tgtport);
1539 if (!iod) {
1540 nvmet_fc_tgtport_put(tgtport);
1541 return -ENOENT;
1542 }
1543
1544 iod->lsreq = lsreq;
1545 iod->fcpreq = NULL;
1546 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1547 iod->rqstdatalen = lsreqbuf_len;
1548
1549 schedule_work(&iod->work);
1550
1551 return 0;
1552}
1553EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
1554
1555
1556/*
1557 * **********************
1558 * Start of FCP handling
1559 * **********************
1560 */
1561
1562static int
1563nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1564{
1565 struct scatterlist *sg;
1566 struct page *page;
1567 unsigned int nent;
1568 u32 page_len, length;
1569 int i = 0;
1570
1571 length = fod->total_length;
1572 nent = DIV_ROUND_UP(length, PAGE_SIZE);
1573 sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
1574 if (!sg)
1575 goto out;
1576
1577 sg_init_table(sg, nent);
1578
1579 while (length) {
1580 page_len = min_t(u32, length, PAGE_SIZE);
1581
1582 page = alloc_page(GFP_KERNEL);
1583 if (!page)
1584 goto out_free_pages;
1585
1586 sg_set_page(&sg[i], page, page_len, 0);
1587 length -= page_len;
1588 i++;
1589 }
1590
1591 fod->data_sg = sg;
1592 fod->data_sg_cnt = nent;
1593 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1594 ((fod->io_dir == NVMET_FCP_WRITE) ?
1595 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1596 /* note: write from initiator perspective */
1597
1598 return 0;
1599
1600out_free_pages:
1601 while (i > 0) {
1602 i--;
1603 __free_page(sg_page(&sg[i]));
1604 }
1605 kfree(sg);
1606 fod->data_sg = NULL;
1607 fod->data_sg_cnt = 0;
1608out:
1609 return NVME_SC_INTERNAL;
1610}
1611
1612static void
1613nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1614{
1615 struct scatterlist *sg;
1616 int count;
1617
1618 if (!fod->data_sg || !fod->data_sg_cnt)
1619 return;
1620
1621 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1622 ((fod->io_dir == NVMET_FCP_WRITE) ?
1623 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1624 for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
1625 __free_page(sg_page(sg));
1626 kfree(fod->data_sg);
1627}
1628
1629
1630static bool
1631queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1632{
1633 u32 sqtail, used;
1634
1635 /* egad, this is ugly. And sqtail is just a best guess */
1636 sqtail = atomic_read(&q->sqtail) % q->sqsize;
1637
1638 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1639 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
1640}
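/*
 * Worked example (illustrative only): with sqsize 32, sqhd 5 and a
 * best-guess sqtail of 3, used = 3 + 32 - 5 = 30, and
 * 30 * 10 = 300 >= (32 - 1) * 9 = 279, so the queue is treated as at
 * least 90% full and the caller will send a full ERSP.
 */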
1641
1642/*
1643 * Prep RSP payload.
1644 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1645 */
1646static void
1647nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1648 struct nvmet_fc_fcp_iod *fod)
1649{
1650 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1651 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1652 struct nvme_completion *cqe = &ersp->cqe;
1653 u32 *cqewd = (u32 *)cqe;
1654 bool send_ersp = false;
1655 u32 rsn, rspcnt, xfr_length;
1656
1657 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1658 xfr_length = fod->total_length;
1659 else
1660 xfr_length = fod->offset;
1661
1662 /*
1663 * check to see if we can send a 0's rsp.
1664 * Note: to send a 0's response, the NVME-FC host transport will
1665 * recreate the CQE. The host transport knows: sq id, SQHD (last
1666 * seen in an ersp), and command_id. Thus it will create a
1667 * zero-filled CQE with those known fields filled in. Transport
1668 * must send an ersp for any condition where the cqe won't match
1669 * this.
1670 *
1671 * Here are the FC-NVME mandated cases where we must send an ersp:
1672 * every N responses, where N=ersp_ratio
1673 * force fabric commands to send ersp's (not in FC-NVME but good
1674 * practice)
1675 * normal cmds: any time status is non-zero, or status is zero
1676 * but words 0 or 1 are non-zero.
1677 * the SQ is 90% or more full
1678 * the cmd is a fused command
1679 * transferred data length not equal to cmd iu length
1680 */
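/*
 * Example (illustrative only): with ersp_ratio 8, every 8th response
 * on this queue (rspcnt % 8 == 0) is sent as a full ERSP even if the
 * CQE would otherwise qualify for the zero-filled response path below.
 */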
1681 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1682 if (!(rspcnt % fod->queue->ersp_ratio) ||
1683 sqe->opcode == nvme_fabrics_command ||
1684 xfr_length != fod->total_length ||
1685 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1686 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1687 queue_90percent_full(fod->queue, cqe->sq_head))
1688 send_ersp = true;
1689
1690 /* re-set the fields */
1691 fod->fcpreq->rspaddr = ersp;
1692 fod->fcpreq->rspdma = fod->rspdma;
1693
1694 if (!send_ersp) {
1695 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1696 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1697 } else {
1698 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1699 rsn = atomic_inc_return(&fod->queue->rsn);
1700 ersp->rsn = cpu_to_be32(rsn);
1701 ersp->xfrd_len = cpu_to_be32(xfr_length);
1702 fod->fcpreq->rsplen = sizeof(*ersp);
1703 }
1704
1705 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1706 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1707}
1708
1709static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1710
1711static void
1712nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1713 struct nvmet_fc_fcp_iod *fod)
1714{
1715 int ret;
1716
1717 fod->fcpreq->op = NVMET_FCOP_RSP;
1718 fod->fcpreq->timeout = 0;
1719
1720 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1721
1722 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1723 if (ret)
1724 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1725}
1726
1727static void
1728nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1729 struct nvmet_fc_fcp_iod *fod, u8 op)
1730{
1731 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1732 struct scatterlist *sg, *datasg;
1733 u32 tlen, sg_off;
1734 int ret;
1735
1736 fcpreq->op = op;
1737 fcpreq->offset = fod->offset;
1738 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1739 tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
1740 (fod->total_length - fod->offset));
1741 tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
1742 tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
1743 * PAGE_SIZE);
1744 fcpreq->transfer_length = tlen;
1745 fcpreq->transferred_length = 0;
1746 fcpreq->fcp_error = 0;
1747 fcpreq->rsplen = 0;
1748
1749 fcpreq->sg_cnt = 0;
1750
1751 datasg = fod->next_sg;
1752 sg_off = fod->next_sg_offset;
1753
1754 for (sg = fcpreq->sg ; tlen; sg++) {
1755 *sg = *datasg;
1756 if (sg_off) {
1757 sg->offset += sg_off;
1758 sg->length -= sg_off;
1759 sg->dma_address += sg_off;
1760 sg_off = 0;
1761 }
1762 if (tlen < sg->length) {
1763 sg->length = tlen;
1764 fod->next_sg = datasg;
1765 fod->next_sg_offset += tlen;
1766 } else if (tlen == sg->length) {
1767 fod->next_sg_offset = 0;
1768 fod->next_sg = sg_next(datasg);
1769 } else {
1770 fod->next_sg_offset = 0;
1771 datasg = sg_next(datasg);
1772 }
1773 tlen -= sg->length;
1774 fcpreq->sg_cnt++;
1775 }
1776
1777 /*
1778 * If the last READDATA request: check if LLDD supports
1779 * combined xfr with response.
1780 */
1781 if ((op == NVMET_FCOP_READDATA) &&
1782 ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
1783 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1784 fcpreq->op = NVMET_FCOP_READDATA_RSP;
1785 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1786 }
1787
1788 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1789 if (ret) {
1790 /*
1791 * should be ok to set w/o lock as it's in the thread of
1792 * execution (not an async timer routine) and doesn't
1793 * contend with any clearing action
1794 */
1795 fod->abort = true;
1796
1797 if (op == NVMET_FCOP_WRITEDATA)
1798 nvmet_req_complete(&fod->req,
1799 NVME_SC_FC_TRANSPORT_ERROR);
1800 else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1801 fcpreq->fcp_error = ret;
1802 fcpreq->transferred_length = 0;
1803 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1804 }
1805 }
1806}
1807
1808static void
1809nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
1810{
1811 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
1812 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1813 unsigned long flags;
1814 bool abort;
1815
1816 spin_lock_irqsave(&fod->flock, flags);
1817 abort = fod->abort;
1818 spin_unlock_irqrestore(&fod->flock, flags);
1819
1820 /* if in the middle of an io and we need to tear down */
1821 if (abort && fcpreq->op != NVMET_FCOP_ABORT) {
1822 /* data no longer needed */
1823 nvmet_fc_free_tgt_pgs(fod);
1824
1825 nvmet_req_complete(&fod->req, fcpreq->fcp_error);
1826 return;
1827 }
1828
1829 switch (fcpreq->op) {
1830
1831 case NVMET_FCOP_WRITEDATA:
1832 if (fcpreq->fcp_error ||
1833 fcpreq->transferred_length != fcpreq->transfer_length) {
1834 nvmet_req_complete(&fod->req,
1835 NVME_SC_FC_TRANSPORT_ERROR);
1836 return;
1837 }
1838
1839 fod->offset += fcpreq->transferred_length;
1840 if (fod->offset != fod->total_length) {
1841 /* transfer the next chunk */
1842 nvmet_fc_transfer_fcp_data(tgtport, fod,
1843 NVMET_FCOP_WRITEDATA);
1844 return;
1845 }
1846
1847 /* data transfer complete, resume with nvmet layer */
1848
1849 fod->req.execute(&fod->req);
1850
1851 break;
1852
1853 case NVMET_FCOP_READDATA:
1854 case NVMET_FCOP_READDATA_RSP:
1855 if (fcpreq->fcp_error ||
1856 fcpreq->transferred_length != fcpreq->transfer_length) {
1857 /* data no longer needed */
1858 nvmet_fc_free_tgt_pgs(fod);
1859
1860 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1861 return;
1862 }
1863
1864 /* success */
1865
1866 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
1867 /* data no longer needed */
1868 nvmet_fc_free_tgt_pgs(fod);
1869 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
1870 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1871 nvmet_fc_free_fcp_iod(fod->queue, fod);
1872 return;
1873 }
1874
1875 fod->offset += fcpreq->transferred_length;
1876 if (fod->offset != fod->total_length) {
1877 /* transfer the next chunk */
1878 nvmet_fc_transfer_fcp_data(tgtport, fod,
1879 NVMET_FCOP_READDATA);
1880 return;
1881 }
1882
1883 /* data transfer complete, send response */
1884
1885 /* data no longer needed */
1886 nvmet_fc_free_tgt_pgs(fod);
1887
1888 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
1889
1890 break;
1891
1892 case NVMET_FCOP_RSP:
1893 case NVMET_FCOP_ABORT:
1894 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
1895 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1896 nvmet_fc_free_fcp_iod(fod->queue, fod);
1897 break;
1898
1899 default:
1900 nvmet_fc_free_tgt_pgs(fod);
1901 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1902 break;
1903 }
1904}
1905
1906/*
1907 * actual completion handler after execution by the nvmet layer
1908 */
1909static void
1910__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
1911 struct nvmet_fc_fcp_iod *fod, int status)
1912{
1913 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1914 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
1915 unsigned long flags;
1916 bool abort;
1917
1918 spin_lock_irqsave(&fod->flock, flags);
1919 abort = fod->abort;
1920 spin_unlock_irqrestore(&fod->flock, flags);
1921
1922 /* if we have a CQE, snoop the last sq_head value */
1923 if (!status)
1924 fod->queue->sqhd = cqe->sq_head;
1925
1926 if (abort) {
1927 /* data no longer needed */
1928 nvmet_fc_free_tgt_pgs(fod);
1929
1930 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1931 return;
1932 }
1933
1934 /* if an error handling the cmd post initial parsing */
1935 if (status) {
1936 /* fudge up a failed CQE status for our transport error */
1937 memset(cqe, 0, sizeof(*cqe));
1938 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
1939 cqe->sq_id = cpu_to_le16(fod->queue->qid);
1940 cqe->command_id = sqe->command_id;
1941 cqe->status = cpu_to_le16(status);
1942 } else {
1943
1944 /*
1945 * try to push the data even if the SQE status is non-zero.
1946 * There may be a status where data still was intended to
1947 * be moved
1948 */
1949 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
1950 /* push the data over before sending rsp */
1951 nvmet_fc_transfer_fcp_data(tgtport, fod,
1952 NVMET_FCOP_READDATA);
1953 return;
1954 }
1955
1956 /* writes & no data - fall thru */
1957 }
1958
1959 /* data no longer needed */
1960 nvmet_fc_free_tgt_pgs(fod);
1961
1962 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
1963}
1964
1965
1966static void
1967nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
1968{
1969 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
1970 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1971
1972 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
1973}
1974
1975
1976/*
1977 * Actual processing routine for received FC-NVME FCP Requests from the LLD
1978 */
1979void
1980nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
1981 struct nvmet_fc_fcp_iod *fod)
1982{
1983 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
1984 int ret;
1985
1986 /*
1987	 * Fused commands are currently not supported in the Linux
1988	 * implementation.
1989	 *
1990	 * As such, the FC transport does not examine fused commands,
1991	 * nor does it hold delivery to the upper layer until both
1992	 * commands have arrived (ordered by CSN).
1993 */
1994
1995 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
1996
1997 fod->total_length = be32_to_cpu(cmdiu->data_len);
1998 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
1999 fod->io_dir = NVMET_FCP_WRITE;
2000 if (!nvme_is_write(&cmdiu->sqe))
2001 goto transport_error;
2002 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2003 fod->io_dir = NVMET_FCP_READ;
2004 if (nvme_is_write(&cmdiu->sqe))
2005 goto transport_error;
2006 } else {
2007 fod->io_dir = NVMET_FCP_NODATA;
2008 if (fod->total_length)
2009 goto transport_error;
2010 }
2011
2012 fod->req.cmd = &fod->cmdiubuf.sqe;
2013 fod->req.rsp = &fod->rspiubuf.cqe;
2014 fod->req.port = fod->queue->port;
2015
2016 /* ensure nvmet handlers will set cmd handler callback */
2017 fod->req.execute = NULL;
2018
2019 /* clear any response payload */
2020 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2021
2022 ret = nvmet_req_init(&fod->req,
2023 &fod->queue->nvme_cq,
2024 &fod->queue->nvme_sq,
2025 &nvmet_fc_tgt_fcp_ops);
2026 if (!ret) { /* bad SQE content */
2027 nvmet_fc_abort_op(tgtport, fod->fcpreq);
2028 return;
2029 }
2030
2031 /* keep a running counter of tail position */
2032 atomic_inc(&fod->queue->sqtail);
2033
2034 fod->data_sg = NULL;
2035 fod->data_sg_cnt = 0;
2036 if (fod->total_length) {
2037 ret = nvmet_fc_alloc_tgt_pgs(fod);
2038 if (ret) {
2039 nvmet_req_complete(&fod->req, ret);
2040 return;
2041 }
2042 }
2043 fod->req.sg = fod->data_sg;
2044 fod->req.sg_cnt = fod->data_sg_cnt;
2045 fod->offset = 0;
2046 fod->next_sg = fod->data_sg;
2047 fod->next_sg_offset = 0;
2048
2049 if (fod->io_dir == NVMET_FCP_WRITE) {
2050		/* pull the data over before invoking the nvmet layer */
2051 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2052 return;
2053 }
2054
2055 /*
2056 * Reads or no data:
2057 *
2058	 * the nvmet layer can be invoked now. For reads, command completion
2059	 * will push the data to the host.
2060 */
2061
2062 fod->req.execute(&fod->req);
2063
2064 return;
2065
2066transport_error:
2067 nvmet_fc_abort_op(tgtport, fod->fcpreq);
2068}
2069
2070/*
2071 * Work-queue context wrapper that processes a received FC-NVME FCP request from the LLDD
2072 */
2073static void
2074nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2075{
2076 struct nvmet_fc_fcp_iod *fod =
2077 container_of(work, struct nvmet_fc_fcp_iod, work);
2078 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2079
2080 nvmet_fc_handle_fcp_rqst(tgtport, fod);
2081}
2082
2083/**
2084 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2085 * upon the reception of an NVME FCP CMD IU.
2086 *
2087 * Pass an FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2088 * layer for processing.
2089 *
2090 * The nvmet-fc layer will copy the cmd payload to an internal structure for
2091 * processing. As such, upon completion of the routine, the LLDD may
2092 * immediately free/reuse the CMD IU buffer passed in the call.
2093 *
2094 * If this routine returns an error, the LLDD should abort the exchange.
2095 *
2096 * @target_port: pointer to the (registered) target port the FCP CMD IU
2097 * was received on.
2098 * @fcpreq: pointer to a fcpreq request structure to be used to reference
2099 * the FC exchange corresponding to this FCP command.
2100 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
2101 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2102 */
2103int
2104nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2105 struct nvmefc_tgt_fcp_req *fcpreq,
2106 void *cmdiubuf, u32 cmdiubuf_len)
2107{
2108 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2109 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2110 struct nvmet_fc_tgt_queue *queue;
2111 struct nvmet_fc_fcp_iod *fod;
2112
2113 /* validate iu, so the connection id can be used to find the queue */
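	/* (iu_len is expressed in 32-bit words, hence the sizeof()/4 comparison) */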
2114 if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2115 (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2116 (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2117 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2118 return -EIO;
2119
2120
2121 queue = nvmet_fc_find_target_queue(tgtport,
2122 be64_to_cpu(cmdiu->connection_id));
2123 if (!queue)
2124 return -ENOTCONN;
2125
2126 /*
2127 * note: reference taken by find_target_queue
2128 * After successful fod allocation, the fod will inherit the
2129 * ownership of that reference and will remove the reference
2130 * when the fod is freed.
2131 */
2132
2133 fod = nvmet_fc_alloc_fcp_iod(queue);
2134 if (!fod) {
2135 /* release the queue lookup reference */
2136 nvmet_fc_tgt_q_put(queue);
2137 return -ENOENT;
2138 }
2139
2140 fcpreq->nvmet_fc_private = fod;
2141 fod->fcpreq = fcpreq;
2142 /*
2143	 * put all admin cmds on hw queue id 0; io commands are spread
2144	 * across the hw queues on a modulo basis
2145 */
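	/*
	 * e.g. (illustrative numbers only): with max_hw_queues == 4, io
	 * queue qids 1..4 map to hwqids 0..3 and qid 5 wraps to hwqid 0;
	 * the admin queue (qid 0) always uses hwqid 0.
	 */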
2146 fcpreq->hwqid = queue->qid ?
2147 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
2148 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2149
2150 queue_work_on(queue->cpu, queue->work_q, &fod->work);
2151
2152 return 0;
2153}
2154EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
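
/*
 * Illustrative only (not part of the driver): a minimal sketch of how an
 * LLDD's receive path might hand an FCP CMD IU to nvmet-fc.  The lldd_*
 * names and the rx descriptor layout below are assumptions invented for
 * this example; only nvmet_fc_rcv_fcp_req() itself is real.
 */
#if 0
static void lldd_recv_fcp_cmd(struct lldd_rx_desc *rx)
{
	/* per-exchange request structure the LLDD allocated for this cmd */
	struct nvmefc_tgt_fcp_req *fcpreq = &rx->tgt_fcp_req;
	int ret;

	/* nvmet-fc copies the CMD IU, so rx->cmd_iu may be reused on return */
	ret = nvmet_fc_rcv_fcp_req(rx->targetport, fcpreq,
				   rx->cmd_iu, rx->cmd_iu_len);
	if (ret)
		/* per the contract above, the lldd must abort the exchange */
		lldd_abort_exchange(rx);
}
#endif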
2155
2156enum {
2157 FCT_TRADDR_ERR = 0,
2158 FCT_TRADDR_WWNN = 1 << 0,
2159 FCT_TRADDR_WWPN = 1 << 1,
2160};
2161
2162struct nvmet_fc_traddr {
2163 u64 nn;
2164 u64 pn;
2165};
2166
2167static const match_table_t traddr_opt_tokens = {
2168 { FCT_TRADDR_WWNN, "nn-%s" },
2169 { FCT_TRADDR_WWPN, "pn-%s" },
2170 { FCT_TRADDR_ERR, NULL }
2171};
2172
2173static int
2174nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
2175{
2176 substring_t args[MAX_OPT_ARGS];
2177 char *options, *o, *p;
2178 int token, ret = 0;
2179 u64 token64;
2180
2181 options = o = kstrdup(buf, GFP_KERNEL);
2182 if (!options)
2183 return -ENOMEM;
2184
2185 while ((p = strsep(&o, ",\n")) != NULL) {
2186 if (!*p)
2187 continue;
2188
2189 token = match_token(p, traddr_opt_tokens, args);
2190 switch (token) {
2191 case FCT_TRADDR_WWNN:
2192 if (match_u64(args, &token64)) {
2193 ret = -EINVAL;
2194 goto out;
2195 }
2196 traddr->nn = token64;
2197 break;
2198 case FCT_TRADDR_WWPN:
2199 if (match_u64(args, &token64)) {
2200 ret = -EINVAL;
2201 goto out;
2202 }
2203 traddr->pn = token64;
2204 break;
2205 default:
2206 pr_warn("unknown traddr token or missing value '%s'\n",
2207 p);
2208 ret = -EINVAL;
2209 goto out;
2210 }
2211 }
2212
2213out:
2214 kfree(options);
2215 return ret;
2216}
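
/*
 * For illustration only (placeholder WWNs): the traddr string, typically
 * written through the nvmet configfs port's addr_traddr attribute, is
 * expected in the form "nn-<wwnn>,pn-<wwpn>", e.g.
 *
 *	nn-0x20000090fa942779,pn-0x10000090fa942779
 *
 * nvmet_fc_parse_traddr() fills traddr->nn/traddr->pn from it, and
 * nvmet_fc_add_port() then matches those values against a registered
 * targetport's node_name/port_name.
 */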
2217
2218static int
2219nvmet_fc_add_port(struct nvmet_port *port)
2220{
2221 struct nvmet_fc_tgtport *tgtport;
2222 struct nvmet_fc_traddr traddr = { 0L, 0L };
2223 unsigned long flags;
2224 int ret;
2225
2226 /* validate the address info */
2227 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2228 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2229 return -EINVAL;
2230
2231 /* map the traddr address info to a target port */
2232
2233 ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
2234 if (ret)
2235 return ret;
2236
2237 ret = -ENXIO;
2238 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2239 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2240 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2241 (tgtport->fc_target_port.port_name == traddr.pn)) {
2242			/* an FC port can map to only one nvmet port id */
2243 if (!tgtport->port) {
2244 tgtport->port = port;
2245 port->priv = tgtport;
2246 ret = 0;
2247 } else
2248 ret = -EALREADY;
2249 break;
2250 }
2251 }
2252 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2253 return ret;
2254}
2255
2256static void
2257nvmet_fc_remove_port(struct nvmet_port *port)
2258{
2259 struct nvmet_fc_tgtport *tgtport = port->priv;
2260 unsigned long flags;
2261
2262 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2263 if (tgtport->port == port) {
2264 nvmet_fc_tgtport_put(tgtport);
2265 tgtport->port = NULL;
2266 }
2267 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2268}
2269
2270static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2271 .owner = THIS_MODULE,
2272 .type = NVMF_TRTYPE_FC,
2273 .msdbd = 1,
2274 .add_port = nvmet_fc_add_port,
2275 .remove_port = nvmet_fc_remove_port,
2276 .queue_response = nvmet_fc_fcp_nvme_cmd_done,
2277 .delete_ctrl = nvmet_fc_delete_ctrl,
2278};
2279
2280static int __init nvmet_fc_init_module(void)
2281{
2282 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2283}
2284
2285static void __exit nvmet_fc_exit_module(void)
2286{
2287	/* sanity check - all targetports should be removed */
2288 if (!list_empty(&nvmet_fc_target_list))
2289 pr_warn("%s: targetport list not empty\n", __func__);
2290
2291 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2292
2293 ida_destroy(&nvmet_fc_tgtport_cnt);
2294}
2295
2296module_init(nvmet_fc_init_module);
2297module_exit(nvmet_fc_exit_module);
2298
2299MODULE_LICENSE("GPL v2");