drivers/nvme/target/fc.c
1 /*
2 * Copyright (c) 2016 Avago Technologies. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful.
9 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13 * See the GNU General Public License for more details, a copy of which
14 * can be found in the file COPYING included with this package
15 *
16 */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/blk-mq.h>
21 #include <linux/parser.h>
22 #include <linux/random.h>
23 #include <uapi/scsi/fc/fc_fs.h>
24 #include <uapi/scsi/fc/fc_els.h>
25
26 #include "nvmet.h"
27 #include <linux/nvme-fc-driver.h>
28 #include <linux/nvme-fc.h>
29
30
31 /* *************************** Data Structures/Defines ****************** */
32
33
34 #define NVMET_LS_CTX_COUNT 4
35
36 /* for this implementation, assume small single frame rqst/rsp */
37 #define NVME_FC_MAX_LS_BUFFER_SIZE 2048
38
39 struct nvmet_fc_tgtport;
40 struct nvmet_fc_tgt_assoc;
41
42 struct nvmet_fc_ls_iod {
43 struct nvmefc_tgt_ls_req *lsreq;
44 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
45
46 struct list_head ls_list; /* tgtport->ls_list */
47
48 struct nvmet_fc_tgtport *tgtport;
49 struct nvmet_fc_tgt_assoc *assoc;
50
51 u8 *rqstbuf;
52 u8 *rspbuf;
53 u16 rqstdatalen;
54 dma_addr_t rspdma;
55
56 struct scatterlist sg[2];
57
58 struct work_struct work;
59 } __aligned(sizeof(unsigned long long));
60
61 #define NVMET_FC_MAX_KB_PER_XFR 256
62
63 enum nvmet_fcp_datadir {
64 NVMET_FCP_NODATA,
65 NVMET_FCP_WRITE,
66 NVMET_FCP_READ,
67 NVMET_FCP_ABORTED,
68 };
69
70 struct nvmet_fc_fcp_iod {
71 struct nvmefc_tgt_fcp_req *fcpreq;
72
73 struct nvme_fc_cmd_iu cmdiubuf;
74 struct nvme_fc_ersp_iu rspiubuf;
75 dma_addr_t rspdma;
76 struct scatterlist *data_sg;
77 struct scatterlist *next_sg;
78 int data_sg_cnt;
79 u32 next_sg_offset;
80 u32 total_length;
81 u32 offset;
82 enum nvmet_fcp_datadir io_dir;
83 bool active;
84 bool abort;
85 bool aborted;
86 bool writedataactive;
87 spinlock_t flock;
88
89 struct nvmet_req req;
90 struct work_struct work;
91 struct work_struct done_work;
92
93 struct nvmet_fc_tgtport *tgtport;
94 struct nvmet_fc_tgt_queue *queue;
95
96 struct list_head fcp_list; /* tgtport->fcp_list */
97 };
98
99 struct nvmet_fc_tgtport {
100
101 struct nvmet_fc_target_port fc_target_port;
102
103 struct list_head tgt_list; /* nvmet_fc_target_list */
104 struct device *dev; /* dev for dma mapping */
105 struct nvmet_fc_target_template *ops;
106
107 struct nvmet_fc_ls_iod *iod;
108 spinlock_t lock;
109 struct list_head ls_list;
110 struct list_head ls_busylist;
111 struct list_head assoc_list;
112 struct ida assoc_cnt;
113 struct nvmet_port *port;
114 struct kref ref;
115 };
116
117 struct nvmet_fc_tgt_queue {
118 bool ninetypercent;
119 u16 qid;
120 u16 sqsize;
121 u16 ersp_ratio;
122 __le16 sqhd;
123 int cpu;
124 atomic_t connected;
125 atomic_t sqtail;
126 atomic_t zrspcnt;
127 atomic_t rsn;
128 spinlock_t qlock;
129 struct nvmet_port *port;
130 struct nvmet_cq nvme_cq;
131 struct nvmet_sq nvme_sq;
132 struct nvmet_fc_tgt_assoc *assoc;
133 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
134 struct list_head fod_list;
135 struct workqueue_struct *work_q;
136 struct kref ref;
137 } __aligned(sizeof(unsigned long long));
138
139 struct nvmet_fc_tgt_assoc {
140 u64 association_id;
141 u32 a_id;
142 struct nvmet_fc_tgtport *tgtport;
143 struct list_head a_list;
144 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES];
145 struct kref ref;
146 };
147
148
149 static inline int
150 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
151 {
152 return (iodptr - iodptr->tgtport->iod);
153 }
154
155 static inline int
156 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
157 {
158 return (fodptr - fodptr->queue->fod);
159 }
160
161
162 /*
163 * Association and Connection IDs:
164 *
165 * Association ID will have random number in upper 6 bytes and zero
166 * in lower 2 bytes
167 *
168 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
169 *
170 * note: Association ID = Connection ID for queue 0
171 */
172 #define BYTES_FOR_QID sizeof(u16)
173 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
174 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
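175
176 /*
177  * Worked example (illustrative only; the value is hypothetical): if the
178  * random upper bytes give association_id 0x1234567890ab0000, then with
179  * the helpers below:
180  *
181  *	nvmet_fc_makeconnid(assoc, 0)                 == 0x1234567890ab0000
182  *	nvmet_fc_makeconnid(assoc, 3)                 == 0x1234567890ab0003
183  *	nvmet_fc_getqueueid(0x1234567890ab0003)       == 3
184  *	nvmet_fc_getassociationid(0x1234567890ab0003) == 0x1234567890ab0000
185  */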
175
176 static inline u64
177 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
178 {
179 return (assoc->association_id | qid);
180 }
181
182 static inline u64
183 nvmet_fc_getassociationid(u64 connectionid)
184 {
185 return connectionid & ~NVMET_FC_QUEUEID_MASK;
186 }
187
188 static inline u16
189 nvmet_fc_getqueueid(u64 connectionid)
190 {
191 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
192 }
193
194 static inline struct nvmet_fc_tgtport *
195 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
196 {
197 return container_of(targetport, struct nvmet_fc_tgtport,
198 fc_target_port);
199 }
200
201 static inline struct nvmet_fc_fcp_iod *
202 nvmet_req_to_fod(struct nvmet_req *nvme_req)
203 {
204 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
205 }
206
207
208 /* *************************** Globals **************************** */
209
210
211 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
212
213 static LIST_HEAD(nvmet_fc_target_list);
214 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
215
216
217 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
218 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
219 static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
220 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
221 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
222 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
223 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
224 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
225 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
226
227
228 /* *********************** FC-NVME DMA Handling **************************** */
229
230 /*
231  * The fcloop device passes in a NULL device pointer. Real LLDDs will
232 * pass in a valid device pointer. If NULL is passed to the dma mapping
233 * routines, depending on the platform, it may or may not succeed, and
234 * may crash.
235 *
236 * As such:
237  *     Wrap all the dma routines and check the dev pointer.
238 *
239  *     For simple mappings (those that return just a dma address), we'll noop them,
240 * returning a dma address of 0.
241 *
242 * On more complex mappings (dma_map_sg), a pseudo routine fills
243 * in the scatter list, setting all dma addresses to 0.
244 */
245
246 static inline dma_addr_t
247 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
248 enum dma_data_direction dir)
249 {
250 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
251 }
252
253 static inline int
254 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
255 {
256 return dev ? dma_mapping_error(dev, dma_addr) : 0;
257 }
258
259 static inline void
260 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
261 enum dma_data_direction dir)
262 {
263 if (dev)
264 dma_unmap_single(dev, addr, size, dir);
265 }
266
267 static inline void
268 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
269 enum dma_data_direction dir)
270 {
271 if (dev)
272 dma_sync_single_for_cpu(dev, addr, size, dir);
273 }
274
275 static inline void
276 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
277 enum dma_data_direction dir)
278 {
279 if (dev)
280 dma_sync_single_for_device(dev, addr, size, dir);
281 }
282
283 /* pseudo dma_map_sg call */
284 static int
285 fc_map_sg(struct scatterlist *sg, int nents)
286 {
287 struct scatterlist *s;
288 int i;
289
290 WARN_ON(nents == 0 || sg[0].length == 0);
291
292 for_each_sg(sg, s, nents, i) {
293 s->dma_address = 0L;
294 #ifdef CONFIG_NEED_SG_DMA_LENGTH
295 s->dma_length = s->length;
296 #endif
297 }
298 return nents;
299 }
300
301 static inline int
302 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
303 enum dma_data_direction dir)
304 {
305 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
306 }
307
308 static inline void
309 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
310 enum dma_data_direction dir)
311 {
312 if (dev)
313 dma_unmap_sg(dev, sg, nents, dir);
314 }
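
/*
 * Typical use of the wrappers above (a sketch only; "buf" and "len" are
 * hypothetical and this exact sequence does not appear verbatim in this
 * file). With a NULL dev (fcloop), every call quietly no-ops:
 *
 *	dma_addr_t dma;
 *
 *	dma = fc_dma_map_single(tgtport->dev, buf, len, DMA_TO_DEVICE);
 *	if (fc_dma_mapping_error(tgtport->dev, dma))
 *		return -ENOMEM;
 *	fc_dma_sync_single_for_device(tgtport->dev, dma, len, DMA_TO_DEVICE);
 *	...
 *	fc_dma_unmap_single(tgtport->dev, dma, len, DMA_TO_DEVICE);
 */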
315
316
317 /* *********************** FC-NVME Port Management ************************ */
318
319
320 static int
321 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
322 {
323 struct nvmet_fc_ls_iod *iod;
324 int i;
325
326 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
327 GFP_KERNEL);
328 if (!iod)
329 return -ENOMEM;
330
331 tgtport->iod = iod;
332
333 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
334 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
335 iod->tgtport = tgtport;
336 list_add_tail(&iod->ls_list, &tgtport->ls_list);
337
338 iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
339 GFP_KERNEL);
340 if (!iod->rqstbuf)
341 goto out_fail;
342
343 iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
344
345 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
346 NVME_FC_MAX_LS_BUFFER_SIZE,
347 DMA_TO_DEVICE);
348 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
349 goto out_fail;
350 }
351
352 return 0;
353
354 out_fail:
355 kfree(iod->rqstbuf);
356 list_del(&iod->ls_list);
357 for (iod--, i--; i >= 0; iod--, i--) {
358 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
359 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
360 kfree(iod->rqstbuf);
361 list_del(&iod->ls_list);
362 }
363
364 kfree(iod);
365
366 return -EFAULT;
367 }
368
369 static void
370 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
371 {
372 struct nvmet_fc_ls_iod *iod = tgtport->iod;
373 int i;
374
375 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
376 fc_dma_unmap_single(tgtport->dev,
377 iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
378 DMA_TO_DEVICE);
379 kfree(iod->rqstbuf);
380 list_del(&iod->ls_list);
381 }
382 kfree(tgtport->iod);
383 }
384
385 static struct nvmet_fc_ls_iod *
386 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
387 {
388         struct nvmet_fc_ls_iod *iod;
389 unsigned long flags;
390
391 spin_lock_irqsave(&tgtport->lock, flags);
392 iod = list_first_entry_or_null(&tgtport->ls_list,
393 struct nvmet_fc_ls_iod, ls_list);
394 if (iod)
395 list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
396 spin_unlock_irqrestore(&tgtport->lock, flags);
397 return iod;
398 }
399
400
401 static void
402 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
403 struct nvmet_fc_ls_iod *iod)
404 {
405 unsigned long flags;
406
407 spin_lock_irqsave(&tgtport->lock, flags);
408 list_move(&iod->ls_list, &tgtport->ls_list);
409 spin_unlock_irqrestore(&tgtport->lock, flags);
410 }
411
412 static void
413 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
414 struct nvmet_fc_tgt_queue *queue)
415 {
416 struct nvmet_fc_fcp_iod *fod = queue->fod;
417 int i;
418
419 for (i = 0; i < queue->sqsize; fod++, i++) {
420 INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
421 INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
422 fod->tgtport = tgtport;
423 fod->queue = queue;
424 fod->active = false;
425 fod->abort = false;
426 fod->aborted = false;
427 fod->fcpreq = NULL;
428 list_add_tail(&fod->fcp_list, &queue->fod_list);
429 spin_lock_init(&fod->flock);
430
431 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
432 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
433 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
434 list_del(&fod->fcp_list);
435 for (fod--, i--; i >= 0; fod--, i--) {
436 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
437 sizeof(fod->rspiubuf),
438 DMA_TO_DEVICE);
439 fod->rspdma = 0L;
440 list_del(&fod->fcp_list);
441 }
442
443 return;
444 }
445 }
446 }
447
448 static void
449 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
450 struct nvmet_fc_tgt_queue *queue)
451 {
452 struct nvmet_fc_fcp_iod *fod = queue->fod;
453 int i;
454
455 for (i = 0; i < queue->sqsize; fod++, i++) {
456 if (fod->rspdma)
457 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
458 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
459 }
460 }
461
462 static struct nvmet_fc_fcp_iod *
463 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
464 {
465         struct nvmet_fc_fcp_iod *fod;
466 unsigned long flags;
467
468 spin_lock_irqsave(&queue->qlock, flags);
469 fod = list_first_entry_or_null(&queue->fod_list,
470 struct nvmet_fc_fcp_iod, fcp_list);
471 if (fod) {
472 list_del(&fod->fcp_list);
473 fod->active = true;
474 /*
475 * no queue reference is taken, as it was taken by the
476 * queue lookup just prior to the allocation. The iod
477 * will "inherit" that reference.
478 */
479 }
480 spin_unlock_irqrestore(&queue->qlock, flags);
481 return fod;
482 }
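
/*
 * Reference pairing: the queue reference carried by a fod returned here
 * was taken via nvmet_fc_tgt_q_get() in nvmet_fc_find_target_queue();
 * the matching nvmet_fc_tgt_q_put() is done in nvmet_fc_free_fcp_iod().
 */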
483
484
485 static void
486 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
487 struct nvmet_fc_fcp_iod *fod)
488 {
489 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
490 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
491 unsigned long flags;
492
493 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
494 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
495
496 fcpreq->nvmet_fc_private = NULL;
497
498 spin_lock_irqsave(&queue->qlock, flags);
499 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
500 fod->active = false;
501 fod->abort = false;
502 fod->aborted = false;
503 fod->writedataactive = false;
504 fod->fcpreq = NULL;
505 spin_unlock_irqrestore(&queue->qlock, flags);
506
507 /*
508 * release the reference taken at queue lookup and fod allocation
509 */
510 nvmet_fc_tgt_q_put(queue);
511
512 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
513 }
514
515 static int
516 nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
517 {
518 int cpu, idx, cnt;
519
520 if (tgtport->ops->max_hw_queues == 1)
521 return WORK_CPU_UNBOUND;
522
523 /* Simple cpu selection based on qid modulo active cpu count */
524 idx = !qid ? 0 : (qid - 1) % num_active_cpus();
525
526 /* find the n'th active cpu */
527 for (cpu = 0, cnt = 0; ; ) {
528 if (cpu_active(cpu)) {
529 if (cnt == idx)
530 break;
531 cnt++;
532 }
533 cpu = (cpu + 1) % num_possible_cpus();
534 }
535
536 return cpu;
537 }
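
/*
 * Worked example (illustrative only): with 8 active cpus, qid 0 (admin)
 * and qid 1 both map to the first active cpu (idx 0), qid 2 maps to the
 * second active cpu (idx 1), and qid 9 wraps back to idx 0.
 */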
538
539 static struct nvmet_fc_tgt_queue *
540 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
541 u16 qid, u16 sqsize)
542 {
543 struct nvmet_fc_tgt_queue *queue;
544 unsigned long flags;
545 int ret;
546
547 if (qid >= NVMET_NR_QUEUES)
548 return NULL;
549
550 queue = kzalloc((sizeof(*queue) +
551 (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
552 GFP_KERNEL);
553 if (!queue)
554 return NULL;
555
556 if (!nvmet_fc_tgt_a_get(assoc))
557 goto out_free_queue;
558
559 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
560 assoc->tgtport->fc_target_port.port_num,
561 assoc->a_id, qid);
562 if (!queue->work_q)
563 goto out_a_put;
564
565 queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
566 queue->qid = qid;
567 queue->sqsize = sqsize;
568 queue->assoc = assoc;
569 queue->port = assoc->tgtport->port;
570 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
571 INIT_LIST_HEAD(&queue->fod_list);
572 atomic_set(&queue->connected, 0);
573 atomic_set(&queue->sqtail, 0);
574 atomic_set(&queue->rsn, 1);
575 atomic_set(&queue->zrspcnt, 0);
576 spin_lock_init(&queue->qlock);
577 kref_init(&queue->ref);
578
579 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
580
581 ret = nvmet_sq_init(&queue->nvme_sq);
582 if (ret)
583 goto out_fail_iodlist;
584
585 WARN_ON(assoc->queues[qid]);
586 spin_lock_irqsave(&assoc->tgtport->lock, flags);
587 assoc->queues[qid] = queue;
588 spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
589
590 return queue;
591
592 out_fail_iodlist:
593 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
594 destroy_workqueue(queue->work_q);
595 out_a_put:
596 nvmet_fc_tgt_a_put(assoc);
597 out_free_queue:
598 kfree(queue);
599 return NULL;
600 }
601
602
603 static void
604 nvmet_fc_tgt_queue_free(struct kref *ref)
605 {
606 struct nvmet_fc_tgt_queue *queue =
607 container_of(ref, struct nvmet_fc_tgt_queue, ref);
608 unsigned long flags;
609
610 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
611 queue->assoc->queues[queue->qid] = NULL;
612 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
613
614 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
615
616 nvmet_fc_tgt_a_put(queue->assoc);
617
618 destroy_workqueue(queue->work_q);
619
620 kfree(queue);
621 }
622
623 static void
624 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
625 {
626 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
627 }
628
629 static int
630 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
631 {
632 return kref_get_unless_zero(&queue->ref);
633 }
634
635
636 static void
637 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
638 {
639 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
640 struct nvmet_fc_fcp_iod *fod = queue->fod;
641 unsigned long flags;
642 int i, writedataactive;
643 bool disconnect;
644
645 disconnect = atomic_xchg(&queue->connected, 0);
646
647 spin_lock_irqsave(&queue->qlock, flags);
648         /* abort outstanding io's */
649 for (i = 0; i < queue->sqsize; fod++, i++) {
650 if (fod->active) {
651 spin_lock(&fod->flock);
652 fod->abort = true;
653 writedataactive = fod->writedataactive;
654 spin_unlock(&fod->flock);
655 /*
656 * only call lldd abort routine if waiting for
657 * writedata. other outstanding ops should finish
658 * on their own.
659 */
660 if (writedataactive) {
661 spin_lock(&fod->flock);
662 fod->aborted = true;
663 spin_unlock(&fod->flock);
664 tgtport->ops->fcp_abort(
665 &tgtport->fc_target_port, fod->fcpreq);
666 }
667 }
668 }
669 spin_unlock_irqrestore(&queue->qlock, flags);
670
671 flush_workqueue(queue->work_q);
672
673 if (disconnect)
674 nvmet_sq_destroy(&queue->nvme_sq);
675
676 nvmet_fc_tgt_q_put(queue);
677 }
678
679 static struct nvmet_fc_tgt_queue *
680 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
681 u64 connection_id)
682 {
683 struct nvmet_fc_tgt_assoc *assoc;
684 struct nvmet_fc_tgt_queue *queue;
685 u64 association_id = nvmet_fc_getassociationid(connection_id);
686 u16 qid = nvmet_fc_getqueueid(connection_id);
687 unsigned long flags;
688
689 spin_lock_irqsave(&tgtport->lock, flags);
690 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
691 if (association_id == assoc->association_id) {
692 queue = assoc->queues[qid];
693 if (queue &&
694 (!atomic_read(&queue->connected) ||
695 !nvmet_fc_tgt_q_get(queue)))
696 queue = NULL;
697 spin_unlock_irqrestore(&tgtport->lock, flags);
698 return queue;
699 }
700 }
701 spin_unlock_irqrestore(&tgtport->lock, flags);
702 return NULL;
703 }
704
705 static struct nvmet_fc_tgt_assoc *
706 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
707 {
708 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
709 unsigned long flags;
710 u64 ran;
711 int idx;
712 bool needrandom = true;
713
714 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
715 if (!assoc)
716 return NULL;
717
718 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
719 if (idx < 0)
720 goto out_free_assoc;
721
722 if (!nvmet_fc_tgtport_get(tgtport))
723 goto out_ida_put;
724
725 assoc->tgtport = tgtport;
726 assoc->a_id = idx;
727 INIT_LIST_HEAD(&assoc->a_list);
728 kref_init(&assoc->ref);
729
730 while (needrandom) {
731 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
732 ran = ran << BYTES_FOR_QID_SHIFT;
733
734 spin_lock_irqsave(&tgtport->lock, flags);
735 needrandom = false;
736 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
737 if (ran == tmpassoc->association_id) {
738 needrandom = true;
739 break;
740 }
741 if (!needrandom) {
742 assoc->association_id = ran;
743 list_add_tail(&assoc->a_list, &tgtport->assoc_list);
744 }
745 spin_unlock_irqrestore(&tgtport->lock, flags);
746 }
747
748 return assoc;
749
750 out_ida_put:
751 ida_simple_remove(&tgtport->assoc_cnt, idx);
752 out_free_assoc:
753 kfree(assoc);
754 return NULL;
755 }
756
757 static void
758 nvmet_fc_target_assoc_free(struct kref *ref)
759 {
760 struct nvmet_fc_tgt_assoc *assoc =
761 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
762 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
763 unsigned long flags;
764
765 spin_lock_irqsave(&tgtport->lock, flags);
766 list_del(&assoc->a_list);
767 spin_unlock_irqrestore(&tgtport->lock, flags);
768 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
769 kfree(assoc);
770 nvmet_fc_tgtport_put(tgtport);
771 }
772
773 static void
774 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
775 {
776 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
777 }
778
779 static int
780 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
781 {
782 return kref_get_unless_zero(&assoc->ref);
783 }
784
785 static void
786 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
787 {
788 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
789 struct nvmet_fc_tgt_queue *queue;
790 unsigned long flags;
791 int i;
792
793 spin_lock_irqsave(&tgtport->lock, flags);
794 for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
795 queue = assoc->queues[i];
796 if (queue) {
797 if (!nvmet_fc_tgt_q_get(queue))
798 continue;
799 spin_unlock_irqrestore(&tgtport->lock, flags);
800 nvmet_fc_delete_target_queue(queue);
801 nvmet_fc_tgt_q_put(queue);
802 spin_lock_irqsave(&tgtport->lock, flags);
803 }
804 }
805 spin_unlock_irqrestore(&tgtport->lock, flags);
806
807 nvmet_fc_tgt_a_put(assoc);
808 }
809
810 static struct nvmet_fc_tgt_assoc *
811 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
812 u64 association_id)
813 {
814 struct nvmet_fc_tgt_assoc *assoc;
815 struct nvmet_fc_tgt_assoc *ret = NULL;
816 unsigned long flags;
817
818 spin_lock_irqsave(&tgtport->lock, flags);
819 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
820 if (association_id == assoc->association_id) {
821 ret = assoc;
822 nvmet_fc_tgt_a_get(assoc);
823 break;
824 }
825 }
826 spin_unlock_irqrestore(&tgtport->lock, flags);
827
828 return ret;
829 }
830
831
832 /**
833  * nvmet_fc_register_targetport - transport entry point called by an
834 * LLDD to register the existence of a local
835  *                              NVME subsystem FC port.
836 * @pinfo: pointer to information about the port to be registered
837 * @template: LLDD entrypoints and operational parameters for the port
838 * @dev: physical hardware device node port corresponds to. Will be
839 * used for DMA mappings
840 * @portptr: pointer to a local port pointer. Upon success, the routine
841  *            will allocate an nvmet_fc_target_port structure and place its
842 * address in the local port pointer. Upon failure, local port
843 * pointer will be set to NULL.
844 *
845 * Returns:
846 * a completion status. Must be 0 upon success; a negative errno
847 * (ex: -ENXIO) upon failure.
848 */
849 int
850 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
851 struct nvmet_fc_target_template *template,
852 struct device *dev,
853 struct nvmet_fc_target_port **portptr)
854 {
855 struct nvmet_fc_tgtport *newrec;
856 unsigned long flags;
857 int ret, idx;
858
859 if (!template->xmt_ls_rsp || !template->fcp_op ||
860 !template->fcp_abort ||
861 !template->fcp_req_release || !template->targetport_delete ||
862 !template->max_hw_queues || !template->max_sgl_segments ||
863 !template->max_dif_sgl_segments || !template->dma_boundary) {
864 ret = -EINVAL;
865 goto out_regtgt_failed;
866 }
867
868 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
869 GFP_KERNEL);
870 if (!newrec) {
871 ret = -ENOMEM;
872 goto out_regtgt_failed;
873 }
874
875 idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
876 if (idx < 0) {
877 ret = -ENOSPC;
878 goto out_fail_kfree;
879 }
880
881 if (!get_device(dev) && dev) {
882 ret = -ENODEV;
883 goto out_ida_put;
884 }
885
886 newrec->fc_target_port.node_name = pinfo->node_name;
887 newrec->fc_target_port.port_name = pinfo->port_name;
888 newrec->fc_target_port.private = &newrec[1];
889 newrec->fc_target_port.port_id = pinfo->port_id;
890 newrec->fc_target_port.port_num = idx;
891 INIT_LIST_HEAD(&newrec->tgt_list);
892 newrec->dev = dev;
893 newrec->ops = template;
894 spin_lock_init(&newrec->lock);
895 INIT_LIST_HEAD(&newrec->ls_list);
896 INIT_LIST_HEAD(&newrec->ls_busylist);
897 INIT_LIST_HEAD(&newrec->assoc_list);
898 kref_init(&newrec->ref);
899 ida_init(&newrec->assoc_cnt);
900
901 ret = nvmet_fc_alloc_ls_iodlist(newrec);
902 if (ret) {
903 ret = -ENOMEM;
904 goto out_free_newrec;
905 }
906
907 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
908 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
909 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
910
911 *portptr = &newrec->fc_target_port;
912 return 0;
913
914 out_free_newrec:
915 put_device(dev);
916 out_ida_put:
917 ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
918 out_fail_kfree:
919 kfree(newrec);
920 out_regtgt_failed:
921 *portptr = NULL;
922 return ret;
923 }
924 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
925
926
927 static void
928 nvmet_fc_free_tgtport(struct kref *ref)
929 {
930 struct nvmet_fc_tgtport *tgtport =
931 container_of(ref, struct nvmet_fc_tgtport, ref);
932 struct device *dev = tgtport->dev;
933 unsigned long flags;
934
935 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
936 list_del(&tgtport->tgt_list);
937 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
938
939 nvmet_fc_free_ls_iodlist(tgtport);
940
941 /* let the LLDD know we've finished tearing it down */
942 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
943
944 ida_simple_remove(&nvmet_fc_tgtport_cnt,
945 tgtport->fc_target_port.port_num);
946
947 ida_destroy(&tgtport->assoc_cnt);
948
949 kfree(tgtport);
950
951 put_device(dev);
952 }
953
954 static void
955 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
956 {
957 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
958 }
959
960 static int
961 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
962 {
963 return kref_get_unless_zero(&tgtport->ref);
964 }
965
966 static void
967 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
968 {
969 struct nvmet_fc_tgt_assoc *assoc, *next;
970 unsigned long flags;
971
972 spin_lock_irqsave(&tgtport->lock, flags);
973 list_for_each_entry_safe(assoc, next,
974 &tgtport->assoc_list, a_list) {
975 if (!nvmet_fc_tgt_a_get(assoc))
976 continue;
977 spin_unlock_irqrestore(&tgtport->lock, flags);
978 nvmet_fc_delete_target_assoc(assoc);
979 nvmet_fc_tgt_a_put(assoc);
980 spin_lock_irqsave(&tgtport->lock, flags);
981 }
982 spin_unlock_irqrestore(&tgtport->lock, flags);
983 }
984
985 /*
986 * nvmet layer has called to terminate an association
987 */
988 static void
989 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
990 {
991 struct nvmet_fc_tgtport *tgtport, *next;
992 struct nvmet_fc_tgt_assoc *assoc;
993 struct nvmet_fc_tgt_queue *queue;
994 unsigned long flags;
995 bool found_ctrl = false;
996
997 /* this is a bit ugly, but don't want to make locks layered */
998 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
999 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1000 tgt_list) {
1001 if (!nvmet_fc_tgtport_get(tgtport))
1002 continue;
1003 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1004
1005 spin_lock_irqsave(&tgtport->lock, flags);
1006 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1007 queue = assoc->queues[0];
1008 if (queue && queue->nvme_sq.ctrl == ctrl) {
1009 if (nvmet_fc_tgt_a_get(assoc))
1010 found_ctrl = true;
1011 break;
1012 }
1013 }
1014 spin_unlock_irqrestore(&tgtport->lock, flags);
1015
1016 nvmet_fc_tgtport_put(tgtport);
1017
1018 if (found_ctrl) {
1019 nvmet_fc_delete_target_assoc(assoc);
1020 nvmet_fc_tgt_a_put(assoc);
1021 return;
1022 }
1023
1024 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1025 }
1026 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1027 }
1028
1029 /**
1030  * nvmet_fc_unregister_targetport - transport entry point called by an
1031  *                              LLDD to deregister/remove a previously
1032  *                              registered local NVME subsystem FC port.
1033  * @target_port: pointer to the (registered) target port that is to be
1034 * deregistered.
1035 *
1036 * Returns:
1037 * a completion status. Must be 0 upon success; a negative errno
1038 * (ex: -ENXIO) upon failure.
1039 */
1040 int
1041 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1042 {
1043 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1044
1045 /* terminate any outstanding associations */
1046 __nvmet_fc_free_assocs(tgtport);
1047
1048 nvmet_fc_tgtport_put(tgtport);
1049
1050 return 0;
1051 }
1052 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1053
1054
1055 /* *********************** FC-NVME LS Handling **************************** */
1056
1057
1058 static void
1059 nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
1060 {
1061 struct fcnvme_ls_acc_hdr *acc = buf;
1062
1063 acc->w0.ls_cmd = ls_cmd;
1064 acc->desc_list_len = desc_len;
1065 acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1066 acc->rqst.desc_len =
1067 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1068 acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1069 }
1070
1071 static int
1072 nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1073 u8 reason, u8 explanation, u8 vendor)
1074 {
1075 struct fcnvme_ls_rjt *rjt = buf;
1076
1077 nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1078 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1079 ls_cmd);
1080 rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1081 rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1082 rjt->rjt.reason_code = reason;
1083 rjt->rjt.reason_explanation = explanation;
1084 rjt->rjt.vendor = vendor;
1085
1086 return sizeof(struct fcnvme_ls_rjt);
1087 }
1088
1089 /* Validation Error indexes into the string table below */
1090 enum {
1091 VERR_NO_ERROR = 0,
1092 VERR_CR_ASSOC_LEN = 1,
1093 VERR_CR_ASSOC_RQST_LEN = 2,
1094 VERR_CR_ASSOC_CMD = 3,
1095 VERR_CR_ASSOC_CMD_LEN = 4,
1096 VERR_ERSP_RATIO = 5,
1097 VERR_ASSOC_ALLOC_FAIL = 6,
1098 VERR_QUEUE_ALLOC_FAIL = 7,
1099 VERR_CR_CONN_LEN = 8,
1100 VERR_CR_CONN_RQST_LEN = 9,
1101 VERR_ASSOC_ID = 10,
1102 VERR_ASSOC_ID_LEN = 11,
1103 VERR_NO_ASSOC = 12,
1104 VERR_CONN_ID = 13,
1105 VERR_CONN_ID_LEN = 14,
1106 VERR_NO_CONN = 15,
1107 VERR_CR_CONN_CMD = 16,
1108 VERR_CR_CONN_CMD_LEN = 17,
1109 VERR_DISCONN_LEN = 18,
1110 VERR_DISCONN_RQST_LEN = 19,
1111 VERR_DISCONN_CMD = 20,
1112 VERR_DISCONN_CMD_LEN = 21,
1113 VERR_DISCONN_SCOPE = 22,
1114 VERR_RS_LEN = 23,
1115 VERR_RS_RQST_LEN = 24,
1116 VERR_RS_CMD = 25,
1117 VERR_RS_CMD_LEN = 26,
1118 VERR_RS_RCTL = 27,
1119 VERR_RS_RO = 28,
1120 };
1121
1122 static char *validation_errors[] = {
1123 "OK",
1124 "Bad CR_ASSOC Length",
1125 "Bad CR_ASSOC Rqst Length",
1126 "Not CR_ASSOC Cmd",
1127 "Bad CR_ASSOC Cmd Length",
1128 "Bad Ersp Ratio",
1129 "Association Allocation Failed",
1130 "Queue Allocation Failed",
1131 "Bad CR_CONN Length",
1132 "Bad CR_CONN Rqst Length",
1133 "Not Association ID",
1134 "Bad Association ID Length",
1135 "No Association",
1136 "Not Connection ID",
1137 "Bad Connection ID Length",
1138 "No Connection",
1139 "Not CR_CONN Cmd",
1140 "Bad CR_CONN Cmd Length",
1141 "Bad DISCONN Length",
1142 "Bad DISCONN Rqst Length",
1143 "Not DISCONN Cmd",
1144 "Bad DISCONN Cmd Length",
1145 "Bad Disconnect Scope",
1146 "Bad RS Length",
1147 "Bad RS Rqst Length",
1148 "Not RS Cmd",
1149 "Bad RS Cmd Length",
1150 "Bad RS R_CTL",
1151 "Bad RS Relative Offset",
1152 };
1153
1154 static void
1155 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1156 struct nvmet_fc_ls_iod *iod)
1157 {
1158 struct fcnvme_ls_cr_assoc_rqst *rqst =
1159 (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1160 struct fcnvme_ls_cr_assoc_acc *acc =
1161 (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1162 struct nvmet_fc_tgt_queue *queue;
1163 int ret = 0;
1164
1165 memset(acc, 0, sizeof(*acc));
1166
1167 /*
1168          * The FC-NVME spec changed: the padding size for the Create
1169          * Association Cmd descriptor was originally specified incorrectly,
1170          * so initiators send descriptors of differing lengths.
1171 * Accept anything of "minimum" length. Assume format per 1.15
1172 * spec (with HOSTID reduced to 16 bytes), ignore how long the
1173 * trailing pad length is.
1174 */
1175 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1176 ret = VERR_CR_ASSOC_LEN;
1177 else if (rqst->desc_list_len <
1178 cpu_to_be32(FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN))
1179 ret = VERR_CR_ASSOC_RQST_LEN;
1180 else if (rqst->assoc_cmd.desc_tag !=
1181 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1182 ret = VERR_CR_ASSOC_CMD;
1183 else if (rqst->assoc_cmd.desc_len <
1184 cpu_to_be32(FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN))
1185 ret = VERR_CR_ASSOC_CMD_LEN;
1186 else if (!rqst->assoc_cmd.ersp_ratio ||
1187 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1188 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1189 ret = VERR_ERSP_RATIO;
1190
1191 else {
1192 /* new association w/ admin queue */
1193 iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1194 if (!iod->assoc)
1195 ret = VERR_ASSOC_ALLOC_FAIL;
1196 else {
1197 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1198 be16_to_cpu(rqst->assoc_cmd.sqsize));
1199 if (!queue)
1200 ret = VERR_QUEUE_ALLOC_FAIL;
1201 }
1202 }
1203
1204 if (ret) {
1205 dev_err(tgtport->dev,
1206 "Create Association LS failed: %s\n",
1207 validation_errors[ret]);
1208 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1209 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1210 FCNVME_RJT_RC_LOGIC,
1211 FCNVME_RJT_EXP_NONE, 0);
1212 return;
1213 }
1214
1215 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1216 atomic_set(&queue->connected, 1);
1217 queue->sqhd = 0; /* best place to init value */
1218
1219 /* format a response */
1220
1221 iod->lsreq->rsplen = sizeof(*acc);
1222
1223 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1224 fcnvme_lsdesc_len(
1225 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1226 FCNVME_LS_CREATE_ASSOCIATION);
1227 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1228 acc->associd.desc_len =
1229 fcnvme_lsdesc_len(
1230 sizeof(struct fcnvme_lsdesc_assoc_id));
1231 acc->associd.association_id =
1232 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1233 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1234 acc->connectid.desc_len =
1235 fcnvme_lsdesc_len(
1236 sizeof(struct fcnvme_lsdesc_conn_id));
1237 acc->connectid.connection_id = acc->associd.association_id;
1238 }
1239
1240 static void
1241 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1242 struct nvmet_fc_ls_iod *iod)
1243 {
1244 struct fcnvme_ls_cr_conn_rqst *rqst =
1245 (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1246 struct fcnvme_ls_cr_conn_acc *acc =
1247 (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1248 struct nvmet_fc_tgt_queue *queue;
1249 int ret = 0;
1250
1251 memset(acc, 0, sizeof(*acc));
1252
1253 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1254 ret = VERR_CR_CONN_LEN;
1255 else if (rqst->desc_list_len !=
1256 fcnvme_lsdesc_len(
1257 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1258 ret = VERR_CR_CONN_RQST_LEN;
1259 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1260 ret = VERR_ASSOC_ID;
1261 else if (rqst->associd.desc_len !=
1262 fcnvme_lsdesc_len(
1263 sizeof(struct fcnvme_lsdesc_assoc_id)))
1264 ret = VERR_ASSOC_ID_LEN;
1265 else if (rqst->connect_cmd.desc_tag !=
1266 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1267 ret = VERR_CR_CONN_CMD;
1268 else if (rqst->connect_cmd.desc_len !=
1269 fcnvme_lsdesc_len(
1270 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1271 ret = VERR_CR_CONN_CMD_LEN;
1272 else if (!rqst->connect_cmd.ersp_ratio ||
1273 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1274 be16_to_cpu(rqst->connect_cmd.sqsize)))
1275 ret = VERR_ERSP_RATIO;
1276
1277 else {
1278 /* new io queue */
1279 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1280 be64_to_cpu(rqst->associd.association_id));
1281 if (!iod->assoc)
1282 ret = VERR_NO_ASSOC;
1283 else {
1284 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1285 be16_to_cpu(rqst->connect_cmd.qid),
1286 be16_to_cpu(rqst->connect_cmd.sqsize));
1287 if (!queue)
1288 ret = VERR_QUEUE_ALLOC_FAIL;
1289
1290 /* release get taken in nvmet_fc_find_target_assoc */
1291 nvmet_fc_tgt_a_put(iod->assoc);
1292 }
1293 }
1294
1295 if (ret) {
1296 dev_err(tgtport->dev,
1297 "Create Connection LS failed: %s\n",
1298 validation_errors[ret]);
1299 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1300 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1301 (ret == VERR_NO_ASSOC) ?
1302 FCNVME_RJT_RC_INV_ASSOC :
1303 FCNVME_RJT_RC_LOGIC,
1304 FCNVME_RJT_EXP_NONE, 0);
1305 return;
1306 }
1307
1308 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1309 atomic_set(&queue->connected, 1);
1310 queue->sqhd = 0; /* best place to init value */
1311
1312 /* format a response */
1313
1314 iod->lsreq->rsplen = sizeof(*acc);
1315
1316 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1317 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1318 FCNVME_LS_CREATE_CONNECTION);
1319 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1320 acc->connectid.desc_len =
1321 fcnvme_lsdesc_len(
1322 sizeof(struct fcnvme_lsdesc_conn_id));
1323 acc->connectid.connection_id =
1324 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1325 be16_to_cpu(rqst->connect_cmd.qid)));
1326 }
1327
1328 static void
1329 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1330 struct nvmet_fc_ls_iod *iod)
1331 {
1332 struct fcnvme_ls_disconnect_rqst *rqst =
1333 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1334 struct fcnvme_ls_disconnect_acc *acc =
1335 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1336 struct nvmet_fc_tgt_queue *queue = NULL;
1337 struct nvmet_fc_tgt_assoc *assoc;
1338 int ret = 0;
1339 bool del_assoc = false;
1340
1341 memset(acc, 0, sizeof(*acc));
1342
1343 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1344 ret = VERR_DISCONN_LEN;
1345 else if (rqst->desc_list_len !=
1346 fcnvme_lsdesc_len(
1347 sizeof(struct fcnvme_ls_disconnect_rqst)))
1348 ret = VERR_DISCONN_RQST_LEN;
1349 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1350 ret = VERR_ASSOC_ID;
1351 else if (rqst->associd.desc_len !=
1352 fcnvme_lsdesc_len(
1353 sizeof(struct fcnvme_lsdesc_assoc_id)))
1354 ret = VERR_ASSOC_ID_LEN;
1355 else if (rqst->discon_cmd.desc_tag !=
1356 cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1357 ret = VERR_DISCONN_CMD;
1358 else if (rqst->discon_cmd.desc_len !=
1359 fcnvme_lsdesc_len(
1360 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1361 ret = VERR_DISCONN_CMD_LEN;
1362 else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1363 (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1364 ret = VERR_DISCONN_SCOPE;
1365 else {
1366 /* match an active association */
1367 assoc = nvmet_fc_find_target_assoc(tgtport,
1368 be64_to_cpu(rqst->associd.association_id));
1369 iod->assoc = assoc;
1370 if (assoc) {
1371 if (rqst->discon_cmd.scope ==
1372 FCNVME_DISCONN_CONNECTION) {
1373 queue = nvmet_fc_find_target_queue(tgtport,
1374 be64_to_cpu(
1375 rqst->discon_cmd.id));
1376 if (!queue) {
1377 nvmet_fc_tgt_a_put(assoc);
1378 ret = VERR_NO_CONN;
1379 }
1380 }
1381 } else
1382 ret = VERR_NO_ASSOC;
1383 }
1384
1385 if (ret) {
1386 dev_err(tgtport->dev,
1387 "Disconnect LS failed: %s\n",
1388 validation_errors[ret]);
1389 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1390 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1391 (ret == VERR_NO_ASSOC) ?
1392 FCNVME_RJT_RC_INV_ASSOC :
1393 (ret == VERR_NO_CONN) ?
1394 FCNVME_RJT_RC_INV_CONN :
1395 FCNVME_RJT_RC_LOGIC,
1396 FCNVME_RJT_EXP_NONE, 0);
1397 return;
1398 }
1399
1400 /* format a response */
1401
1402 iod->lsreq->rsplen = sizeof(*acc);
1403
1404 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1405 fcnvme_lsdesc_len(
1406 sizeof(struct fcnvme_ls_disconnect_acc)),
1407 FCNVME_LS_DISCONNECT);
1408
1409
1410 /* are we to delete a Connection ID (queue) */
1411 if (queue) {
1412 int qid = queue->qid;
1413
1414 nvmet_fc_delete_target_queue(queue);
1415
1416 /* release the get taken by find_target_queue */
1417 nvmet_fc_tgt_q_put(queue);
1418
1419                 /* tear association down if admin queue (qid 0) terminated */
1420 if (!qid)
1421 del_assoc = true;
1422 }
1423
1424 /* release get taken in nvmet_fc_find_target_assoc */
1425 nvmet_fc_tgt_a_put(iod->assoc);
1426
1427 if (del_assoc)
1428 nvmet_fc_delete_target_assoc(iod->assoc);
1429 }
1430
1431
1432 /* *********************** NVME Ctrl Routines **************************** */
1433
1434
1435 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1436
1437 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1438
1439 static void
1440 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1441 {
1442 struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1443 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1444
1445 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1446 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1447 nvmet_fc_free_ls_iod(tgtport, iod);
1448 nvmet_fc_tgtport_put(tgtport);
1449 }
1450
1451 static void
1452 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1453 struct nvmet_fc_ls_iod *iod)
1454 {
1455 int ret;
1456
1457 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1458 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1459
1460 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1461 if (ret)
1462 nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1463 }
1464
1465 /*
1466 * Actual processing routine for received FC-NVME LS Requests from the LLD
1467 */
1468 static void
1469 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1470 struct nvmet_fc_ls_iod *iod)
1471 {
1472 struct fcnvme_ls_rqst_w0 *w0 =
1473 (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1474
1475 iod->lsreq->nvmet_fc_private = iod;
1476 iod->lsreq->rspbuf = iod->rspbuf;
1477 iod->lsreq->rspdma = iod->rspdma;
1478 iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1479         /* Be preventative. Handlers will later set to a valid length */
1480 iod->lsreq->rsplen = 0;
1481
1482 iod->assoc = NULL;
1483
1484 /*
1485 * handlers:
1486 * parse request input, execute the request, and format the
1487 * LS response
1488 */
1489 switch (w0->ls_cmd) {
1490 case FCNVME_LS_CREATE_ASSOCIATION:
1491 /* Creates Association and initial Admin Queue/Connection */
1492 nvmet_fc_ls_create_association(tgtport, iod);
1493 break;
1494 case FCNVME_LS_CREATE_CONNECTION:
1495 /* Creates an IO Queue/Connection */
1496 nvmet_fc_ls_create_connection(tgtport, iod);
1497 break;
1498 case FCNVME_LS_DISCONNECT:
1499 /* Terminate a Queue/Connection or the Association */
1500 nvmet_fc_ls_disconnect(tgtport, iod);
1501 break;
1502 default:
1503 iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1504 NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1505 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1506 }
1507
1508 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1509 }
1510
1511 /*
1512 * Actual processing routine for received FC-NVME LS Requests from the LLD
1513 */
1514 static void
1515 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1516 {
1517 struct nvmet_fc_ls_iod *iod =
1518 container_of(work, struct nvmet_fc_ls_iod, work);
1519 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1520
1521 nvmet_fc_handle_ls_rqst(tgtport, iod);
1522 }
1523
1524
1525 /**
1526 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1527  *                       upon the reception of an NVME LS request.
1528 *
1529 * The nvmet-fc layer will copy payload to an internal structure for
1530 * processing. As such, upon completion of the routine, the LLDD may
1531 * immediately free/reuse the LS request buffer passed in the call.
1532 *
1533 * If this routine returns error, the LLDD should abort the exchange.
1534 *
1535  * @target_port: pointer to the (registered) target port the LS was
1536 * received on.
1537 * @lsreq: pointer to a lsreq request structure to be used to reference
1538 * the exchange corresponding to the LS.
1539 * @lsreqbuf: pointer to the buffer containing the LS Request
1540 * @lsreqbuf_len: length, in bytes, of the received LS request
1541 */
1542 int
1543 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1544 struct nvmefc_tgt_ls_req *lsreq,
1545 void *lsreqbuf, u32 lsreqbuf_len)
1546 {
1547 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1548 struct nvmet_fc_ls_iod *iod;
1549
1550 if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1551 return -E2BIG;
1552
1553 if (!nvmet_fc_tgtport_get(tgtport))
1554 return -ESHUTDOWN;
1555
1556 iod = nvmet_fc_alloc_ls_iod(tgtport);
1557 if (!iod) {
1558 nvmet_fc_tgtport_put(tgtport);
1559 return -ENOENT;
1560 }
1561
1562 iod->lsreq = lsreq;
1563 iod->fcpreq = NULL;
1564 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1565 iod->rqstdatalen = lsreqbuf_len;
1566
1567 schedule_work(&iod->work);
1568
1569 return 0;
1570 }
1571 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
1572
1573
1574 /*
1575 * **********************
1576 * Start of FCP handling
1577 * **********************
1578 */
1579
1580 static int
1581 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1582 {
1583 struct scatterlist *sg;
1584 struct page *page;
1585 unsigned int nent;
1586 u32 page_len, length;
1587 int i = 0;
1588
1589 length = fod->total_length;
1590 nent = DIV_ROUND_UP(length, PAGE_SIZE);
1591 sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
1592 if (!sg)
1593 goto out;
1594
1595 sg_init_table(sg, nent);
1596
1597 while (length) {
1598 page_len = min_t(u32, length, PAGE_SIZE);
1599
1600 page = alloc_page(GFP_KERNEL);
1601 if (!page)
1602 goto out_free_pages;
1603
1604 sg_set_page(&sg[i], page, page_len, 0);
1605 length -= page_len;
1606 i++;
1607 }
1608
1609 fod->data_sg = sg;
1610 fod->data_sg_cnt = nent;
1611 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1612 ((fod->io_dir == NVMET_FCP_WRITE) ?
1613 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1614 /* note: write from initiator perspective */
1615
1616 return 0;
1617
1618 out_free_pages:
1619 while (i > 0) {
1620 i--;
1621 __free_page(sg_page(&sg[i]));
1622 }
1623 kfree(sg);
1624 fod->data_sg = NULL;
1625 fod->data_sg_cnt = 0;
1626 out:
1627 return NVME_SC_INTERNAL;
1628 }
1629
1630 static void
1631 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1632 {
1633 struct scatterlist *sg;
1634 int count;
1635
1636 if (!fod->data_sg || !fod->data_sg_cnt)
1637 return;
1638
1639 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1640 ((fod->io_dir == NVMET_FCP_WRITE) ?
1641 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1642 for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
1643 __free_page(sg_page(sg));
1644 kfree(fod->data_sg);
1645 fod->data_sg = NULL;
1646 fod->data_sg_cnt = 0;
1647 }
1648
1649
1650 static bool
1651 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1652 {
1653 u32 sqtail, used;
1654
1655 /* egad, this is ugly. And sqtail is just a best guess */
1656 sqtail = atomic_read(&q->sqtail) % q->sqsize;
1657
1658 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1659 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
1660 }
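
/*
 * Worked example (illustrative only): for sqsize = 32, sqhd = 5 and a
 * best-guess sqtail of 3, used = 3 + 32 - 5 = 30, and 30 * 10 = 300 is
 * >= (32 - 1) * 9 = 279, so the queue is treated as 90% or more full.
 */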
1661
1662 /*
1663 * Prep RSP payload.
1664 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1665 */
1666 static void
1667 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1668 struct nvmet_fc_fcp_iod *fod)
1669 {
1670 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1671 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1672 struct nvme_completion *cqe = &ersp->cqe;
1673 u32 *cqewd = (u32 *)cqe;
1674 bool send_ersp = false;
1675 u32 rsn, rspcnt, xfr_length;
1676
1677 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1678 xfr_length = fod->total_length;
1679 else
1680 xfr_length = fod->offset;
1681
1682 /*
1683 * check to see if we can send a 0's rsp.
1684 * Note: to send a 0's response, the NVME-FC host transport will
1685 * recreate the CQE. The host transport knows: sq id, SQHD (last
1686 * seen in an ersp), and command_id. Thus it will create a
1687 * zero-filled CQE with those known fields filled in. Transport
1688 * must send an ersp for any condition where the cqe won't match
1689 * this.
1690 *
1691 * Here are the FC-NVME mandated cases where we must send an ersp:
1692 * every N responses, where N=ersp_ratio
1693 * force fabric commands to send ersp's (not in FC-NVME but good
1694 * practice)
1695 * normal cmds: any time status is non-zero, or status is zero
1696 * but words 0 or 1 are non-zero.
1697 * the SQ is 90% or more full
1698 * the cmd is a fused command
1699 * transferred data length not equal to cmd iu length
1700 */
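	/*
	 * Illustrative example (not normative): with ersp_ratio = 8 and a
	 * clean completion (status 0, cqe words 0/1 zero, full transfer,
	 * SQ well below 90%, not fused/fabrics), seven of every eight
	 * responses go out as the short zero-filled rsp; the eighth
	 * (rspcnt % 8 == 0), or any exception above, forces a full ersp.
	 */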
1701 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1702 if (!(rspcnt % fod->queue->ersp_ratio) ||
1703 sqe->opcode == nvme_fabrics_command ||
1704 xfr_length != fod->total_length ||
1705 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1706 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1707 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
1708 send_ersp = true;
1709
1710 /* re-set the fields */
1711 fod->fcpreq->rspaddr = ersp;
1712 fod->fcpreq->rspdma = fod->rspdma;
1713
1714 if (!send_ersp) {
1715 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1716 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1717 } else {
1718 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1719 rsn = atomic_inc_return(&fod->queue->rsn);
1720 ersp->rsn = cpu_to_be32(rsn);
1721 ersp->xfrd_len = cpu_to_be32(xfr_length);
1722 fod->fcpreq->rsplen = sizeof(*ersp);
1723 }
1724
1725 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1726 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1727 }
1728
1729 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1730
1731 static void
1732 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
1733 struct nvmet_fc_fcp_iod *fod)
1734 {
1735 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1736
1737 /* data no longer needed */
1738 nvmet_fc_free_tgt_pgs(fod);
1739
1740 /*
1741 * if an ABTS was received or we issued the fcp_abort early
1742 * don't call abort routine again.
1743 */
1744 /* no need to take lock - lock was taken earlier to get here */
1745 if (!fod->aborted)
1746 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
1747
1748 nvmet_fc_free_fcp_iod(fod->queue, fod);
1749 }
1750
1751 static void
1752 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1753 struct nvmet_fc_fcp_iod *fod)
1754 {
1755 int ret;
1756
1757 fod->fcpreq->op = NVMET_FCOP_RSP;
1758 fod->fcpreq->timeout = 0;
1759
1760 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1761
1762 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1763 if (ret)
1764 nvmet_fc_abort_op(tgtport, fod);
1765 }
1766
1767 static void
1768 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1769 struct nvmet_fc_fcp_iod *fod, u8 op)
1770 {
1771 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1772 struct scatterlist *sg, *datasg;
1773 unsigned long flags;
1774 u32 tlen, sg_off;
1775 int ret;
1776
1777 fcpreq->op = op;
1778 fcpreq->offset = fod->offset;
1779 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1780 tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
1781 (fod->total_length - fod->offset));
1782 tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
1783 tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
1784 * PAGE_SIZE);
1785 fcpreq->transfer_length = tlen;
1786 fcpreq->transferred_length = 0;
1787 fcpreq->fcp_error = 0;
1788 fcpreq->rsplen = 0;
1789
1790 fcpreq->sg_cnt = 0;
1791
1792 datasg = fod->next_sg;
1793 sg_off = fod->next_sg_offset;
1794
1795 for (sg = fcpreq->sg ; tlen; sg++) {
1796 *sg = *datasg;
1797 if (sg_off) {
1798 sg->offset += sg_off;
1799 sg->length -= sg_off;
1800 sg->dma_address += sg_off;
1801 sg_off = 0;
1802 }
1803 if (tlen < sg->length) {
1804 sg->length = tlen;
1805 fod->next_sg = datasg;
1806 fod->next_sg_offset += tlen;
1807 } else if (tlen == sg->length) {
1808 fod->next_sg_offset = 0;
1809 fod->next_sg = sg_next(datasg);
1810 } else {
1811 fod->next_sg_offset = 0;
1812 datasg = sg_next(datasg);
1813 }
1814 tlen -= sg->length;
1815 fcpreq->sg_cnt++;
1816 }
1817
1818 /*
1819 * If the last READDATA request: check if LLDD supports
1820 * combined xfr with response.
1821 */
1822 if ((op == NVMET_FCOP_READDATA) &&
1823 ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
1824 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1825 fcpreq->op = NVMET_FCOP_READDATA_RSP;
1826 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1827 }
1828
1829 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1830 if (ret) {
1831 /*
1832 * should be ok to set w/o lock as its in the thread of
1833 * execution (not an async timer routine) and doesn't
1834 * contend with any clearing action
1835 */
1836 fod->abort = true;
1837
1838 if (op == NVMET_FCOP_WRITEDATA) {
1839 spin_lock_irqsave(&fod->flock, flags);
1840 fod->writedataactive = false;
1841 spin_unlock_irqrestore(&fod->flock, flags);
1842 nvmet_req_complete(&fod->req,
1843 NVME_SC_FC_TRANSPORT_ERROR);
1844 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1845 fcpreq->fcp_error = ret;
1846 fcpreq->transferred_length = 0;
1847 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1848 }
1849 }
1850 }
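
/*
 * Chunking sketch (illustrative; assumes the LLDD sgl limits and
 * NVME_FC_MAX_SEGMENTS don't clamp below 256KB): a 1MB write is moved
 * as four WRITEDATA operations, since transfer_length is capped at
 * NVMET_FC_MAX_KB_PER_XFR (256KB) per op; fod->offset advances by
 * transferred_length in nvmet_fc_fod_op_done() until it reaches
 * fod->total_length.
 */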
1851
1852 static inline bool
1853 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
1854 {
1855 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1856 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1857
1858 /* if in the middle of an io and we need to tear down */
1859 if (abort) {
1860 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
1861 nvmet_req_complete(&fod->req,
1862 NVME_SC_FC_TRANSPORT_ERROR);
1863 return true;
1864 }
1865
1866 nvmet_fc_abort_op(tgtport, fod);
1867 return true;
1868 }
1869
1870 return false;
1871 }
1872
1873 /*
1874 * actual done handler for FCP operations when completed by the lldd
1875 */
1876 static void
1877 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1878 {
1879 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1880 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1881 unsigned long flags;
1882 bool abort;
1883
1884 spin_lock_irqsave(&fod->flock, flags);
1885 abort = fod->abort;
1886 fod->writedataactive = false;
1887 spin_unlock_irqrestore(&fod->flock, flags);
1888
1889 switch (fcpreq->op) {
1890
1891 case NVMET_FCOP_WRITEDATA:
1892 if (__nvmet_fc_fod_op_abort(fod, abort))
1893 return;
1894 if (fcpreq->fcp_error ||
1895 fcpreq->transferred_length != fcpreq->transfer_length) {
1896 spin_lock(&fod->flock);
1897 fod->abort = true;
1898 spin_unlock(&fod->flock);
1899
1900 nvmet_req_complete(&fod->req,
1901 NVME_SC_FC_TRANSPORT_ERROR);
1902 return;
1903 }
1904
1905 fod->offset += fcpreq->transferred_length;
1906 if (fod->offset != fod->total_length) {
1907 spin_lock_irqsave(&fod->flock, flags);
1908 fod->writedataactive = true;
1909 spin_unlock_irqrestore(&fod->flock, flags);
1910
1911 /* transfer the next chunk */
1912 nvmet_fc_transfer_fcp_data(tgtport, fod,
1913 NVMET_FCOP_WRITEDATA);
1914 return;
1915 }
1916
1917 /* data transfer complete, resume with nvmet layer */
1918
1919 fod->req.execute(&fod->req);
1920
1921 break;
1922
1923 case NVMET_FCOP_READDATA:
1924 case NVMET_FCOP_READDATA_RSP:
1925 if (__nvmet_fc_fod_op_abort(fod, abort))
1926 return;
1927 if (fcpreq->fcp_error ||
1928 fcpreq->transferred_length != fcpreq->transfer_length) {
1929 nvmet_fc_abort_op(tgtport, fod);
1930 return;
1931 }
1932
1933 /* success */
1934
1935 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
1936 /* data no longer needed */
1937 nvmet_fc_free_tgt_pgs(fod);
1938 nvmet_fc_free_fcp_iod(fod->queue, fod);
1939 return;
1940 }
1941
1942 fod->offset += fcpreq->transferred_length;
1943 if (fod->offset != fod->total_length) {
1944 /* transfer the next chunk */
1945 nvmet_fc_transfer_fcp_data(tgtport, fod,
1946 NVMET_FCOP_READDATA);
1947 return;
1948 }
1949
1950 /* data transfer complete, send response */
1951
1952 /* data no longer needed */
1953 nvmet_fc_free_tgt_pgs(fod);
1954
1955 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
1956
1957 break;
1958
1959 case NVMET_FCOP_RSP:
1960 if (__nvmet_fc_fod_op_abort(fod, abort))
1961 return;
1962 nvmet_fc_free_fcp_iod(fod->queue, fod);
1963 break;
1964
1965 default:
1966 break;
1967 }
1968 }
1969
1970 static void
1971 nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
1972 {
1973 struct nvmet_fc_fcp_iod *fod =
1974 container_of(work, struct nvmet_fc_fcp_iod, done_work);
1975
1976 nvmet_fc_fod_op_done(fod);
1977 }
1978
1979 static void
1980 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
1981 {
1982 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
1983 struct nvmet_fc_tgt_queue *queue = fod->queue;
1984
1985 if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
1986 /* context switch so completion is not in ISR context */
1987 queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
1988 else
1989 nvmet_fc_fod_op_done(fod);
1990 }
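
/*
 * Illustrative sketch (not part of this file): how a hypothetical LLDD
 * opts in to the ISR-context deferrals used above. If the LLDD delivers
 * command receptions or op completions from its interrupt handler, it
 * sets the corresponding target_features bits and the transport bounces
 * the work onto the queue's work_q (queue_work_on() above); otherwise
 * the handlers run inline in the caller's context. Names prefixed
 * "example_" are assumptions; only fields referenced by this file are
 * shown, the remaining mandatory template entries are omitted.
 */
static struct nvmet_fc_target_template example_lldd_tgt_template = {
	/* command receptions and op completions arrive in ISR context */
	.target_features	= NVMET_FCTGTFEAT_CMD_IN_ISR |
				  NVMET_FCTGTFEAT_OPDONE_IN_ISR,
	/* io queues are mapped onto hw queues modulo this count */
	.max_hw_queues		= 4,
};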
1991
1992 /*
1993 * actual completion handler after execution by the nvmet layer
1994 */
1995 static void
1996 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
1997 struct nvmet_fc_fcp_iod *fod, int status)
1998 {
1999 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2000 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2001 unsigned long flags;
2002 bool abort;
2003
2004 spin_lock_irqsave(&fod->flock, flags);
2005 abort = fod->abort;
2006 spin_unlock_irqrestore(&fod->flock, flags);
2007
2008 /* if we have a CQE, snoop the last sq_head value */
2009 if (!status)
2010 fod->queue->sqhd = cqe->sq_head;
2011
2012 if (abort) {
2013 nvmet_fc_abort_op(tgtport, fod);
2014 return;
2015 }
2016
2017 /* if an error occurred handling the cmd after initial parsing */
2018 if (status) {
2019 /* fudge up a failed CQE status for our transport error */
2020 memset(cqe, 0, sizeof(*cqe));
2021 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
2022 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2023 cqe->command_id = sqe->command_id;
2024 cqe->status = cpu_to_le16(status);
2025 } else {
2026
2027 /*
2028 * try to push the data even if the SQE status is non-zero.
2029 * There may be a status where data still was intended to
2030 * be moved
2031 */
2032 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2033 /* push the data over before sending rsp */
2034 nvmet_fc_transfer_fcp_data(tgtport, fod,
2035 NVMET_FCOP_READDATA);
2036 return;
2037 }
2038
2039 /* writes & no data - fall thru */
2040 }
2041
2042 /* data no longer needed */
2043 nvmet_fc_free_tgt_pgs(fod);
2044
2045 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2046 }
2047
2048
2049 static void
2050 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2051 {
2052 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2053 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2054
2055 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2056 }
2057
2058
2059 /*
2060 * Actual processing routine for received FC-NVME FCP Requests from the LLDD
2061 */
2062 static void
2063 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2064 struct nvmet_fc_fcp_iod *fod)
2065 {
2066 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2067 int ret;
2068
2069 /*
2070 * Fused commands are currently not supported in the Linux
2071 * implementation.
2072 *
2073 * As such, the FC transport implementation does not inspect the
2074 * fused-command flags, nor does it hold delivery to the upper
2075 * layer until both commands have been received and ordered by CSN.
2076 */
2077
2078 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2079
2080 fod->total_length = be32_to_cpu(cmdiu->data_len);
2081 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2082 fod->io_dir = NVMET_FCP_WRITE;
2083 if (!nvme_is_write(&cmdiu->sqe))
2084 goto transport_error;
2085 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2086 fod->io_dir = NVMET_FCP_READ;
2087 if (nvme_is_write(&cmdiu->sqe))
2088 goto transport_error;
2089 } else {
2090 fod->io_dir = NVMET_FCP_NODATA;
2091 if (fod->total_length)
2092 goto transport_error;
2093 }
2094
2095 fod->req.cmd = &fod->cmdiubuf.sqe;
2096 fod->req.rsp = &fod->rspiubuf.cqe;
2097 fod->req.port = fod->queue->port;
2098
2099 /* ensure nvmet handlers will set cmd handler callback */
2100 fod->req.execute = NULL;
2101
2102 /* clear any response payload */
2103 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2104
2105 fod->data_sg = NULL;
2106 fod->data_sg_cnt = 0;
2107
2108 ret = nvmet_req_init(&fod->req,
2109 &fod->queue->nvme_cq,
2110 &fod->queue->nvme_sq,
2111 &nvmet_fc_tgt_fcp_ops);
2112 if (!ret) {
2113 /* bad SQE content or invalid ctrl state */
2114 /* nvmet layer has already called op done to send rsp. */
2115 return;
2116 }
2117
2118 /* keep a running counter of tail position */
2119 atomic_inc(&fod->queue->sqtail);
2120
2121 if (fod->total_length) {
2122 ret = nvmet_fc_alloc_tgt_pgs(fod);
2123 if (ret) {
2124 nvmet_req_complete(&fod->req, ret);
2125 return;
2126 }
2127 }
2128 fod->req.sg = fod->data_sg;
2129 fod->req.sg_cnt = fod->data_sg_cnt;
2130 fod->offset = 0;
2131 fod->next_sg = fod->data_sg;
2132 fod->next_sg_offset = 0;
2133
2134 if (fod->io_dir == NVMET_FCP_WRITE) {
2135 /* pull the data over before invoking nvmet layer */
2136 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2137 return;
2138 }
2139
2140 /*
2141 * Reads or no data:
2142 *
2143 * can invoke the nvmet layer now. If read data, cmd completion will
2144 * push the data
2145 */
2146
2147 fod->req.execute(&fod->req);
2148
2149 return;
2150
2151 transport_error:
2152 nvmet_fc_abort_op(tgtport, fod);
2153 }
2154
2155 /*
2156 * Work-context wrapper to process a received FC-NVME FCP Request from the LLDD
2157 */
2158 static void
2159 nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2160 {
2161 struct nvmet_fc_fcp_iod *fod =
2162 container_of(work, struct nvmet_fc_fcp_iod, work);
2163 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2164
2165 nvmet_fc_handle_fcp_rqst(tgtport, fod);
2166 }
2167
2168 /**
2169 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2170 * upon the reception of an NVME FCP CMD IU.
2171 *
2172 * Pass an FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2173 * layer for processing.
2174 *
2175 * The nvmet-fc layer will copy cmd payload to an internal structure for
2176 * processing. As such, upon completion of the routine, the LLDD may
2177 * immediately free/reuse the CMD IU buffer passed in the call.
2178 *
2179 * If this routine returns an error, the LLDD should abort the exchange.
2180 *
2181 * @target_port: pointer to the (registered) target port the FCP CMD IU
2182 * was received on.
2183 * @fcpreq: pointer to a fcpreq request structure to be used to reference
2184 * the exchange corresponding to the FCP Exchange.
2185 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
2186 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2187 */
2188 int
2189 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2190 struct nvmefc_tgt_fcp_req *fcpreq,
2191 void *cmdiubuf, u32 cmdiubuf_len)
2192 {
2193 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2194 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2195 struct nvmet_fc_tgt_queue *queue;
2196 struct nvmet_fc_fcp_iod *fod;
2197
2198 /* validate iu, so the connection id can be used to find the queue */
2199 if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2200 (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2201 (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2202 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2203 return -EIO;
2204
2205 queue = nvmet_fc_find_target_queue(tgtport,
2206 be64_to_cpu(cmdiu->connection_id));
2207 if (!queue)
2208 return -ENOTCONN;
2209
2210 /*
2211 * Note: a queue reference was taken by nvmet_fc_find_target_queue().
2212 * After successful fod allocation, the fod inherits ownership of
2213 * that reference and will release the reference
2214 * when the fod is freed.
2215 */
2216
2217 fod = nvmet_fc_alloc_fcp_iod(queue);
2218 if (!fod) {
2219 /* release the queue lookup reference */
2220 nvmet_fc_tgt_q_put(queue);
2221 return -ENOENT;
2222 }
2223
2224 fcpreq->nvmet_fc_private = fod;
2225 fod->fcpreq = fcpreq;
2226 /*
2227 * Put all admin cmds on hw queue id 0. All io commands are mapped
2228 * to the hw queues on a modulo basis ((qid - 1) % max_hw_queues).
2229 */
2230 fcpreq->hwqid = queue->qid ?
2231 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
2232 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2233
2234 if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
2235 queue_work_on(queue->cpu, queue->work_q, &fod->work);
2236 else
2237 nvmet_fc_handle_fcp_rqst(tgtport, fod);
2238
2239 return 0;
2240 }
2241 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
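
/*
 * Illustrative sketch (not part of this file): a hypothetical LLDD
 * receive path handing a just-arrived FCP CMD IU to the transport.
 * The transport copies the CMD IU into its own fod, so the IU buffer
 * may be reused as soon as the call returns, and it selects
 * fcpreq->hwqid itself (admin queue on hw queue 0, io queues spread
 * modulo max_hw_queues). On error the LLDD must terminate the
 * exchange. All "example_" names are assumptions.
 */
static void example_lldd_abort_exchange(struct nvmefc_tgt_fcp_req *fcpreq);
					/* hypothetical LLDD helper */

static void
example_lldd_recv_fcp_cmd(struct nvmet_fc_target_port *tport,
			  struct nvmefc_tgt_fcp_req *fcpreq,
			  void *cmdiubuf, u32 cmdiubuf_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(tport, fcpreq, cmdiubuf, cmdiubuf_len);
	if (ret) {
		/* -EIO: malformed IU, -ENOTCONN: unknown connection,
		 * -ENOENT: no free fod; abort the exchange per the
		 * contract above.
		 */
		example_lldd_abort_exchange(fcpreq);
		return;
	}

	/* success: cmdiubuf is no longer referenced by the transport */
}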
2242
2243 /**
2244 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2245 * upon the reception of an ABTS for an FCP command
2246 *
2247 * Notify the transport that an ABTS has been received for an FCP command
2248 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2249 * LLDD believes the command is still being worked on
2250 * (template_ops->fcp_req_release() has not been called).
2251 *
2252 * The transport will wait for any outstanding work (an op to the LLDD,
2253 * which the LLDD should complete with error due to the ABTS; or the
2254 * completion from the nvmet layer of the nvme command), then will
2255 * stop processing and call the LLDD's fcp_req_release() callback to
2256 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
2257 * to the ABTS either after return from this function (assuming any
2258 * outstanding op work has been terminated) or upon the callback being
2259 * called.
2260 *
2261 * @target_port: pointer to the (registered) target port the FCP CMD IU
2262 * was received on.
2263 * @fcpreq: pointer to the fcpreq request structure that corresponds
2264 * to the exchange that received the ABTS.
2265 */
2266 void
2267 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2268 struct nvmefc_tgt_fcp_req *fcpreq)
2269 {
2270 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2271 struct nvmet_fc_tgt_queue *queue;
2272 unsigned long flags;
2273
2274 if (!fod || fod->fcpreq != fcpreq)
2275 /* job appears to have already completed, ignore abort */
2276 return;
2277
2278 queue = fod->queue;
2279
2280 spin_lock_irqsave(&queue->qlock, flags);
2281 if (fod->active) {
2282 /*
2283 * mark as abort. The abort handler, invoked upon completion
2284 * of any work, will detect the aborted status and do the
2285 * callback.
2286 */
2287 spin_lock(&fod->flock);
2288 fod->abort = true;
2289 fod->aborted = true;
2290 spin_unlock(&fod->flock);
2291 }
2292 spin_unlock_irqrestore(&queue->qlock, flags);
2293 }
2294 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
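
/*
 * Illustrative sketch (not part of this file): a hypothetical LLDD ABTS
 * handler. The call above only marks the exchange as aborted; the i/o
 * context comes back to the LLDD later via the template's
 * fcp_req_release() callback, and the LLDD may send its BA_ACC either
 * after this call returns or when that callback fires. All "example_"
 * names are assumptions.
 */
static void example_lldd_send_ba_acc(struct nvmefc_tgt_fcp_req *fcpreq);
					/* hypothetical LLDD helper */

static void
example_lldd_recv_abts(struct nvmet_fc_target_port *tport,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* tell the transport to stop issuing further ops on the exchange */
	nvmet_fc_rcv_fcp_abort(tport, fcpreq);

	/* or defer the BA_ACC until fcp_req_release() is called */
	example_lldd_send_ba_acc(fcpreq);
}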
2295
2296 enum {
2297 FCT_TRADDR_ERR = 0,
2298 FCT_TRADDR_WWNN = 1 << 0,
2299 FCT_TRADDR_WWPN = 1 << 1,
2300 };
2301
2302 struct nvmet_fc_traddr {
2303 u64 nn;
2304 u64 pn;
2305 };
2306
2307 static const match_table_t traddr_opt_tokens = {
2308 { FCT_TRADDR_WWNN, "nn-%s" },
2309 { FCT_TRADDR_WWPN, "pn-%s" },
2310 { FCT_TRADDR_ERR, NULL }
2311 };
2312
2313 static int
2314 nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
2315 {
2316 substring_t args[MAX_OPT_ARGS];
2317 char *options, *o, *p;
2318 int token, ret = 0;
2319 u64 token64;
2320
2321 options = o = kstrdup(buf, GFP_KERNEL);
2322 if (!options)
2323 return -ENOMEM;
2324
2325 while ((p = strsep(&o, ":\n")) != NULL) {
2326 if (!*p)
2327 continue;
2328
2329 token = match_token(p, traddr_opt_tokens, args);
2330 switch (token) {
2331 case FCT_TRADDR_WWNN:
2332 if (match_u64(args, &token64)) {
2333 ret = -EINVAL;
2334 goto out;
2335 }
2336 traddr->nn = token64;
2337 break;
2338 case FCT_TRADDR_WWPN:
2339 if (match_u64(args, &token64)) {
2340 ret = -EINVAL;
2341 goto out;
2342 }
2343 traddr->pn = token64;
2344 break;
2345 default:
2346 pr_warn("unknown traddr token or missing value '%s'\n",
2347 p);
2348 ret = -EINVAL;
2349 goto out;
2350 }
2351 }
2352
2353 out:
2354 kfree(options);
2355 return ret;
2356 }
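
/*
 * Illustrative only: the traddr string configured on the nvmet port
 * (configfs addr_traddr) is expected in the form "nn-<wwnn>:pn-<wwpn>",
 * with the values typically given as 0x-prefixed hex accepted by
 * match_u64(). The WWNN/WWPN below are made-up sample values.
 */
static void __maybe_unused
nvmet_fc_traddr_parse_example(void)
{
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	char buf[] = "nn-0x10000090fa945555:pn-0x10000090fa945556";

	if (!nvmet_fc_parse_traddr(&traddr, buf))
		pr_info("parsed traddr nn=0x%llx pn=0x%llx\n",
			traddr.nn, traddr.pn);
}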
2357
2358 static int
2359 nvmet_fc_add_port(struct nvmet_port *port)
2360 {
2361 struct nvmet_fc_tgtport *tgtport;
2362 struct nvmet_fc_traddr traddr = { 0L, 0L };
2363 unsigned long flags;
2364 int ret;
2365
2366 /* validate the address info */
2367 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2368 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2369 return -EINVAL;
2370
2371 /* map the traddr address info to a target port */
2372
2373 ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
2374 if (ret)
2375 return ret;
2376
2377 ret = -ENXIO;
2378 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2379 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2380 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2381 (tgtport->fc_target_port.port_name == traddr.pn)) {
2382 /* an FC port can be bound to only one nvmet port id */
2383 if (!tgtport->port) {
2384 tgtport->port = port;
2385 port->priv = tgtport;
2386 nvmet_fc_tgtport_get(tgtport);
2387 ret = 0;
2388 } else
2389 ret = -EALREADY;
2390 break;
2391 }
2392 }
2393 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2394 return ret;
2395 }
2396
2397 static void
2398 nvmet_fc_remove_port(struct nvmet_port *port)
2399 {
2400 struct nvmet_fc_tgtport *tgtport = port->priv;
2401 unsigned long flags;
2402
2403 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2404 if (tgtport->port == port) {
2405 nvmet_fc_tgtport_put(tgtport);
2406 tgtport->port = NULL;
2407 }
2408 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2409 }
2410
2411 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2412 .owner = THIS_MODULE,
2413 .type = NVMF_TRTYPE_FC,
2414 .msdbd = 1,
2415 .add_port = nvmet_fc_add_port,
2416 .remove_port = nvmet_fc_remove_port,
2417 .queue_response = nvmet_fc_fcp_nvme_cmd_done,
2418 .delete_ctrl = nvmet_fc_delete_ctrl,
2419 };
2420
2421 static int __init nvmet_fc_init_module(void)
2422 {
2423 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2424 }
2425
2426 static void __exit nvmet_fc_exit_module(void)
2427 {
2428 /* sanity check - all targetports should be removed */
2429 if (!list_empty(&nvmet_fc_target_list))
2430 pr_warn("%s: targetport list not empty\n", __func__);
2431
2432 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2433
2434 ida_destroy(&nvmet_fc_tgtport_cnt);
2435 }
2436
2437 module_init(nvmet_fc_init_module);
2438 module_exit(nvmet_fc_exit_module);
2439
2440 MODULE_LICENSE("GPL v2");