1 /*
2 * Copyright (c) 2016 Avago Technologies. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful.
9 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13 * See the GNU General Public License for more details, a copy of which
14 * can be found in the file COPYING included with this package
15 *
16 */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/blk-mq.h>
21 #include <linux/parser.h>
22 #include <linux/random.h>
23 #include <uapi/scsi/fc/fc_fs.h>
24 #include <uapi/scsi/fc/fc_els.h>
25
26 #include "nvmet.h"
27 #include <linux/nvme-fc-driver.h>
28 #include <linux/nvme-fc.h>
29
30
31 /* *************************** Data Structures/Defines ****************** */
32
33
34 #define NVMET_LS_CTX_COUNT 4
35
36 /* for this implementation, assume small single frame rqst/rsp */
37 #define NVME_FC_MAX_LS_BUFFER_SIZE 2048
38
39 struct nvmet_fc_tgtport;
40 struct nvmet_fc_tgt_assoc;
41
42 struct nvmet_fc_ls_iod {
43 struct nvmefc_tgt_ls_req *lsreq;
44 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
45
46 struct list_head ls_list; /* tgtport->ls_list */
47
48 struct nvmet_fc_tgtport *tgtport;
49 struct nvmet_fc_tgt_assoc *assoc;
50
51 u8 *rqstbuf;
52 u8 *rspbuf;
53 u16 rqstdatalen;
54 dma_addr_t rspdma;
55
56 struct scatterlist sg[2];
57
58 struct work_struct work;
59 } __aligned(sizeof(unsigned long long));
60
61 #define NVMET_FC_MAX_KB_PER_XFR 256
62
63 enum nvmet_fcp_datadir {
64 NVMET_FCP_NODATA,
65 NVMET_FCP_WRITE,
66 NVMET_FCP_READ,
67 NVMET_FCP_ABORTED,
68 };
69
70 struct nvmet_fc_fcp_iod {
71 struct nvmefc_tgt_fcp_req *fcpreq;
72
73 struct nvme_fc_cmd_iu cmdiubuf;
74 struct nvme_fc_ersp_iu rspiubuf;
75 dma_addr_t rspdma;
76 struct scatterlist *data_sg;
77 struct scatterlist *next_sg;
78 int data_sg_cnt;
79 u32 next_sg_offset;
80 u32 total_length;
81 u32 offset;
82 enum nvmet_fcp_datadir io_dir;
83 bool active;
84 bool abort;
85 bool aborted;
86 bool writedataactive;
87 spinlock_t flock;
88
89 struct nvmet_req req;
90 struct work_struct work;
91 struct work_struct done_work;
92
93 struct nvmet_fc_tgtport *tgtport;
94 struct nvmet_fc_tgt_queue *queue;
95
96 struct list_head fcp_list; /* tgtport->fcp_list */
97 };
98
99 struct nvmet_fc_tgtport {
100
101 struct nvmet_fc_target_port fc_target_port;
102
103 struct list_head tgt_list; /* nvmet_fc_target_list */
104 struct device *dev; /* dev for dma mapping */
105 struct nvmet_fc_target_template *ops;
106
107 struct nvmet_fc_ls_iod *iod;
108 spinlock_t lock;
109 struct list_head ls_list;
110 struct list_head ls_busylist;
111 struct list_head assoc_list;
112 struct ida assoc_cnt;
113 struct nvmet_port *port;
114 struct kref ref;
115 };
116
117 struct nvmet_fc_defer_fcp_req {
118 struct list_head req_list;
119 struct nvmefc_tgt_fcp_req *fcp_req;
120 };
121
122 struct nvmet_fc_tgt_queue {
123 bool ninetypercent;
124 u16 qid;
125 u16 sqsize;
126 u16 ersp_ratio;
127 __le16 sqhd;
128 int cpu;
129 atomic_t connected;
130 atomic_t sqtail;
131 atomic_t zrspcnt;
132 atomic_t rsn;
133 spinlock_t qlock;
134 struct nvmet_port *port;
135 struct nvmet_cq nvme_cq;
136 struct nvmet_sq nvme_sq;
137 struct nvmet_fc_tgt_assoc *assoc;
138 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
139 struct list_head fod_list;
140 struct list_head pending_cmd_list;
141 struct list_head avail_defer_list;
142 struct workqueue_struct *work_q;
143 struct kref ref;
144 } __aligned(sizeof(unsigned long long));
145
146 struct nvmet_fc_tgt_assoc {
147 u64 association_id;
148 u32 a_id;
149 struct nvmet_fc_tgtport *tgtport;
150 struct list_head a_list;
151 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES];
152 struct kref ref;
153 };
154
155
156 static inline int
157 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
158 {
159 return (iodptr - iodptr->tgtport->iod);
160 }
161
162 static inline int
163 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
164 {
165 return (fodptr - fodptr->queue->fod);
166 }
167
168
169 /*
170 * Association and Connection IDs:
171 *
172  * Association ID will have a random number in the upper 6 bytes and
173  * zero in the lower 2 bytes
174  *
175  * Connection IDs will be the Association ID with the QID or'd into the lower 2 bytes
176 *
177 * note: Association ID = Connection ID for queue 0
178 */
179 #define BYTES_FOR_QID sizeof(u16)
180 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
181 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
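/*
 * Worked example (illustrative values only): an association_id of
 * 0x1122334455660000 combined with qid 3 yields connection id
 * 0x1122334455660003. Masking with NVMET_FC_QUEUEID_MASK recovers the
 * qid (3); clearing the low 16 bits recovers the association id.
 */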
182
183 static inline u64
184 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
185 {
186 return (assoc->association_id | qid);
187 }
188
189 static inline u64
190 nvmet_fc_getassociationid(u64 connectionid)
191 {
192 return connectionid & ~NVMET_FC_QUEUEID_MASK;
193 }
194
195 static inline u16
196 nvmet_fc_getqueueid(u64 connectionid)
197 {
198 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
199 }
200
201 static inline struct nvmet_fc_tgtport *
202 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
203 {
204 return container_of(targetport, struct nvmet_fc_tgtport,
205 fc_target_port);
206 }
207
208 static inline struct nvmet_fc_fcp_iod *
209 nvmet_req_to_fod(struct nvmet_req *nvme_req)
210 {
211 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
212 }
213
214
215 /* *************************** Globals **************************** */
216
217
218 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
219
220 static LIST_HEAD(nvmet_fc_target_list);
221 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
222
223
224 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
225 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
226 static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
227 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
228 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
229 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
230 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
231 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
232 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
233 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
234 struct nvmet_fc_fcp_iod *fod);
235
236
237 /* *********************** FC-NVME DMA Handling **************************** */
238
239 /*
240  * The fcloop device passes in a NULL device pointer. Real LLDDs will
241  * pass in a valid device pointer. If NULL is passed to the dma mapping
242  * routines, then depending on the platform the mapping may or may not
243  * succeed, and may crash.
244  *
245  * As such:
246  * Wrap all the dma routines and check the dev pointer.
247  *
248  * For simple mappings (those that return just a dma address), we noop
249  * them, returning a dma address of 0.
250  *
251  * On more complex mappings (dma_map_sg), a pseudo routine fills
252  * in the scatter list, setting all dma addresses to 0.
253 */
254
255 static inline dma_addr_t
256 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
257 enum dma_data_direction dir)
258 {
259 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
260 }
261
262 static inline int
263 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
264 {
265 return dev ? dma_mapping_error(dev, dma_addr) : 0;
266 }
267
268 static inline void
269 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
270 enum dma_data_direction dir)
271 {
272 if (dev)
273 dma_unmap_single(dev, addr, size, dir);
274 }
275
276 static inline void
277 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
278 enum dma_data_direction dir)
279 {
280 if (dev)
281 dma_sync_single_for_cpu(dev, addr, size, dir);
282 }
283
284 static inline void
285 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
286 enum dma_data_direction dir)
287 {
288 if (dev)
289 dma_sync_single_for_device(dev, addr, size, dir);
290 }
291
292 /* pseudo dma_map_sg call */
293 static int
294 fc_map_sg(struct scatterlist *sg, int nents)
295 {
296 struct scatterlist *s;
297 int i;
298
299 WARN_ON(nents == 0 || sg[0].length == 0);
300
301 for_each_sg(sg, s, nents, i) {
302 s->dma_address = 0L;
303 #ifdef CONFIG_NEED_SG_DMA_LENGTH
304 s->dma_length = s->length;
305 #endif
306 }
307 return nents;
308 }
309
310 static inline int
311 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
312 enum dma_data_direction dir)
313 {
314 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
315 }
316
317 static inline void
318 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
319 enum dma_data_direction dir)
320 {
321 if (dev)
322 dma_unmap_sg(dev, sg, nents, dir);
323 }
324
325
326 /* *********************** FC-NVME Port Management ************************ */
327
328
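/*
 * Pre-allocate the target port's pool of LS request contexts: each iod
 * gets a single allocation holding both the request and response
 * buffers, and the response half is DMA-mapped for transmit up front.
 * On failure, previously prepared iods are unwound.
 */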
329 static int
330 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
331 {
332 struct nvmet_fc_ls_iod *iod;
333 int i;
334
335 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
336 GFP_KERNEL);
337 if (!iod)
338 return -ENOMEM;
339
340 tgtport->iod = iod;
341
342 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
343 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
344 iod->tgtport = tgtport;
345 list_add_tail(&iod->ls_list, &tgtport->ls_list);
346
347 iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
348 GFP_KERNEL);
349 if (!iod->rqstbuf)
350 goto out_fail;
351
352 iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
353
354 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
355 NVME_FC_MAX_LS_BUFFER_SIZE,
356 DMA_TO_DEVICE);
357 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
358 goto out_fail;
359 }
360
361 return 0;
362
363 out_fail:
364 kfree(iod->rqstbuf);
365 list_del(&iod->ls_list);
366 for (iod--, i--; i >= 0; iod--, i--) {
367 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
368 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
369 kfree(iod->rqstbuf);
370 list_del(&iod->ls_list);
371 }
372
373 kfree(iod);
374
375 return -EFAULT;
376 }
377
378 static void
379 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
380 {
381 struct nvmet_fc_ls_iod *iod = tgtport->iod;
382 int i;
383
384 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
385 fc_dma_unmap_single(tgtport->dev,
386 iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
387 DMA_TO_DEVICE);
388 kfree(iod->rqstbuf);
389 list_del(&iod->ls_list);
390 }
391 kfree(tgtport->iod);
392 }
393
394 static struct nvmet_fc_ls_iod *
395 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
396 {
397	struct nvmet_fc_ls_iod *iod;
398 unsigned long flags;
399
400 spin_lock_irqsave(&tgtport->lock, flags);
401 iod = list_first_entry_or_null(&tgtport->ls_list,
402 struct nvmet_fc_ls_iod, ls_list);
403 if (iod)
404 list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
405 spin_unlock_irqrestore(&tgtport->lock, flags);
406 return iod;
407 }
408
409
410 static void
411 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
412 struct nvmet_fc_ls_iod *iod)
413 {
414 unsigned long flags;
415
416 spin_lock_irqsave(&tgtport->lock, flags);
417 list_move(&iod->ls_list, &tgtport->ls_list);
418 spin_unlock_irqrestore(&tgtport->lock, flags);
419 }
420
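/*
 * Initialize the queue's array of fcp iods and DMA-map each response
 * IU buffer. If a mapping fails, the iods mapped so far are unwound.
 */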
421 static void
422 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
423 struct nvmet_fc_tgt_queue *queue)
424 {
425 struct nvmet_fc_fcp_iod *fod = queue->fod;
426 int i;
427
428 for (i = 0; i < queue->sqsize; fod++, i++) {
429 INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
430 INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
431 fod->tgtport = tgtport;
432 fod->queue = queue;
433 fod->active = false;
434 fod->abort = false;
435 fod->aborted = false;
436 fod->fcpreq = NULL;
437 list_add_tail(&fod->fcp_list, &queue->fod_list);
438 spin_lock_init(&fod->flock);
439
440 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
441 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
442 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
443 list_del(&fod->fcp_list);
444 for (fod--, i--; i >= 0; fod--, i--) {
445 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
446 sizeof(fod->rspiubuf),
447 DMA_TO_DEVICE);
448 fod->rspdma = 0L;
449 list_del(&fod->fcp_list);
450 }
451
452 return;
453 }
454 }
455 }
456
457 static void
458 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
459 struct nvmet_fc_tgt_queue *queue)
460 {
461 struct nvmet_fc_fcp_iod *fod = queue->fod;
462 int i;
463
464 for (i = 0; i < queue->sqsize; fod++, i++) {
465 if (fod->rspdma)
466 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
467 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
468 }
469 }
470
471 static struct nvmet_fc_fcp_iod *
472 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
473 {
474	struct nvmet_fc_fcp_iod *fod;
475
476 lockdep_assert_held(&queue->qlock);
477
478 fod = list_first_entry_or_null(&queue->fod_list,
479 struct nvmet_fc_fcp_iod, fcp_list);
480 if (fod) {
481 list_del(&fod->fcp_list);
482 fod->active = true;
483 /*
484 * no queue reference is taken, as it was taken by the
485 * queue lookup just prior to the allocation. The iod
486 * will "inherit" that reference.
487 */
488 }
489 return fod;
490 }
491
492
493 static void
494 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
495 struct nvmet_fc_tgt_queue *queue,
496 struct nvmefc_tgt_fcp_req *fcpreq)
497 {
498 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
499
500 /*
501	 * put all admin cmds on hw queue id 0. All io commands go to
502	 * a hw queue selected on a modulo basis: (qid - 1) % max_hw_queues.
503 */
504 fcpreq->hwqid = queue->qid ?
505 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
506
507 if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
508 queue_work_on(queue->cpu, queue->work_q, &fod->work);
509 else
510 nvmet_fc_handle_fcp_rqst(tgtport, fod);
511 }
512
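/*
 * Release the LLDD's hold on a completed request and either return the
 * fod to the queue's free list (dropping the queue reference taken at
 * lookup time) or immediately reuse it for the oldest command the LLDD
 * had to defer because no fod was available.
 */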
513 static void
514 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
515 struct nvmet_fc_fcp_iod *fod)
516 {
517 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
518 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
519 struct nvmet_fc_defer_fcp_req *deferfcp;
520 unsigned long flags;
521
522 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
523 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
524
525 fcpreq->nvmet_fc_private = NULL;
526
527 fod->active = false;
528 fod->abort = false;
529 fod->aborted = false;
530 fod->writedataactive = false;
531 fod->fcpreq = NULL;
532
533 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
534
535 spin_lock_irqsave(&queue->qlock, flags);
536 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
537 struct nvmet_fc_defer_fcp_req, req_list);
538 if (!deferfcp) {
539 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
540 spin_unlock_irqrestore(&queue->qlock, flags);
541
542 /* Release reference taken at queue lookup and fod allocation */
543 nvmet_fc_tgt_q_put(queue);
544 return;
545 }
546
547 /* Re-use the fod for the next pending cmd that was deferred */
548 list_del(&deferfcp->req_list);
549
550 fcpreq = deferfcp->fcp_req;
551
552 /* deferfcp can be reused for another IO at a later date */
553 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
554
555 spin_unlock_irqrestore(&queue->qlock, flags);
556
557	/* Save NVME CMD IU in fod */
558 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
559
560 /* Setup new fcpreq to be processed */
561 fcpreq->rspaddr = NULL;
562 fcpreq->rsplen = 0;
563 fcpreq->nvmet_fc_private = fod;
564 fod->fcpreq = fcpreq;
565 fod->active = true;
566
567 /* inform LLDD IO is now being processed */
568 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
569
570 /* Submit deferred IO for processing */
571 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
572
573 /*
574	 * Leave in place the queue lookup reference that was taken when
575	 * the fod was originally allocated; the reused fod inherits it.
576 */
577 }
578
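/*
 * Map a queue id to the cpu its work should run on. If the LLDD
 * exposes only one hw queue, leave the work unbound; otherwise pick
 * the idx'th active cpu, where idx is 0 for the admin queue and
 * (qid - 1) modulo the active cpu count for io queues.
 */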
579 static int
580 nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
581 {
582 int cpu, idx, cnt;
583
584 if (tgtport->ops->max_hw_queues == 1)
585 return WORK_CPU_UNBOUND;
586
587 /* Simple cpu selection based on qid modulo active cpu count */
588 idx = !qid ? 0 : (qid - 1) % num_active_cpus();
589
590 /* find the n'th active cpu */
591 for (cpu = 0, cnt = 0; ; ) {
592 if (cpu_active(cpu)) {
593 if (cnt == idx)
594 break;
595 cnt++;
596 }
597 cpu = (cpu + 1) % num_possible_cpus();
598 }
599
600 return cpu;
601 }
602
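/*
 * Allocate a target queue for the association, including its array of
 * fcp iods (sized by sqsize) and a per-queue workqueue, and initialize
 * the nvmet sq backing it. Returns NULL on any failure.
 */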
603 static struct nvmet_fc_tgt_queue *
604 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
605 u16 qid, u16 sqsize)
606 {
607 struct nvmet_fc_tgt_queue *queue;
608 unsigned long flags;
609 int ret;
610
611 if (qid >= NVMET_NR_QUEUES)
612 return NULL;
613
614 queue = kzalloc((sizeof(*queue) +
615 (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
616 GFP_KERNEL);
617 if (!queue)
618 return NULL;
619
620 if (!nvmet_fc_tgt_a_get(assoc))
621 goto out_free_queue;
622
623 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
624 assoc->tgtport->fc_target_port.port_num,
625 assoc->a_id, qid);
626 if (!queue->work_q)
627 goto out_a_put;
628
629 queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
630 queue->qid = qid;
631 queue->sqsize = sqsize;
632 queue->assoc = assoc;
633 queue->port = assoc->tgtport->port;
634 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
635 INIT_LIST_HEAD(&queue->fod_list);
636 INIT_LIST_HEAD(&queue->avail_defer_list);
637 INIT_LIST_HEAD(&queue->pending_cmd_list);
638 atomic_set(&queue->connected, 0);
639 atomic_set(&queue->sqtail, 0);
640 atomic_set(&queue->rsn, 1);
641 atomic_set(&queue->zrspcnt, 0);
642 spin_lock_init(&queue->qlock);
643 kref_init(&queue->ref);
644
645 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
646
647 ret = nvmet_sq_init(&queue->nvme_sq);
648 if (ret)
649 goto out_fail_iodlist;
650
651 WARN_ON(assoc->queues[qid]);
652 spin_lock_irqsave(&assoc->tgtport->lock, flags);
653 assoc->queues[qid] = queue;
654 spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
655
656 return queue;
657
658 out_fail_iodlist:
659 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
660 destroy_workqueue(queue->work_q);
661 out_a_put:
662 nvmet_fc_tgt_a_put(assoc);
663 out_free_queue:
664 kfree(queue);
665 return NULL;
666 }
667
668
669 static void
670 nvmet_fc_tgt_queue_free(struct kref *ref)
671 {
672 struct nvmet_fc_tgt_queue *queue =
673 container_of(ref, struct nvmet_fc_tgt_queue, ref);
674 unsigned long flags;
675
676 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
677 queue->assoc->queues[queue->qid] = NULL;
678 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
679
680 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
681
682 nvmet_fc_tgt_a_put(queue->assoc);
683
684 destroy_workqueue(queue->work_q);
685
686 kfree(queue);
687 }
688
689 static void
690 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
691 {
692 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
693 }
694
695 static int
696 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
697 {
698 return kref_get_unless_zero(&queue->ref);
699 }
700
701
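/*
 * Tear down a target queue: mark it disconnected, flag outstanding
 * iods as aborting (calling the LLDD abort only for those still
 * waiting on write data), hand deferred commands back to the LLDD,
 * drain the workqueue, and drop the queue's initial reference.
 */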
702 static void
703 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
704 {
705 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
706 struct nvmet_fc_fcp_iod *fod = queue->fod;
707 struct nvmet_fc_defer_fcp_req *deferfcp;
708 unsigned long flags;
709 int i, writedataactive;
710 bool disconnect;
711
712 disconnect = atomic_xchg(&queue->connected, 0);
713
714 spin_lock_irqsave(&queue->qlock, flags);
715	/* abort outstanding io's */
716 for (i = 0; i < queue->sqsize; fod++, i++) {
717 if (fod->active) {
718 spin_lock(&fod->flock);
719 fod->abort = true;
720 writedataactive = fod->writedataactive;
721 spin_unlock(&fod->flock);
722 /*
723 * only call lldd abort routine if waiting for
724 * writedata. other outstanding ops should finish
725 * on their own.
726 */
727 if (writedataactive) {
728 spin_lock(&fod->flock);
729 fod->aborted = true;
730 spin_unlock(&fod->flock);
731 tgtport->ops->fcp_abort(
732 &tgtport->fc_target_port, fod->fcpreq);
733 }
734 }
735 }
736
737 /* Cleanup defer'ed IOs in queue */
738 list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) {
739 list_del(&deferfcp->req_list);
740 kfree(deferfcp);
741 }
742
743 for (;;) {
744 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
745 struct nvmet_fc_defer_fcp_req, req_list);
746 if (!deferfcp)
747 break;
748
749 list_del(&deferfcp->req_list);
750 spin_unlock_irqrestore(&queue->qlock, flags);
751
752 tgtport->ops->defer_rcv(&tgtport->fc_target_port,
753 deferfcp->fcp_req);
754
755 tgtport->ops->fcp_abort(&tgtport->fc_target_port,
756 deferfcp->fcp_req);
757
758 tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
759 deferfcp->fcp_req);
760
761 kfree(deferfcp);
762
763 spin_lock_irqsave(&queue->qlock, flags);
764 }
765 spin_unlock_irqrestore(&queue->qlock, flags);
766
767 flush_workqueue(queue->work_q);
768
769 if (disconnect)
770 nvmet_sq_destroy(&queue->nvme_sq);
771
772 nvmet_fc_tgt_q_put(queue);
773 }
774
775 static struct nvmet_fc_tgt_queue *
776 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
777 u64 connection_id)
778 {
779 struct nvmet_fc_tgt_assoc *assoc;
780 struct nvmet_fc_tgt_queue *queue;
781 u64 association_id = nvmet_fc_getassociationid(connection_id);
782 u16 qid = nvmet_fc_getqueueid(connection_id);
783 unsigned long flags;
784
785 spin_lock_irqsave(&tgtport->lock, flags);
786 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
787 if (association_id == assoc->association_id) {
788 queue = assoc->queues[qid];
789 if (queue &&
790 (!atomic_read(&queue->connected) ||
791 !nvmet_fc_tgt_q_get(queue)))
792 queue = NULL;
793 spin_unlock_irqrestore(&tgtport->lock, flags);
794 return queue;
795 }
796 }
797 spin_unlock_irqrestore(&tgtport->lock, flags);
798 return NULL;
799 }
800
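/*
 * Allocate a new association for the target port. The association id
 * is a random value placed in the upper 6 bytes (the low 2 bytes are
 * reserved for the qid) and is regenerated until it is unique among
 * the port's existing associations.
 */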
801 static struct nvmet_fc_tgt_assoc *
802 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
803 {
804 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
805 unsigned long flags;
806 u64 ran;
807 int idx;
808 bool needrandom = true;
809
810 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
811 if (!assoc)
812 return NULL;
813
814 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
815 if (idx < 0)
816 goto out_free_assoc;
817
818 if (!nvmet_fc_tgtport_get(tgtport))
819 goto out_ida_put;
820
821 assoc->tgtport = tgtport;
822 assoc->a_id = idx;
823 INIT_LIST_HEAD(&assoc->a_list);
824 kref_init(&assoc->ref);
825
826 while (needrandom) {
827 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
828 ran = ran << BYTES_FOR_QID_SHIFT;
829
830 spin_lock_irqsave(&tgtport->lock, flags);
831 needrandom = false;
832 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
833 if (ran == tmpassoc->association_id) {
834 needrandom = true;
835 break;
836 }
837 if (!needrandom) {
838 assoc->association_id = ran;
839 list_add_tail(&assoc->a_list, &tgtport->assoc_list);
840 }
841 spin_unlock_irqrestore(&tgtport->lock, flags);
842 }
843
844 return assoc;
845
846 out_ida_put:
847 ida_simple_remove(&tgtport->assoc_cnt, idx);
848 out_free_assoc:
849 kfree(assoc);
850 return NULL;
851 }
852
853 static void
854 nvmet_fc_target_assoc_free(struct kref *ref)
855 {
856 struct nvmet_fc_tgt_assoc *assoc =
857 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
858 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
859 unsigned long flags;
860
861 spin_lock_irqsave(&tgtport->lock, flags);
862 list_del(&assoc->a_list);
863 spin_unlock_irqrestore(&tgtport->lock, flags);
864 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
865 kfree(assoc);
866 nvmet_fc_tgtport_put(tgtport);
867 }
868
869 static void
870 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
871 {
872 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
873 }
874
875 static int
876 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
877 {
878 return kref_get_unless_zero(&assoc->ref);
879 }
880
881 static void
882 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
883 {
884 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
885 struct nvmet_fc_tgt_queue *queue;
886 unsigned long flags;
887 int i;
888
889 spin_lock_irqsave(&tgtport->lock, flags);
890 for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
891 queue = assoc->queues[i];
892 if (queue) {
893 if (!nvmet_fc_tgt_q_get(queue))
894 continue;
895 spin_unlock_irqrestore(&tgtport->lock, flags);
896 nvmet_fc_delete_target_queue(queue);
897 nvmet_fc_tgt_q_put(queue);
898 spin_lock_irqsave(&tgtport->lock, flags);
899 }
900 }
901 spin_unlock_irqrestore(&tgtport->lock, flags);
902
903 nvmet_fc_tgt_a_put(assoc);
904 }
905
906 static struct nvmet_fc_tgt_assoc *
907 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
908 u64 association_id)
909 {
910 struct nvmet_fc_tgt_assoc *assoc;
911 struct nvmet_fc_tgt_assoc *ret = NULL;
912 unsigned long flags;
913
914 spin_lock_irqsave(&tgtport->lock, flags);
915 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
916 if (association_id == assoc->association_id) {
917 ret = assoc;
918 nvmet_fc_tgt_a_get(assoc);
919 break;
920 }
921 }
922 spin_unlock_irqrestore(&tgtport->lock, flags);
923
924 return ret;
925 }
926
927
928 /**
929  * nvmet_fc_register_targetport - transport entry point called by an
930  *                        LLDD to register the existence of a local
931  *                        NVME subsystem FC port.
932  * @pinfo:     pointer to information about the port to be registered
933  * @template:  LLDD entrypoints and operational parameters for the port
934  * @dev:       physical hardware device node port corresponds to. Will be
935  *             used for DMA mappings
936  * @portptr:   pointer to a target port pointer. Upon success, the routine
937  *             will allocate a nvmet_fc_target_port structure and place its
938  *             address in the target port pointer. Upon failure, the target
939  *             port pointer will be set to NULL.
940 *
941 * Returns:
942 * a completion status. Must be 0 upon success; a negative errno
943 * (ex: -ENXIO) upon failure.
944 */
945 int
946 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
947 struct nvmet_fc_target_template *template,
948 struct device *dev,
949 struct nvmet_fc_target_port **portptr)
950 {
951 struct nvmet_fc_tgtport *newrec;
952 unsigned long flags;
953 int ret, idx;
954
955 if (!template->xmt_ls_rsp || !template->fcp_op ||
956 !template->fcp_abort ||
957 !template->fcp_req_release || !template->targetport_delete ||
958 !template->max_hw_queues || !template->max_sgl_segments ||
959 !template->max_dif_sgl_segments || !template->dma_boundary) {
960 ret = -EINVAL;
961 goto out_regtgt_failed;
962 }
963
964 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
965 GFP_KERNEL);
966 if (!newrec) {
967 ret = -ENOMEM;
968 goto out_regtgt_failed;
969 }
970
971 idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
972 if (idx < 0) {
973 ret = -ENOSPC;
974 goto out_fail_kfree;
975 }
976
977 if (!get_device(dev) && dev) {
978 ret = -ENODEV;
979 goto out_ida_put;
980 }
981
982 newrec->fc_target_port.node_name = pinfo->node_name;
983 newrec->fc_target_port.port_name = pinfo->port_name;
984 newrec->fc_target_port.private = &newrec[1];
985 newrec->fc_target_port.port_id = pinfo->port_id;
986 newrec->fc_target_port.port_num = idx;
987 INIT_LIST_HEAD(&newrec->tgt_list);
988 newrec->dev = dev;
989 newrec->ops = template;
990 spin_lock_init(&newrec->lock);
991 INIT_LIST_HEAD(&newrec->ls_list);
992 INIT_LIST_HEAD(&newrec->ls_busylist);
993 INIT_LIST_HEAD(&newrec->assoc_list);
994 kref_init(&newrec->ref);
995 ida_init(&newrec->assoc_cnt);
996
997 ret = nvmet_fc_alloc_ls_iodlist(newrec);
998 if (ret) {
999 ret = -ENOMEM;
1000 goto out_free_newrec;
1001 }
1002
1003 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1004 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
1005 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1006
1007 *portptr = &newrec->fc_target_port;
1008 return 0;
1009
1010 out_free_newrec:
1011 put_device(dev);
1012 out_ida_put:
1013 ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
1014 out_fail_kfree:
1015 kfree(newrec);
1016 out_regtgt_failed:
1017 *portptr = NULL;
1018 return ret;
1019 }
1020 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
1021
1022
1023 static void
1024 nvmet_fc_free_tgtport(struct kref *ref)
1025 {
1026 struct nvmet_fc_tgtport *tgtport =
1027 container_of(ref, struct nvmet_fc_tgtport, ref);
1028 struct device *dev = tgtport->dev;
1029 unsigned long flags;
1030
1031 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1032 list_del(&tgtport->tgt_list);
1033 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1034
1035 nvmet_fc_free_ls_iodlist(tgtport);
1036
1037 /* let the LLDD know we've finished tearing it down */
1038 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1039
1040 ida_simple_remove(&nvmet_fc_tgtport_cnt,
1041 tgtport->fc_target_port.port_num);
1042
1043 ida_destroy(&tgtport->assoc_cnt);
1044
1045 kfree(tgtport);
1046
1047 put_device(dev);
1048 }
1049
1050 static void
1051 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1052 {
1053 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1054 }
1055
1056 static int
1057 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1058 {
1059 return kref_get_unless_zero(&tgtport->ref);
1060 }
1061
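/*
 * Walk the target port's association list and tear down every
 * association. Each entry is referenced before the port lock is
 * dropped for the teardown.
 */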
1062 static void
1063 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1064 {
1065 struct nvmet_fc_tgt_assoc *assoc, *next;
1066 unsigned long flags;
1067
1068 spin_lock_irqsave(&tgtport->lock, flags);
1069 list_for_each_entry_safe(assoc, next,
1070 &tgtport->assoc_list, a_list) {
1071 if (!nvmet_fc_tgt_a_get(assoc))
1072 continue;
1073 spin_unlock_irqrestore(&tgtport->lock, flags);
1074 nvmet_fc_delete_target_assoc(assoc);
1075 nvmet_fc_tgt_a_put(assoc);
1076 spin_lock_irqsave(&tgtport->lock, flags);
1077 }
1078 spin_unlock_irqrestore(&tgtport->lock, flags);
1079 }
1080
1081 /*
1082 * nvmet layer has called to terminate an association
1083 */
1084 static void
1085 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
1086 {
1087 struct nvmet_fc_tgtport *tgtport, *next;
1088 struct nvmet_fc_tgt_assoc *assoc;
1089 struct nvmet_fc_tgt_queue *queue;
1090 unsigned long flags;
1091 bool found_ctrl = false;
1092
1093 /* this is a bit ugly, but don't want to make locks layered */
1094 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1095 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1096 tgt_list) {
1097 if (!nvmet_fc_tgtport_get(tgtport))
1098 continue;
1099 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1100
1101 spin_lock_irqsave(&tgtport->lock, flags);
1102 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1103 queue = assoc->queues[0];
1104 if (queue && queue->nvme_sq.ctrl == ctrl) {
1105 if (nvmet_fc_tgt_a_get(assoc))
1106 found_ctrl = true;
1107 break;
1108 }
1109 }
1110 spin_unlock_irqrestore(&tgtport->lock, flags);
1111
1112 nvmet_fc_tgtport_put(tgtport);
1113
1114 if (found_ctrl) {
1115 nvmet_fc_delete_target_assoc(assoc);
1116 nvmet_fc_tgt_a_put(assoc);
1117 return;
1118 }
1119
1120 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1121 }
1122 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1123 }
1124
1125 /**
1126  * nvmet_fc_unregister_targetport - transport entry point called by an
1127  *                       LLDD to deregister/remove a previously
1128  *                       registered local NVME subsystem FC port.
1129  * @target_port: pointer to the (registered) target port that is to be
1130  *               deregistered.
1131 *
1132 * Returns:
1133 * a completion status. Must be 0 upon success; a negative errno
1134 * (ex: -ENXIO) upon failure.
1135 */
1136 int
1137 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1138 {
1139 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1140
1141 /* terminate any outstanding associations */
1142 __nvmet_fc_free_assocs(tgtport);
1143
1144 nvmet_fc_tgtport_put(tgtport);
1145
1146 return 0;
1147 }
1148 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1149
1150
1151 /* *********************** FC-NVME LS Handling **************************** */
1152
1153
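/*
 * Fill in the common LS response header: the response ls_cmd, the
 * descriptor list length, and an LSDESC_RQST descriptor echoing the
 * original request's ls_cmd.
 */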
1154 static void
1155 nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
1156 {
1157 struct fcnvme_ls_acc_hdr *acc = buf;
1158
1159 acc->w0.ls_cmd = ls_cmd;
1160 acc->desc_list_len = desc_len;
1161 acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1162 acc->rqst.desc_len =
1163 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1164 acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1165 }
1166
1167 static int
1168 nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1169 u8 reason, u8 explanation, u8 vendor)
1170 {
1171 struct fcnvme_ls_rjt *rjt = buf;
1172
1173 nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1174 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1175 ls_cmd);
1176 rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1177 rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1178 rjt->rjt.reason_code = reason;
1179 rjt->rjt.reason_explanation = explanation;
1180 rjt->rjt.vendor = vendor;
1181
1182 return sizeof(struct fcnvme_ls_rjt);
1183 }
1184
1185 /* Validation Error indexes into the string table below */
1186 enum {
1187 VERR_NO_ERROR = 0,
1188 VERR_CR_ASSOC_LEN = 1,
1189 VERR_CR_ASSOC_RQST_LEN = 2,
1190 VERR_CR_ASSOC_CMD = 3,
1191 VERR_CR_ASSOC_CMD_LEN = 4,
1192 VERR_ERSP_RATIO = 5,
1193 VERR_ASSOC_ALLOC_FAIL = 6,
1194 VERR_QUEUE_ALLOC_FAIL = 7,
1195 VERR_CR_CONN_LEN = 8,
1196 VERR_CR_CONN_RQST_LEN = 9,
1197 VERR_ASSOC_ID = 10,
1198 VERR_ASSOC_ID_LEN = 11,
1199 VERR_NO_ASSOC = 12,
1200 VERR_CONN_ID = 13,
1201 VERR_CONN_ID_LEN = 14,
1202 VERR_NO_CONN = 15,
1203 VERR_CR_CONN_CMD = 16,
1204 VERR_CR_CONN_CMD_LEN = 17,
1205 VERR_DISCONN_LEN = 18,
1206 VERR_DISCONN_RQST_LEN = 19,
1207 VERR_DISCONN_CMD = 20,
1208 VERR_DISCONN_CMD_LEN = 21,
1209 VERR_DISCONN_SCOPE = 22,
1210 VERR_RS_LEN = 23,
1211 VERR_RS_RQST_LEN = 24,
1212 VERR_RS_CMD = 25,
1213 VERR_RS_CMD_LEN = 26,
1214 VERR_RS_RCTL = 27,
1215 VERR_RS_RO = 28,
1216 };
1217
1218 static char *validation_errors[] = {
1219 "OK",
1220 "Bad CR_ASSOC Length",
1221 "Bad CR_ASSOC Rqst Length",
1222 "Not CR_ASSOC Cmd",
1223 "Bad CR_ASSOC Cmd Length",
1224 "Bad Ersp Ratio",
1225 "Association Allocation Failed",
1226 "Queue Allocation Failed",
1227 "Bad CR_CONN Length",
1228 "Bad CR_CONN Rqst Length",
1229 "Not Association ID",
1230 "Bad Association ID Length",
1231 "No Association",
1232 "Not Connection ID",
1233 "Bad Connection ID Length",
1234 "No Connection",
1235 "Not CR_CONN Cmd",
1236 "Bad CR_CONN Cmd Length",
1237 "Bad DISCONN Length",
1238 "Bad DISCONN Rqst Length",
1239 "Not DISCONN Cmd",
1240 "Bad DISCONN Cmd Length",
1241 "Bad Disconnect Scope",
1242 "Bad RS Length",
1243 "Bad RS Rqst Length",
1244 "Not RS Cmd",
1245 "Bad RS Cmd Length",
1246 "Bad RS R_CTL",
1247 "Bad RS Relative Offset",
1248 };
1249
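/*
 * Handle a Create Association LS: validate the request descriptors,
 * allocate a new association plus its admin queue (queue 0), and
 * format either an accept carrying the new association/connection id
 * or an LS reject describing the failure.
 */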
1250 static void
1251 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1252 struct nvmet_fc_ls_iod *iod)
1253 {
1254 struct fcnvme_ls_cr_assoc_rqst *rqst =
1255 (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1256 struct fcnvme_ls_cr_assoc_acc *acc =
1257 (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1258 struct nvmet_fc_tgt_queue *queue;
1259 int ret = 0;
1260
1261 memset(acc, 0, sizeof(*acc));
1262
1263 /*
1264	 * FC-NVME spec changes. Some initiators send different lengths
1265	 * because the pad size for the Create Association Cmd descriptor
1266	 * was specified incorrectly.
1267	 * Accept anything of "minimum" length. Assume the format per the
1268	 * 1.15 spec (with HOSTID reduced to 16 bytes) and ignore how long
1269	 * the trailing pad is.
1270 */
1271 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1272 ret = VERR_CR_ASSOC_LEN;
1273 else if (be32_to_cpu(rqst->desc_list_len) <
1274 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1275 ret = VERR_CR_ASSOC_RQST_LEN;
1276 else if (rqst->assoc_cmd.desc_tag !=
1277 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1278 ret = VERR_CR_ASSOC_CMD;
1279 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
1280 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1281 ret = VERR_CR_ASSOC_CMD_LEN;
1282 else if (!rqst->assoc_cmd.ersp_ratio ||
1283 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1284 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1285 ret = VERR_ERSP_RATIO;
1286
1287 else {
1288 /* new association w/ admin queue */
1289 iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1290 if (!iod->assoc)
1291 ret = VERR_ASSOC_ALLOC_FAIL;
1292 else {
1293 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1294 be16_to_cpu(rqst->assoc_cmd.sqsize));
1295 if (!queue)
1296 ret = VERR_QUEUE_ALLOC_FAIL;
1297 }
1298 }
1299
1300 if (ret) {
1301 dev_err(tgtport->dev,
1302 "Create Association LS failed: %s\n",
1303 validation_errors[ret]);
1304 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1305 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1306 FCNVME_RJT_RC_LOGIC,
1307 FCNVME_RJT_EXP_NONE, 0);
1308 return;
1309 }
1310
1311 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1312 atomic_set(&queue->connected, 1);
1313 queue->sqhd = 0; /* best place to init value */
1314
1315 /* format a response */
1316
1317 iod->lsreq->rsplen = sizeof(*acc);
1318
1319 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1320 fcnvme_lsdesc_len(
1321 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1322 FCNVME_LS_CREATE_ASSOCIATION);
1323 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1324 acc->associd.desc_len =
1325 fcnvme_lsdesc_len(
1326 sizeof(struct fcnvme_lsdesc_assoc_id));
1327 acc->associd.association_id =
1328 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1329 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1330 acc->connectid.desc_len =
1331 fcnvme_lsdesc_len(
1332 sizeof(struct fcnvme_lsdesc_conn_id));
1333 acc->connectid.connection_id = acc->associd.association_id;
1334 }
1335
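/*
 * Handle a Create Connection LS: validate the request, look up the
 * existing association, allocate the requested io queue, and format
 * either an accept carrying the new connection id or a reject.
 */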
1336 static void
1337 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1338 struct nvmet_fc_ls_iod *iod)
1339 {
1340 struct fcnvme_ls_cr_conn_rqst *rqst =
1341 (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1342 struct fcnvme_ls_cr_conn_acc *acc =
1343 (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1344 struct nvmet_fc_tgt_queue *queue;
1345 int ret = 0;
1346
1347 memset(acc, 0, sizeof(*acc));
1348
1349 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1350 ret = VERR_CR_CONN_LEN;
1351 else if (rqst->desc_list_len !=
1352 fcnvme_lsdesc_len(
1353 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1354 ret = VERR_CR_CONN_RQST_LEN;
1355 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1356 ret = VERR_ASSOC_ID;
1357 else if (rqst->associd.desc_len !=
1358 fcnvme_lsdesc_len(
1359 sizeof(struct fcnvme_lsdesc_assoc_id)))
1360 ret = VERR_ASSOC_ID_LEN;
1361 else if (rqst->connect_cmd.desc_tag !=
1362 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1363 ret = VERR_CR_CONN_CMD;
1364 else if (rqst->connect_cmd.desc_len !=
1365 fcnvme_lsdesc_len(
1366 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1367 ret = VERR_CR_CONN_CMD_LEN;
1368 else if (!rqst->connect_cmd.ersp_ratio ||
1369 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1370 be16_to_cpu(rqst->connect_cmd.sqsize)))
1371 ret = VERR_ERSP_RATIO;
1372
1373 else {
1374 /* new io queue */
1375 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1376 be64_to_cpu(rqst->associd.association_id));
1377 if (!iod->assoc)
1378 ret = VERR_NO_ASSOC;
1379 else {
1380 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1381 be16_to_cpu(rqst->connect_cmd.qid),
1382 be16_to_cpu(rqst->connect_cmd.sqsize));
1383 if (!queue)
1384 ret = VERR_QUEUE_ALLOC_FAIL;
1385
1386 /* release get taken in nvmet_fc_find_target_assoc */
1387 nvmet_fc_tgt_a_put(iod->assoc);
1388 }
1389 }
1390
1391 if (ret) {
1392 dev_err(tgtport->dev,
1393 "Create Connection LS failed: %s\n",
1394 validation_errors[ret]);
1395 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1396 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1397 (ret == VERR_NO_ASSOC) ?
1398 FCNVME_RJT_RC_INV_ASSOC :
1399 FCNVME_RJT_RC_LOGIC,
1400 FCNVME_RJT_EXP_NONE, 0);
1401 return;
1402 }
1403
1404 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1405 atomic_set(&queue->connected, 1);
1406 queue->sqhd = 0; /* best place to init value */
1407
1408 /* format a response */
1409
1410 iod->lsreq->rsplen = sizeof(*acc);
1411
1412 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1413 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1414 FCNVME_LS_CREATE_CONNECTION);
1415 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1416 acc->connectid.desc_len =
1417 fcnvme_lsdesc_len(
1418 sizeof(struct fcnvme_lsdesc_conn_id));
1419 acc->connectid.connection_id =
1420 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1421 be16_to_cpu(rqst->connect_cmd.qid)));
1422 }
1423
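/*
 * Handle a Disconnect LS: validate the request, find the association
 * (and, for connection scope, the specific queue), format the accept,
 * then delete the queue and, if the admin queue was the one named,
 * tear down the whole association.
 */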
1424 static void
1425 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1426 struct nvmet_fc_ls_iod *iod)
1427 {
1428 struct fcnvme_ls_disconnect_rqst *rqst =
1429 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1430 struct fcnvme_ls_disconnect_acc *acc =
1431 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1432 struct nvmet_fc_tgt_queue *queue = NULL;
1433 struct nvmet_fc_tgt_assoc *assoc;
1434 int ret = 0;
1435 bool del_assoc = false;
1436
1437 memset(acc, 0, sizeof(*acc));
1438
1439 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1440 ret = VERR_DISCONN_LEN;
1441 else if (rqst->desc_list_len !=
1442 fcnvme_lsdesc_len(
1443 sizeof(struct fcnvme_ls_disconnect_rqst)))
1444 ret = VERR_DISCONN_RQST_LEN;
1445 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1446 ret = VERR_ASSOC_ID;
1447 else if (rqst->associd.desc_len !=
1448 fcnvme_lsdesc_len(
1449 sizeof(struct fcnvme_lsdesc_assoc_id)))
1450 ret = VERR_ASSOC_ID_LEN;
1451 else if (rqst->discon_cmd.desc_tag !=
1452 cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1453 ret = VERR_DISCONN_CMD;
1454 else if (rqst->discon_cmd.desc_len !=
1455 fcnvme_lsdesc_len(
1456 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1457 ret = VERR_DISCONN_CMD_LEN;
1458 else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1459 (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1460 ret = VERR_DISCONN_SCOPE;
1461 else {
1462 /* match an active association */
1463 assoc = nvmet_fc_find_target_assoc(tgtport,
1464 be64_to_cpu(rqst->associd.association_id));
1465 iod->assoc = assoc;
1466 if (assoc) {
1467 if (rqst->discon_cmd.scope ==
1468 FCNVME_DISCONN_CONNECTION) {
1469 queue = nvmet_fc_find_target_queue(tgtport,
1470 be64_to_cpu(
1471 rqst->discon_cmd.id));
1472 if (!queue) {
1473 nvmet_fc_tgt_a_put(assoc);
1474 ret = VERR_NO_CONN;
1475 }
1476 }
1477 } else
1478 ret = VERR_NO_ASSOC;
1479 }
1480
1481 if (ret) {
1482 dev_err(tgtport->dev,
1483 "Disconnect LS failed: %s\n",
1484 validation_errors[ret]);
1485 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1486 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1487 (ret == VERR_NO_ASSOC) ?
1488 FCNVME_RJT_RC_INV_ASSOC :
1489 (ret == VERR_NO_CONN) ?
1490 FCNVME_RJT_RC_INV_CONN :
1491 FCNVME_RJT_RC_LOGIC,
1492 FCNVME_RJT_EXP_NONE, 0);
1493 return;
1494 }
1495
1496 /* format a response */
1497
1498 iod->lsreq->rsplen = sizeof(*acc);
1499
1500 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1501 fcnvme_lsdesc_len(
1502 sizeof(struct fcnvme_ls_disconnect_acc)),
1503 FCNVME_LS_DISCONNECT);
1504
1505
1506 /* are we to delete a Connection ID (queue) */
1507 if (queue) {
1508 int qid = queue->qid;
1509
1510 nvmet_fc_delete_target_queue(queue);
1511
1512 /* release the get taken by find_target_queue */
1513 nvmet_fc_tgt_q_put(queue);
1514
1515	/* tear association down if the admin queue (qid 0) was terminated */
1516 if (!qid)
1517 del_assoc = true;
1518 }
1519
1520 /* release get taken in nvmet_fc_find_target_assoc */
1521 nvmet_fc_tgt_a_put(iod->assoc);
1522
1523 if (del_assoc)
1524 nvmet_fc_delete_target_assoc(iod->assoc);
1525 }
1526
1527
1528 /* *********************** NVME Ctrl Routines **************************** */
1529
1530
1531 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1532
1533 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1534
1535 static void
1536 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1537 {
1538 struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1539 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1540
1541 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1542 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1543 nvmet_fc_free_ls_iod(tgtport, iod);
1544 nvmet_fc_tgtport_put(tgtport);
1545 }
1546
1547 static void
1548 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1549 struct nvmet_fc_ls_iod *iod)
1550 {
1551 int ret;
1552
1553 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1554 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1555
1556 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1557 if (ret)
1558 nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1559 }
1560
1561 /*
1562 * Actual processing routine for received FC-NVME LS Requests from the LLD
1563 */
1564 static void
1565 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1566 struct nvmet_fc_ls_iod *iod)
1567 {
1568 struct fcnvme_ls_rqst_w0 *w0 =
1569 (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1570
1571 iod->lsreq->nvmet_fc_private = iod;
1572 iod->lsreq->rspbuf = iod->rspbuf;
1573 iod->lsreq->rspdma = iod->rspdma;
1574 iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1575	/* Be preventive. Handlers will later set this to a valid length. */
1576 iod->lsreq->rsplen = 0;
1577
1578 iod->assoc = NULL;
1579
1580 /*
1581 * handlers:
1582 * parse request input, execute the request, and format the
1583 * LS response
1584 */
1585 switch (w0->ls_cmd) {
1586 case FCNVME_LS_CREATE_ASSOCIATION:
1587 /* Creates Association and initial Admin Queue/Connection */
1588 nvmet_fc_ls_create_association(tgtport, iod);
1589 break;
1590 case FCNVME_LS_CREATE_CONNECTION:
1591 /* Creates an IO Queue/Connection */
1592 nvmet_fc_ls_create_connection(tgtport, iod);
1593 break;
1594 case FCNVME_LS_DISCONNECT:
1595 /* Terminate a Queue/Connection or the Association */
1596 nvmet_fc_ls_disconnect(tgtport, iod);
1597 break;
1598 default:
1599 iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1600 NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1601 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1602 }
1603
1604 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1605 }
1606
1607 /*
1608 * Actual processing routine for received FC-NVME LS Requests from the LLD
1609 */
1610 static void
1611 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1612 {
1613 struct nvmet_fc_ls_iod *iod =
1614 container_of(work, struct nvmet_fc_ls_iod, work);
1615 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1616
1617 nvmet_fc_handle_ls_rqst(tgtport, iod);
1618 }
1619
1620
1621 /**
1622 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1623  *                        upon the reception of an NVME LS request.
1624 *
1625 * The nvmet-fc layer will copy payload to an internal structure for
1626 * processing. As such, upon completion of the routine, the LLDD may
1627 * immediately free/reuse the LS request buffer passed in the call.
1628 *
1629 * If this routine returns error, the LLDD should abort the exchange.
1630 *
1631  * @target_port: pointer to the (registered) target port the LS was
1632 * received on.
1633 * @lsreq: pointer to a lsreq request structure to be used to reference
1634 * the exchange corresponding to the LS.
1635 * @lsreqbuf: pointer to the buffer containing the LS Request
1636 * @lsreqbuf_len: length, in bytes, of the received LS request
1637 */
1638 int
1639 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1640 struct nvmefc_tgt_ls_req *lsreq,
1641 void *lsreqbuf, u32 lsreqbuf_len)
1642 {
1643 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1644 struct nvmet_fc_ls_iod *iod;
1645
1646 if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1647 return -E2BIG;
1648
1649 if (!nvmet_fc_tgtport_get(tgtport))
1650 return -ESHUTDOWN;
1651
1652 iod = nvmet_fc_alloc_ls_iod(tgtport);
1653 if (!iod) {
1654 nvmet_fc_tgtport_put(tgtport);
1655 return -ENOENT;
1656 }
1657
1658 iod->lsreq = lsreq;
1659 iod->fcpreq = NULL;
1660 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1661 iod->rqstdatalen = lsreqbuf_len;
1662
1663 schedule_work(&iod->work);
1664
1665 return 0;
1666 }
1667 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
1668
1669
1670 /*
1671 * **********************
1672 * Start of FCP handling
1673 * **********************
1674 */
1675
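/*
 * Allocate a page-backed scatterlist large enough for the command's
 * total transfer length and DMA-map it in the direction implied by the
 * io: a write (from the initiator's perspective) maps DMA_FROM_DEVICE,
 * a read maps DMA_TO_DEVICE.
 */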
1676 static int
1677 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1678 {
1679 struct scatterlist *sg;
1680 struct page *page;
1681 unsigned int nent;
1682 u32 page_len, length;
1683 int i = 0;
1684
1685 length = fod->total_length;
1686 nent = DIV_ROUND_UP(length, PAGE_SIZE);
1687 sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
1688 if (!sg)
1689 goto out;
1690
1691 sg_init_table(sg, nent);
1692
1693 while (length) {
1694 page_len = min_t(u32, length, PAGE_SIZE);
1695
1696 page = alloc_page(GFP_KERNEL);
1697 if (!page)
1698 goto out_free_pages;
1699
1700 sg_set_page(&sg[i], page, page_len, 0);
1701 length -= page_len;
1702 i++;
1703 }
1704
1705 fod->data_sg = sg;
1706 fod->data_sg_cnt = nent;
1707 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1708 ((fod->io_dir == NVMET_FCP_WRITE) ?
1709 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1710 /* note: write from initiator perspective */
1711
1712 return 0;
1713
1714 out_free_pages:
1715 while (i > 0) {
1716 i--;
1717 __free_page(sg_page(&sg[i]));
1718 }
1719 kfree(sg);
1720 fod->data_sg = NULL;
1721 fod->data_sg_cnt = 0;
1722 out:
1723 return NVME_SC_INTERNAL;
1724 }
1725
1726 static void
1727 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1728 {
1729 struct scatterlist *sg;
1730 int count;
1731
1732 if (!fod->data_sg || !fod->data_sg_cnt)
1733 return;
1734
1735 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1736 ((fod->io_dir == NVMET_FCP_WRITE) ?
1737 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1738 for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
1739 __free_page(sg_page(sg));
1740 kfree(fod->data_sg);
1741 fod->data_sg = NULL;
1742 fod->data_sg_cnt = 0;
1743 }
1744
1745
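/*
 * Rough occupancy check used when deciding whether an explicit ersp
 * must be sent. Example (illustrative values): sqsize 32, sqhd 4,
 * sqtail 3 gives used = 3 + 32 - 4 = 31, and 31 * 10 = 310 >=
 * (32 - 1) * 9 = 279, so the queue is treated as 90% or more full.
 */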
1746 static bool
1747 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1748 {
1749 u32 sqtail, used;
1750
1751 /* egad, this is ugly. And sqtail is just a best guess */
1752 sqtail = atomic_read(&q->sqtail) % q->sqsize;
1753
1754 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1755 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
1756 }
1757
1758 /*
1759 * Prep RSP payload.
1760 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1761 */
1762 static void
1763 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1764 struct nvmet_fc_fcp_iod *fod)
1765 {
1766 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1767 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1768 struct nvme_completion *cqe = &ersp->cqe;
1769 u32 *cqewd = (u32 *)cqe;
1770 bool send_ersp = false;
1771 u32 rsn, rspcnt, xfr_length;
1772
1773 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1774 xfr_length = fod->total_length;
1775 else
1776 xfr_length = fod->offset;
1777
1778 /*
1779 * check to see if we can send a 0's rsp.
1780 * Note: to send a 0's response, the NVME-FC host transport will
1781 * recreate the CQE. The host transport knows: sq id, SQHD (last
1782 * seen in an ersp), and command_id. Thus it will create a
1783 * zero-filled CQE with those known fields filled in. Transport
1784 * must send an ersp for any condition where the cqe won't match
1785 * this.
1786 *
1787 * Here are the FC-NVME mandated cases where we must send an ersp:
1788 * every N responses, where N=ersp_ratio
1789 * force fabric commands to send ersp's (not in FC-NVME but good
1790 * practice)
1791 * normal cmds: any time status is non-zero, or status is zero
1792 * but words 0 or 1 are non-zero.
1793 * the SQ is 90% or more full
1794 * the cmd is a fused command
1795 * transferred data length not equal to cmd iu length
1796 */
1797 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1798 if (!(rspcnt % fod->queue->ersp_ratio) ||
1799 sqe->opcode == nvme_fabrics_command ||
1800 xfr_length != fod->total_length ||
1801 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1802 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1803 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
1804 send_ersp = true;
1805
1806 /* re-set the fields */
1807 fod->fcpreq->rspaddr = ersp;
1808 fod->fcpreq->rspdma = fod->rspdma;
1809
1810 if (!send_ersp) {
1811 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1812 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1813 } else {
1814 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1815 rsn = atomic_inc_return(&fod->queue->rsn);
1816 ersp->rsn = cpu_to_be32(rsn);
1817 ersp->xfrd_len = cpu_to_be32(xfr_length);
1818 fod->fcpreq->rsplen = sizeof(*ersp);
1819 }
1820
1821 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1822 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1823 }
1824
1825 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1826
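/*
 * Abort/clean up a command: free its data pages, ask the LLDD to abort
 * the exchange unless an abort was already issued (fod->aborted), and
 * release the iod back to the queue.
 */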
1827 static void
1828 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
1829 struct nvmet_fc_fcp_iod *fod)
1830 {
1831 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1832
1833 /* data no longer needed */
1834 nvmet_fc_free_tgt_pgs(fod);
1835
1836 /*
1837	 * if an ABTS was received or we issued the fcp_abort early,
1838	 * don't call the abort routine again.
1839 */
1840 /* no need to take lock - lock was taken earlier to get here */
1841 if (!fod->aborted)
1842 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
1843
1844 nvmet_fc_free_fcp_iod(fod->queue, fod);
1845 }
1846
1847 static void
1848 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1849 struct nvmet_fc_fcp_iod *fod)
1850 {
1851 int ret;
1852
1853 fod->fcpreq->op = NVMET_FCOP_RSP;
1854 fod->fcpreq->timeout = 0;
1855
1856 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1857
1858 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1859 if (ret)
1860 nvmet_fc_abort_op(tgtport, fod);
1861 }
1862
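/*
 * Issue the next data transfer (read or write) for a command: build
 * the per-operation scatterlist slice starting at fod->next_sg, cap
 * the transfer length by the per-xfr limit and the LLDD's sgl limits,
 * and, on the final read chunk, piggyback the response if the LLDD
 * supports READDATA_RSP.
 */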
1863 static void
1864 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1865 struct nvmet_fc_fcp_iod *fod, u8 op)
1866 {
1867 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1868 struct scatterlist *sg, *datasg;
1869 unsigned long flags;
1870 u32 tlen, sg_off;
1871 int ret;
1872
1873 fcpreq->op = op;
1874 fcpreq->offset = fod->offset;
1875 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1876 tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
1877 (fod->total_length - fod->offset));
1878 tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
1879 tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
1880 * PAGE_SIZE);
1881 fcpreq->transfer_length = tlen;
1882 fcpreq->transferred_length = 0;
1883 fcpreq->fcp_error = 0;
1884 fcpreq->rsplen = 0;
1885
1886 fcpreq->sg_cnt = 0;
1887
1888 datasg = fod->next_sg;
1889 sg_off = fod->next_sg_offset;
1890
1891 for (sg = fcpreq->sg ; tlen; sg++) {
1892 *sg = *datasg;
1893 if (sg_off) {
1894 sg->offset += sg_off;
1895 sg->length -= sg_off;
1896 sg->dma_address += sg_off;
1897 sg_off = 0;
1898 }
1899 if (tlen < sg->length) {
1900 sg->length = tlen;
1901 fod->next_sg = datasg;
1902 fod->next_sg_offset += tlen;
1903 } else if (tlen == sg->length) {
1904 fod->next_sg_offset = 0;
1905 fod->next_sg = sg_next(datasg);
1906 } else {
1907 fod->next_sg_offset = 0;
1908 datasg = sg_next(datasg);
1909 }
1910 tlen -= sg->length;
1911 fcpreq->sg_cnt++;
1912 }
1913
1914 /*
1915 * If the last READDATA request: check if LLDD supports
1916 * combined xfr with response.
1917 */
1918 if ((op == NVMET_FCOP_READDATA) &&
1919 ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
1920 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1921 fcpreq->op = NVMET_FCOP_READDATA_RSP;
1922 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1923 }
1924
1925 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1926 if (ret) {
1927 /*
1928		 * should be ok to set w/o lock as it's in the thread of
1929 * execution (not an async timer routine) and doesn't
1930 * contend with any clearing action
1931 */
1932 fod->abort = true;
1933
1934 if (op == NVMET_FCOP_WRITEDATA) {
1935 spin_lock_irqsave(&fod->flock, flags);
1936 fod->writedataactive = false;
1937 spin_unlock_irqrestore(&fod->flock, flags);
1938 nvmet_req_complete(&fod->req,
1939 NVME_SC_FC_TRANSPORT_ERROR);
1940 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1941 fcpreq->fcp_error = ret;
1942 fcpreq->transferred_length = 0;
1943 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1944 }
1945 }
1946 }
1947
1948 static inline bool
1949 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
1950 {
1951 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1952 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1953
1954 /* if in the middle of an io and we need to tear down */
1955 if (abort) {
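		/*
		 * Note: a WRITEDATA op means the nvmet layer still owns the
		 * command, so complete it with a transport error and let the
		 * normal done path tear the io down; for other ops the
		 * transport can abort the exchange directly.
		 */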
1956 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
1957 nvmet_req_complete(&fod->req,
1958 NVME_SC_FC_TRANSPORT_ERROR);
1959 return true;
1960 }
1961
1962 nvmet_fc_abort_op(tgtport, fod);
1963 return true;
1964 }
1965
1966 return false;
1967 }
1968
1969 /*
1970 * actual done handler for FCP operations when completed by the lldd
1971 */
1972 static void
1973 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1974 {
1975 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1976 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1977 unsigned long flags;
1978 bool abort;
1979
1980 spin_lock_irqsave(&fod->flock, flags);
1981 abort = fod->abort;
1982 fod->writedataactive = false;
1983 spin_unlock_irqrestore(&fod->flock, flags);
1984
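	/*
	 * Per-op continuation: WRITEDATA completions either pull the next
	 * chunk or hand the command to the nvmet layer; READDATA
	 * completions either push the next chunk or send the response
	 * (skipped when it was combined as READDATA_RSP); RSP completions
	 * simply release the job structure.
	 */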
1985 switch (fcpreq->op) {
1986
1987 case NVMET_FCOP_WRITEDATA:
1988 if (__nvmet_fc_fod_op_abort(fod, abort))
1989 return;
1990 if (fcpreq->fcp_error ||
1991 fcpreq->transferred_length != fcpreq->transfer_length) {
1992 spin_lock(&fod->flock);
1993 fod->abort = true;
1994 spin_unlock(&fod->flock);
1995
1996 nvmet_req_complete(&fod->req,
1997 NVME_SC_FC_TRANSPORT_ERROR);
1998 return;
1999 }
2000
2001 fod->offset += fcpreq->transferred_length;
2002 if (fod->offset != fod->total_length) {
2003 spin_lock_irqsave(&fod->flock, flags);
2004 fod->writedataactive = true;
2005 spin_unlock_irqrestore(&fod->flock, flags);
2006
2007 /* transfer the next chunk */
2008 nvmet_fc_transfer_fcp_data(tgtport, fod,
2009 NVMET_FCOP_WRITEDATA);
2010 return;
2011 }
2012
2013 /* data transfer complete, resume with nvmet layer */
2014
2015 fod->req.execute(&fod->req);
2016
2017 break;
2018
2019 case NVMET_FCOP_READDATA:
2020 case NVMET_FCOP_READDATA_RSP:
2021 if (__nvmet_fc_fod_op_abort(fod, abort))
2022 return;
2023 if (fcpreq->fcp_error ||
2024 fcpreq->transferred_length != fcpreq->transfer_length) {
2025 nvmet_fc_abort_op(tgtport, fod);
2026 return;
2027 }
2028
2029 /* success */
2030
2031 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
2032 /* data no longer needed */
2033 nvmet_fc_free_tgt_pgs(fod);
2034 nvmet_fc_free_fcp_iod(fod->queue, fod);
2035 return;
2036 }
2037
2038 fod->offset += fcpreq->transferred_length;
2039 if (fod->offset != fod->total_length) {
2040 /* transfer the next chunk */
2041 nvmet_fc_transfer_fcp_data(tgtport, fod,
2042 NVMET_FCOP_READDATA);
2043 return;
2044 }
2045
2046 /* data transfer complete, send response */
2047
2048 /* data no longer needed */
2049 nvmet_fc_free_tgt_pgs(fod);
2050
2051 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2052
2053 break;
2054
2055 case NVMET_FCOP_RSP:
2056 if (__nvmet_fc_fod_op_abort(fod, abort))
2057 return;
2058 nvmet_fc_free_fcp_iod(fod->queue, fod);
2059 break;
2060
2061 default:
2062 break;
2063 }
2064 }
2065
2066 static void
2067 nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
2068 {
2069 struct nvmet_fc_fcp_iod *fod =
2070 container_of(work, struct nvmet_fc_fcp_iod, done_work);
2071
2072 nvmet_fc_fod_op_done(fod);
2073 }
2074
2075 static void
2076 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
2077 {
2078 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2079 struct nvmet_fc_tgt_queue *queue = fod->queue;
2080
2081 if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
2082 /* context switch so completion is not in ISR context */
2083 queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
2084 else
2085 nvmet_fc_fod_op_done(fod);
2086 }
2087
2088 /*
2089 * actual completion handler after execution by the nvmet layer
2090 */
2091 static void
2092 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2093 struct nvmet_fc_fcp_iod *fod, int status)
2094 {
2095 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2096 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2097 unsigned long flags;
2098 bool abort;
2099
2100 spin_lock_irqsave(&fod->flock, flags);
2101 abort = fod->abort;
2102 spin_unlock_irqrestore(&fod->flock, flags);
2103
2104 /* if we have a CQE, snoop the last sq_head value (it's echoed in any transport-built CQE below) */
2105 if (!status)
2106 fod->queue->sqhd = cqe->sq_head;
2107
2108 if (abort) {
2109 nvmet_fc_abort_op(tgtport, fod);
2110 return;
2111 }
2112
2113 /* if an error occurred handling the cmd after initial parsing */
2114 if (status) {
2115 /* fudge up a failed CQE status for our transport error */
2116 memset(cqe, 0, sizeof(*cqe));
2117 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
2118 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2119 cqe->command_id = sqe->command_id;
2120 cqe->status = cpu_to_le16(status);
2121 } else {
2122
2123 /*
2124 * try to push the data even if the CQE status is non-zero.
2125 * There may be a status for which data was still intended
2126 * to be moved.
2127 */
2128 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2129 /* push the data over before sending rsp */
2130 nvmet_fc_transfer_fcp_data(tgtport, fod,
2131 NVMET_FCOP_READDATA);
2132 return;
2133 }
2134
2135 /* writes & no data - fall thru */
2136 }
2137
2138 /* data no longer needed */
2139 nvmet_fc_free_tgt_pgs(fod);
2140
2141 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2142 }
2143
2144
2145 static void
2146 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2147 {
2148 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2149 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2150
2151 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2152 }
2153
2154
2155 /*
2156 * Actual processing routine for received FC-NVME FCP Requests from the LLDD
2157 */
2158 static void
2159 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2160 struct nvmet_fc_fcp_iod *fod)
2161 {
2162 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2163 int ret;
2164
2165 /*
2166 * Fused commands are currently not supported in the linux
2167 * implementation.
2168 *
2169 * As such, the FC transport implementation does not look at
2170 * fused commands or order their delivery to the upper layer
2171 * (based on csn) until both have been received.
2172 */
2173
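	/*
	 * Overall flow: validate the IU's data direction against the SQE
	 * opcode, initialize the nvmet request, allocate the data SGL,
	 * then either pull WRITE data from the host before invoking the
	 * nvmet layer or invoke it immediately (READ data is pushed when
	 * the command completes).
	 */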
2174 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2175
2176 fod->total_length = be32_to_cpu(cmdiu->data_len);
2177 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2178 fod->io_dir = NVMET_FCP_WRITE;
2179 if (!nvme_is_write(&cmdiu->sqe))
2180 goto transport_error;
2181 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2182 fod->io_dir = NVMET_FCP_READ;
2183 if (nvme_is_write(&cmdiu->sqe))
2184 goto transport_error;
2185 } else {
2186 fod->io_dir = NVMET_FCP_NODATA;
2187 if (fod->total_length)
2188 goto transport_error;
2189 }
2190
2191 fod->req.cmd = &fod->cmdiubuf.sqe;
2192 fod->req.rsp = &fod->rspiubuf.cqe;
2193 fod->req.port = fod->queue->port;
2194
2195 /* ensure nvmet handlers will set cmd handler callback */
2196 fod->req.execute = NULL;
2197
2198 /* clear any response payload */
2199 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2200
2201 fod->data_sg = NULL;
2202 fod->data_sg_cnt = 0;
2203
2204 ret = nvmet_req_init(&fod->req,
2205 &fod->queue->nvme_cq,
2206 &fod->queue->nvme_sq,
2207 &nvmet_fc_tgt_fcp_ops);
2208 if (!ret) {
2209 /* bad SQE content or invalid ctrl state */
2210 /* nvmet layer has already called op done to send rsp. */
2211 return;
2212 }
2213
2214 /* keep a running counter of tail position */
2215 atomic_inc(&fod->queue->sqtail);
2216
2217 if (fod->total_length) {
2218 ret = nvmet_fc_alloc_tgt_pgs(fod);
2219 if (ret) {
2220 nvmet_req_complete(&fod->req, ret);
2221 return;
2222 }
2223 }
2224 fod->req.sg = fod->data_sg;
2225 fod->req.sg_cnt = fod->data_sg_cnt;
2226 fod->offset = 0;
2227 fod->next_sg = fod->data_sg;
2228 fod->next_sg_offset = 0;
2229
2230 if (fod->io_dir == NVMET_FCP_WRITE) {
2231 /* pull the data over before invoking nvmet layer */
2232 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2233 return;
2234 }
2235
2236 /*
2237 * Reads or no data:
2238 *
2239 * can invoke the nvmet layer now. If read data, cmd completion will
2240 * push the data
2241 */
2242
2243 fod->req.execute(&fod->req);
2244
2245 return;
2246
2247 transport_error:
2248 nvmet_fc_abort_op(tgtport, fod);
2249 }
2250
2251 /*
2252 * Work handler for processing received FC-NVME FCP Requests from the LLDD
2253 */
2254 static void
2255 nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2256 {
2257 struct nvmet_fc_fcp_iod *fod =
2258 container_of(work, struct nvmet_fc_fcp_iod, work);
2259 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2260
2261 nvmet_fc_handle_fcp_rqst(tgtport, fod);
2262 }
2263
2264 /**
2265 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2266 * upon the reception of an NVME FCP CMD IU.
2267 *
2268 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2269 * layer for processing.
2270 *
2271 * The nvmet_fc layer allocates a local job structure (struct
2272 * nvmet_fc_fcp_iod) from the queue for the io and copies the
2273 * CMD IU buffer to the job structure. As such, on a successful
2274 * completion (returns 0), the LLDD may immediately free/reuse
2275 * the CMD IU buffer passed in the call.
2276 *
2277 * However, due to the packetized nature of FC and the api of the FC
2278 * LLDD (which may issue a hw command to send the response but not see
2279 * the hw completion for that command before a new command is
2280 * asynchronously received and passed up to the nvmet_fc layer), it's
2281 * possible for a command to be received before the LLDD and nvmet_fc
2282 * have recycled the job structure. This gives the appearance of more
2283 * commands received than fit in the sq.
2284 * To alleviate this scenario, a temporary queue is maintained in the
2285 * transport for pending LLDD requests waiting for a queue job structure.
2286 * In these "overrun" cases, a temporary queue element is allocated,
2287 * the LLDD request and CMD IU buffer information are remembered, and
2288 * the routine returns a -EOVERFLOW status. Subsequently, when a queue job
2289 * structure is freed, it is immediately reallocated for anything on the
2290 * pending request list. The LLDDs defer_rcv() callback is called,
2291 * informing the LLDD that it may reuse the CMD IU buffer, and the io
2292 * is then started normally with the transport.
2293 *
2294 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2295 * the completion as successful but must not reuse the CMD IU buffer
2296 * until the LLDD's defer_rcv() callback has been called for the
2297 * corresponding struct nvmefc_tgt_fcp_req pointer.
2298 *
2299 * If there is any other condition in which an error occurs, the
2300 * transport will return a non-zero status indicating the error.
2301 * In all cases other than -EOVERFLOW, the transport has not accepted the
2302 * request and the LLDD should abort the exchange.
2303 *
2304 * @target_port: pointer to the (registered) target port the FCP CMD IU
2305 * was received on.
2306 * @fcpreq: pointer to a fcpreq request structure to be used to reference
2307 * the exchange corresponding to the FCP Exchange.
2308 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
2309 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2310 */
2311 int
2312 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2313 struct nvmefc_tgt_fcp_req *fcpreq,
2314 void *cmdiubuf, u32 cmdiubuf_len)
2315 {
2316 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2317 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2318 struct nvmet_fc_tgt_queue *queue;
2319 struct nvmet_fc_fcp_iod *fod;
2320 struct nvmet_fc_defer_fcp_req *deferfcp;
2321 unsigned long flags;
2322
2323 /* validate iu, so the connection id can be used to find the queue */
2324 if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2325 (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2326 (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2327 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2328 return -EIO;
2329
2330 queue = nvmet_fc_find_target_queue(tgtport,
2331 be64_to_cpu(cmdiu->connection_id));
2332 if (!queue)
2333 return -ENOTCONN;
2334
2335 /*
2336 * note: reference taken by find_target_queue
2337 * After successful fod allocation, the fod will inherit the
2338 * ownership of that reference and will remove the reference
2339 * when the fod is freed.
2340 */
2341
2342 spin_lock_irqsave(&queue->qlock, flags);
2343
2344 fod = nvmet_fc_alloc_fcp_iod(queue);
2345 if (fod) {
2346 spin_unlock_irqrestore(&queue->qlock, flags);
2347
2348 fcpreq->nvmet_fc_private = fod;
2349 fod->fcpreq = fcpreq;
2350
2351 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2352
2353 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2354
2355 return 0;
2356 }
2357
2358 if (!tgtport->ops->defer_rcv) {
2359 spin_unlock_irqrestore(&queue->qlock, flags);
2360 /* release the queue lookup reference */
2361 nvmet_fc_tgt_q_put(queue);
2362 return -ENOENT;
2363 }
2364
2365 deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2366 struct nvmet_fc_defer_fcp_req, req_list);
2367 if (deferfcp) {
2368 /* Just re-use one that was previously allocated */
2369 list_del(&deferfcp->req_list);
2370 } else {
2371 spin_unlock_irqrestore(&queue->qlock, flags);
2372
2373 /* Now we need to dynamically allocate one */
2374 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2375 if (!deferfcp) {
2376 /* release the queue lookup reference */
2377 nvmet_fc_tgt_q_put(queue);
2378 return -ENOMEM;
2379 }
2380 spin_lock_irqsave(&queue->qlock, flags);
2381 }
2382
2383 /* For now, use rspaddr / rsplen to save payload information */
2384 fcpreq->rspaddr = cmdiubuf;
2385 fcpreq->rsplen = cmdiubuf_len;
2386 deferfcp->fcp_req = fcpreq;
2387
2388 /* defer processing till a fod becomes available */
2389 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2390
2391 /* NOTE: the queue lookup reference is still valid */
2392
2393 spin_unlock_irqrestore(&queue->qlock, flags);
2394
2395 return -EOVERFLOW;
2396 }
2397 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
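
/*
 * Illustrative LLDD-side handling of the return codes documented above.
 * This is a sketch only (abort_exchange() is a hypothetical LLDD helper),
 * not code used by this transport:
 *
 *	ret = nvmet_fc_rcv_fcp_req(targetport, fcpreq, cmdiubuf, cmdiubuf_len);
 *	if (!ret) {
 *		// accepted: the CMD IU buffer may be reused immediately
 *	} else if (ret == -EOVERFLOW) {
 *		// accepted, but hold the CMD IU buffer until the
 *		// target template's ->defer_rcv() callback is invoked
 *	} else {
 *		// not accepted: terminate/abort the FC exchange
 *		abort_exchange(fcpreq);
 *	}
 */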
2398
2399 /**
2400 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2401 * upon the reception of an ABTS for an FCP command
2402 *
2403 * Notify the transport that an ABTS has been received for a FCP command
2404 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2405 * LLDD believes the command is still being worked on
2406 * (template_ops->fcp_req_release() has not been called).
2407 *
2408 * The transport will wait for any outstanding work (an op to the LLDD,
2409 * which the lldd should complete with error due to the ABTS; or the
2410 * completion from the nvmet layer of the nvme command), then will
2411 * stop processing and call the LLDD's fcp_req_release() callback to
2412 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
2413 * to the ABTS either after return from this function (assuming any
2414 * outstanding op work has been terminated) or upon the callback being
2415 * called.
2416 *
2417 * @target_port: pointer to the (registered) target port the FCP CMD IU
2418 * was received on.
2419 * @fcpreq: pointer to the fcpreq request structure that corresponds
2420 * to the exchange that received the ABTS.
2421 */
2422 void
2423 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2424 struct nvmefc_tgt_fcp_req *fcpreq)
2425 {
2426 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2427 struct nvmet_fc_tgt_queue *queue;
2428 unsigned long flags;
2429
2430 if (!fod || fod->fcpreq != fcpreq)
2431 /* job appears to have already completed, ignore abort */
2432 return;
2433
2434 queue = fod->queue;
2435
2436 spin_lock_irqsave(&queue->qlock, flags);
2437 if (fod->active) {
2438 /*
2439 * mark as abort. The abort handler, invoked upon completion
2440 * of any work, will detect the aborted status and do the
2441 * callback.
2442 */
2443 spin_lock(&fod->flock);
2444 fod->abort = true;
2445 fod->aborted = true;
2446 spin_unlock(&fod->flock);
2447 }
2448 spin_unlock_irqrestore(&queue->qlock, flags);
2449 }
2450 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
2451
2452
2453 struct nvmet_fc_traddr {
2454 u64 nn;
2455 u64 pn;
2456 };
2457
2458 static int
2459 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2460 {
2461 u64 token64;
2462
2463 if (match_u64(sstr, &token64))
2464 return -EINVAL;
2465 *val = token64;
2466
2467 return 0;
2468 }
2469
2470 /*
2471 * This routine validates and extracts the WWNs from the TRADDR string.
2472 * As the kernel parsers need a 0x prefix to determine the number base,
2473 * always build the string to parse with a 0x prefix before parsing the name strings.
2474 */
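/*
 * Accepted traddr forms (note: the separator byte between the two names
 * is not itself validated here):
 *   nn-0x<16 hex digits><sep>pn-0x<16 hex digits>   (NVME_FC_TRADDR_MAXLENGTH)
 *   nn-<16 hex digits><sep>pn-<16 hex digits>       (NVME_FC_TRADDR_MINLENGTH)
 * e.g., with purely illustrative WWNs:
 *   nn-0x20000090fa942779:pn-0x10000090fa942779
 */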
2475 static int
2476 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2477 {
2478 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2479 substring_t wwn = { name, &name[sizeof(name)-1] };
2480 int nnoffset, pnoffset;
2481
2482 /* validate the string is one of the 2 allowed formats */
2483 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2484 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2485 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2486 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2487 nnoffset = NVME_FC_TRADDR_OXNNLEN;
2488 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2489 NVME_FC_TRADDR_OXNNLEN;
2490 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2491 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2492 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2493 "pn-", NVME_FC_TRADDR_NNLEN))) {
2494 nnoffset = NVME_FC_TRADDR_NNLEN;
2495 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2496 } else
2497 goto out_einval;
2498
2499 name[0] = '0';
2500 name[1] = 'x';
2501 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2502
2503 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2504 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2505 goto out_einval;
2506
2507 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2508 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2509 goto out_einval;
2510
2511 return 0;
2512
2513 out_einval:
2514 pr_warn("%s: bad traddr string\n", __func__);
2515 return -EINVAL;
2516 }
2517
2518 static int
2519 nvmet_fc_add_port(struct nvmet_port *port)
2520 {
2521 struct nvmet_fc_tgtport *tgtport;
2522 struct nvmet_fc_traddr traddr = { 0L, 0L };
2523 unsigned long flags;
2524 int ret;
2525
2526 /* validate the address info */
2527 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2528 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2529 return -EINVAL;
2530
2531 /* map the traddr address info to a target port */
2532
2533 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2534 sizeof(port->disc_addr.traddr));
2535 if (ret)
2536 return ret;
2537
2538 ret = -ENXIO;
2539 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2540 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2541 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2542 (tgtport->fc_target_port.port_name == traddr.pn)) {
2543 /* an FC port can map to only one nvmet port id */
2544 if (!tgtport->port) {
2545 tgtport->port = port;
2546 port->priv = tgtport;
2547 nvmet_fc_tgtport_get(tgtport);
2548 ret = 0;
2549 } else
2550 ret = -EALREADY;
2551 break;
2552 }
2553 }
2554 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2555 return ret;
2556 }
2557
2558 static void
2559 nvmet_fc_remove_port(struct nvmet_port *port)
2560 {
2561 struct nvmet_fc_tgtport *tgtport = port->priv;
2562 unsigned long flags;
2563
2564 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2565 if (tgtport->port == port) {
2566 nvmet_fc_tgtport_put(tgtport);
2567 tgtport->port = NULL;
2568 }
2569 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2570 }
2571
2572 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2573 .owner = THIS_MODULE,
2574 .type = NVMF_TRTYPE_FC,
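	/* msdbd: max SGL data block descriptors reported to hosts (here, 1) */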
2575 .msdbd = 1,
2576 .add_port = nvmet_fc_add_port,
2577 .remove_port = nvmet_fc_remove_port,
2578 .queue_response = nvmet_fc_fcp_nvme_cmd_done,
2579 .delete_ctrl = nvmet_fc_delete_ctrl,
2580 };
2581
2582 static int __init nvmet_fc_init_module(void)
2583 {
2584 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2585 }
2586
2587 static void __exit nvmet_fc_exit_module(void)
2588 {
2589 /* sanity check - all targetports should be removed */
2590 if (!list_empty(&nvmet_fc_target_list))
2591 pr_warn("%s: targetport list not empty\n", __func__);
2592
2593 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2594
2595 ida_destroy(&nvmet_fc_tgtport_cnt);
2596 }
2597
2598 module_init(nvmet_fc_init_module);
2599 module_exit(nvmet_fc_exit_module);
2600
2601 MODULE_LICENSE("GPL v2");