drivers/nvme/target/fc.c
1 /*
2 * Copyright (c) 2016 Avago Technologies. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful.
9 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13 * See the GNU General Public License for more details, a copy of which
14 * can be found in the file COPYING included with this package
15 *
16 */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/blk-mq.h>
21 #include <linux/parser.h>
22 #include <linux/random.h>
23 #include <uapi/scsi/fc/fc_fs.h>
24 #include <uapi/scsi/fc/fc_els.h>
25
26 #include "nvmet.h"
27 #include <linux/nvme-fc-driver.h>
28 #include <linux/nvme-fc.h>
29
30
31 /* *************************** Data Structures/Defines ****************** */
32
33
34 #define NVMET_LS_CTX_COUNT 4
35
36 /* for this implementation, assume small single frame rqst/rsp */
37 #define NVME_FC_MAX_LS_BUFFER_SIZE 2048
38
39 struct nvmet_fc_tgtport;
40 struct nvmet_fc_tgt_assoc;
41
42 struct nvmet_fc_ls_iod {
43 struct nvmefc_tgt_ls_req *lsreq;
44 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
45
46 struct list_head ls_list; /* tgtport->ls_list */
47
48 struct nvmet_fc_tgtport *tgtport;
49 struct nvmet_fc_tgt_assoc *assoc;
50
51 u8 *rqstbuf;
52 u8 *rspbuf;
53 u16 rqstdatalen;
54 dma_addr_t rspdma;
55
56 struct scatterlist sg[2];
57
58 struct work_struct work;
59 } __aligned(sizeof(unsigned long long));
60
61 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
62 #define NVMET_FC_MAX_XFR_SGENTS (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
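/*
 * i.e. a single data transfer sequence moves at most 256 KB, split into
 * at most NVMET_FC_MAX_XFR_SGENTS page-sized scatterlist entries
 * (64 entries with 4 KB pages).
 */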
63
64 enum nvmet_fcp_datadir {
65 NVMET_FCP_NODATA,
66 NVMET_FCP_WRITE,
67 NVMET_FCP_READ,
68 NVMET_FCP_ABORTED,
69 };
70
71 struct nvmet_fc_fcp_iod {
72 struct nvmefc_tgt_fcp_req *fcpreq;
73
74 struct nvme_fc_cmd_iu cmdiubuf;
75 struct nvme_fc_ersp_iu rspiubuf;
76 dma_addr_t rspdma;
77 struct scatterlist *data_sg;
78 int data_sg_cnt;
79 u32 offset;
80 enum nvmet_fcp_datadir io_dir;
81 bool active;
82 bool abort;
83 bool aborted;
84 bool writedataactive;
85 spinlock_t flock;
86
87 struct nvmet_req req;
88 struct work_struct work;
89 struct work_struct done_work;
90
91 struct nvmet_fc_tgtport *tgtport;
92 struct nvmet_fc_tgt_queue *queue;
93
94 struct list_head fcp_list; /* tgtport->fcp_list */
95 };
96
97 struct nvmet_fc_tgtport {
98
99 struct nvmet_fc_target_port fc_target_port;
100
101 struct list_head tgt_list; /* nvmet_fc_target_list */
102 struct device *dev; /* dev for dma mapping */
103 struct nvmet_fc_target_template *ops;
104
105 struct nvmet_fc_ls_iod *iod;
106 spinlock_t lock;
107 struct list_head ls_list;
108 struct list_head ls_busylist;
109 struct list_head assoc_list;
110 struct ida assoc_cnt;
111 struct nvmet_port *port;
112 struct kref ref;
113 u32 max_sg_cnt;
114 };
115
116 struct nvmet_fc_defer_fcp_req {
117 struct list_head req_list;
118 struct nvmefc_tgt_fcp_req *fcp_req;
119 };
120
121 struct nvmet_fc_tgt_queue {
122 bool ninetypercent;
123 u16 qid;
124 u16 sqsize;
125 u16 ersp_ratio;
126 __le16 sqhd;
127 int cpu;
128 atomic_t connected;
129 atomic_t sqtail;
130 atomic_t zrspcnt;
131 atomic_t rsn;
132 spinlock_t qlock;
133 struct nvmet_port *port;
134 struct nvmet_cq nvme_cq;
135 struct nvmet_sq nvme_sq;
136 struct nvmet_fc_tgt_assoc *assoc;
137 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
138 struct list_head fod_list;
139 struct list_head pending_cmd_list;
140 struct list_head avail_defer_list;
141 struct workqueue_struct *work_q;
142 struct kref ref;
143 } __aligned(sizeof(unsigned long long));
144
145 struct nvmet_fc_tgt_assoc {
146 u64 association_id;
147 u32 a_id;
148 struct nvmet_fc_tgtport *tgtport;
149 struct list_head a_list;
150 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
151 struct kref ref;
152 struct work_struct del_work;
153 };
154
155
156 static inline int
157 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
158 {
159 return (iodptr - iodptr->tgtport->iod);
160 }
161
162 static inline int
163 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
164 {
165 return (fodptr - fodptr->queue->fod);
166 }
167
168
169 /*
170 * Association and Connection IDs:
171 *
172 * Association ID will have random number in upper 6 bytes and zero
173 * in lower 2 bytes
174 *
175 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
176 *
177 * note: Association ID = Connection ID for queue 0
178 */
179 #define BYTES_FOR_QID sizeof(u16)
180 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
181 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
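/*
 * Worked example (illustrative values): association_id 0x1122334455660000
 * with qid 3 gives connection_id 0x1122334455660003;
 * nvmet_fc_getqueueid() then recovers 3 and nvmet_fc_getassociationid()
 * recovers 0x1122334455660000.
 */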
182
183 static inline u64
184 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
185 {
186 return (assoc->association_id | qid);
187 }
188
189 static inline u64
190 nvmet_fc_getassociationid(u64 connectionid)
191 {
192 return connectionid & ~NVMET_FC_QUEUEID_MASK;
193 }
194
195 static inline u16
196 nvmet_fc_getqueueid(u64 connectionid)
197 {
198 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
199 }
200
201 static inline struct nvmet_fc_tgtport *
202 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
203 {
204 return container_of(targetport, struct nvmet_fc_tgtport,
205 fc_target_port);
206 }
207
208 static inline struct nvmet_fc_fcp_iod *
209 nvmet_req_to_fod(struct nvmet_req *nvme_req)
210 {
211 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
212 }
213
214
215 /* *************************** Globals **************************** */
216
217
218 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
219
220 static LIST_HEAD(nvmet_fc_target_list);
221 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
222
223
224 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
225 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
226 static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
227 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
228 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
229 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
230 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
231 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
232 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
233 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
234 struct nvmet_fc_fcp_iod *fod);
235 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
236
237
238 /* *********************** FC-NVME DMA Handling **************************** */
239
240 /*
241 * The fcloop device passes in a NULL device pointer. Real LLDDs will
242 * pass in a valid device pointer. If NULL is passed to the dma mapping
243 * routines, depending on the platform, it may or may not succeed, and
244 * may crash.
245 *
246 * As such:
247 * Wrap all the dma routines and check the dev pointer.
248 *
249 * For simple mappings (which return just a dma address), we'll noop
250 * them, returning a dma address of 0.
251 *
252 * On more complex mappings (dma_map_sg), a pseudo routine fills
253 * in the scatter list, setting all dma addresses to 0.
254 */
255
256 static inline dma_addr_t
257 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
258 enum dma_data_direction dir)
259 {
260 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
261 }
262
263 static inline int
264 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
265 {
266 return dev ? dma_mapping_error(dev, dma_addr) : 0;
267 }
268
269 static inline void
270 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
271 enum dma_data_direction dir)
272 {
273 if (dev)
274 dma_unmap_single(dev, addr, size, dir);
275 }
276
277 static inline void
278 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
279 enum dma_data_direction dir)
280 {
281 if (dev)
282 dma_sync_single_for_cpu(dev, addr, size, dir);
283 }
284
285 static inline void
286 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
287 enum dma_data_direction dir)
288 {
289 if (dev)
290 dma_sync_single_for_device(dev, addr, size, dir);
291 }
292
293 /* pseudo dma_map_sg call */
294 static int
295 fc_map_sg(struct scatterlist *sg, int nents)
296 {
297 struct scatterlist *s;
298 int i;
299
300 WARN_ON(nents == 0 || sg[0].length == 0);
301
302 for_each_sg(sg, s, nents, i) {
303 s->dma_address = 0L;
304 #ifdef CONFIG_NEED_SG_DMA_LENGTH
305 s->dma_length = s->length;
306 #endif
307 }
308 return nents;
309 }
310
311 static inline int
312 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
313 enum dma_data_direction dir)
314 {
315 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
316 }
317
318 static inline void
319 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
320 enum dma_data_direction dir)
321 {
322 if (dev)
323 dma_unmap_sg(dev, sg, nents, dir);
324 }
325
326
327 /* *********************** FC-NVME Port Management ************************ */
328
329
330 static int
331 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
332 {
333 struct nvmet_fc_ls_iod *iod;
334 int i;
335
336 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
337 GFP_KERNEL);
338 if (!iod)
339 return -ENOMEM;
340
341 tgtport->iod = iod;
342
343 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
344 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
345 iod->tgtport = tgtport;
346 list_add_tail(&iod->ls_list, &tgtport->ls_list);
347
348 iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
349 GFP_KERNEL);
350 if (!iod->rqstbuf)
351 goto out_fail;
352
353 iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
354
355 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
356 NVME_FC_MAX_LS_BUFFER_SIZE,
357 DMA_TO_DEVICE);
358 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
359 goto out_fail;
360 }
361
362 return 0;
363
364 out_fail:
365 kfree(iod->rqstbuf);
366 list_del(&iod->ls_list);
367 for (iod--, i--; i >= 0; iod--, i--) {
368 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
369 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
370 kfree(iod->rqstbuf);
371 list_del(&iod->ls_list);
372 }
373
374 kfree(iod);
375
376 return -EFAULT;
377 }
378
379 static void
380 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
381 {
382 struct nvmet_fc_ls_iod *iod = tgtport->iod;
383 int i;
384
385 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
386 fc_dma_unmap_single(tgtport->dev,
387 iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
388 DMA_TO_DEVICE);
389 kfree(iod->rqstbuf);
390 list_del(&iod->ls_list);
391 }
392 kfree(tgtport->iod);
393 }
394
395 static struct nvmet_fc_ls_iod *
396 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
397 {
398 struct nvmet_fc_ls_iod *iod;
399 unsigned long flags;
400
401 spin_lock_irqsave(&tgtport->lock, flags);
402 iod = list_first_entry_or_null(&tgtport->ls_list,
403 struct nvmet_fc_ls_iod, ls_list);
404 if (iod)
405 list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
406 spin_unlock_irqrestore(&tgtport->lock, flags);
407 return iod;
408 }
409
410
411 static void
412 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
413 struct nvmet_fc_ls_iod *iod)
414 {
415 unsigned long flags;
416
417 spin_lock_irqsave(&tgtport->lock, flags);
418 list_move(&iod->ls_list, &tgtport->ls_list);
419 spin_unlock_irqrestore(&tgtport->lock, flags);
420 }
421
422 static void
423 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
424 struct nvmet_fc_tgt_queue *queue)
425 {
426 struct nvmet_fc_fcp_iod *fod = queue->fod;
427 int i;
428
429 for (i = 0; i < queue->sqsize; fod++, i++) {
430 INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
431 INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
432 fod->tgtport = tgtport;
433 fod->queue = queue;
434 fod->active = false;
435 fod->abort = false;
436 fod->aborted = false;
437 fod->fcpreq = NULL;
438 list_add_tail(&fod->fcp_list, &queue->fod_list);
439 spin_lock_init(&fod->flock);
440
441 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
442 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
443 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
444 list_del(&fod->fcp_list);
445 for (fod--, i--; i >= 0; fod--, i--) {
446 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
447 sizeof(fod->rspiubuf),
448 DMA_TO_DEVICE);
449 fod->rspdma = 0L;
450 list_del(&fod->fcp_list);
451 }
452
453 return;
454 }
455 }
456 }
457
458 static void
459 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
460 struct nvmet_fc_tgt_queue *queue)
461 {
462 struct nvmet_fc_fcp_iod *fod = queue->fod;
463 int i;
464
465 for (i = 0; i < queue->sqsize; fod++, i++) {
466 if (fod->rspdma)
467 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
468 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
469 }
470 }
471
472 static struct nvmet_fc_fcp_iod *
473 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
474 {
475 struct nvmet_fc_fcp_iod *fod;
476
477 lockdep_assert_held(&queue->qlock);
478
479 fod = list_first_entry_or_null(&queue->fod_list,
480 struct nvmet_fc_fcp_iod, fcp_list);
481 if (fod) {
482 list_del(&fod->fcp_list);
483 fod->active = true;
484 /*
485 * no queue reference is taken, as it was taken by the
486 * queue lookup just prior to the allocation. The fod
487 * will "inherit" that reference.
488 */
489 }
490 return fod;
491 }
492
493
494 static void
495 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
496 struct nvmet_fc_tgt_queue *queue,
497 struct nvmefc_tgt_fcp_req *fcpreq)
498 {
499 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
500
501 /*
502 * put all admin cmds on hw queue id 0. All io commands go to
503 * their respective hw queue, selected on a modulo basis
504 */
505 fcpreq->hwqid = queue->qid ?
506 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
507
508 if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
509 queue_work_on(queue->cpu, queue->work_q, &fod->work);
510 else
511 nvmet_fc_handle_fcp_rqst(tgtport, fod);
512 }
513
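/*
 * Release a completed fcp_iod back to its queue. If a deferred command is
 * pending on the queue, the fod is immediately reused for it rather than
 * being returned to the free list (see the deferral handling below).
 */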
514 static void
515 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
516 struct nvmet_fc_fcp_iod *fod)
517 {
518 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
519 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
520 struct nvmet_fc_defer_fcp_req *deferfcp;
521 unsigned long flags;
522
523 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
524 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
525
526 fcpreq->nvmet_fc_private = NULL;
527
528 fod->active = false;
529 fod->abort = false;
530 fod->aborted = false;
531 fod->writedataactive = false;
532 fod->fcpreq = NULL;
533
534 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
535
536 /* release the queue lookup reference on the completed IO */
537 nvmet_fc_tgt_q_put(queue);
538
539 spin_lock_irqsave(&queue->qlock, flags);
540 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
541 struct nvmet_fc_defer_fcp_req, req_list);
542 if (!deferfcp) {
543 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
544 spin_unlock_irqrestore(&queue->qlock, flags);
545 return;
546 }
547
548 /* Re-use the fod for the next pending cmd that was deferred */
549 list_del(&deferfcp->req_list);
550
551 fcpreq = deferfcp->fcp_req;
552
553 /* deferfcp can be reused for another IO at a later date */
554 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
555
556 spin_unlock_irqrestore(&queue->qlock, flags);
557
558 /* Save NVME CMD IO in fod */
559 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
560
561 /* Setup new fcpreq to be processed */
562 fcpreq->rspaddr = NULL;
563 fcpreq->rsplen = 0;
564 fcpreq->nvmet_fc_private = fod;
565 fod->fcpreq = fcpreq;
566 fod->active = true;
567
568 /* inform LLDD IO is now being processed */
569 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
570
571 /* Submit deferred IO for processing */
572 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
573
574 /*
575 * Leave the queue lookup reference that was taken when the
576 * fod was originally allocated.
577 */
578 }
579
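/*
 * Illustrative mapping for the cpu selection below: with 4 active cpus,
 * qid 5 yields idx = (5 - 1) % 4 = 0, i.e. the first active cpu; qid 0
 * (the admin queue) also maps to idx 0. A single-hw-queue LLDD simply
 * gets WORK_CPU_UNBOUND.
 */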
580 static int
581 nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
582 {
583 int cpu, idx, cnt;
584
585 if (tgtport->ops->max_hw_queues == 1)
586 return WORK_CPU_UNBOUND;
587
588 /* Simple cpu selection based on qid modulo active cpu count */
589 idx = !qid ? 0 : (qid - 1) % num_active_cpus();
590
591 /* find the n'th active cpu */
592 for (cpu = 0, cnt = 0; ; ) {
593 if (cpu_active(cpu)) {
594 if (cnt == idx)
595 break;
596 cnt++;
597 }
598 cpu = (cpu + 1) % num_possible_cpus();
599 }
600
601 return cpu;
602 }
603
604 static struct nvmet_fc_tgt_queue *
605 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
606 u16 qid, u16 sqsize)
607 {
608 struct nvmet_fc_tgt_queue *queue;
609 unsigned long flags;
610 int ret;
611
612 if (qid > NVMET_NR_QUEUES)
613 return NULL;
614
615 queue = kzalloc((sizeof(*queue) +
616 (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
617 GFP_KERNEL);
618 if (!queue)
619 return NULL;
620
621 if (!nvmet_fc_tgt_a_get(assoc))
622 goto out_free_queue;
623
624 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
625 assoc->tgtport->fc_target_port.port_num,
626 assoc->a_id, qid);
627 if (!queue->work_q)
628 goto out_a_put;
629
630 queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
631 queue->qid = qid;
632 queue->sqsize = sqsize;
633 queue->assoc = assoc;
634 queue->port = assoc->tgtport->port;
635 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
636 INIT_LIST_HEAD(&queue->fod_list);
637 INIT_LIST_HEAD(&queue->avail_defer_list);
638 INIT_LIST_HEAD(&queue->pending_cmd_list);
639 atomic_set(&queue->connected, 0);
640 atomic_set(&queue->sqtail, 0);
641 atomic_set(&queue->rsn, 1);
642 atomic_set(&queue->zrspcnt, 0);
643 spin_lock_init(&queue->qlock);
644 kref_init(&queue->ref);
645
646 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
647
648 ret = nvmet_sq_init(&queue->nvme_sq);
649 if (ret)
650 goto out_fail_iodlist;
651
652 WARN_ON(assoc->queues[qid]);
653 spin_lock_irqsave(&assoc->tgtport->lock, flags);
654 assoc->queues[qid] = queue;
655 spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
656
657 return queue;
658
659 out_fail_iodlist:
660 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
661 destroy_workqueue(queue->work_q);
662 out_a_put:
663 nvmet_fc_tgt_a_put(assoc);
664 out_free_queue:
665 kfree(queue);
666 return NULL;
667 }
668
669
670 static void
671 nvmet_fc_tgt_queue_free(struct kref *ref)
672 {
673 struct nvmet_fc_tgt_queue *queue =
674 container_of(ref, struct nvmet_fc_tgt_queue, ref);
675 unsigned long flags;
676
677 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
678 queue->assoc->queues[queue->qid] = NULL;
679 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
680
681 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
682
683 nvmet_fc_tgt_a_put(queue->assoc);
684
685 destroy_workqueue(queue->work_q);
686
687 kfree(queue);
688 }
689
690 static void
691 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
692 {
693 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
694 }
695
696 static int
697 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
698 {
699 return kref_get_unless_zero(&queue->ref);
700 }
701
702
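/*
 * Quiesce and tear down a target queue: mark it disconnected, abort any
 * outstanding write-data operations, flush deferred commands back to the
 * LLDD, destroy the nvmet sq (if the queue had connected), and drop the
 * queue's initial reference.
 */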
703 static void
704 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
705 {
706 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
707 struct nvmet_fc_fcp_iod *fod = queue->fod;
708 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
709 unsigned long flags;
710 int i, writedataactive;
711 bool disconnect;
712
713 disconnect = atomic_xchg(&queue->connected, 0);
714
715 spin_lock_irqsave(&queue->qlock, flags);
716 /* abort outstanding io's */
717 for (i = 0; i < queue->sqsize; fod++, i++) {
718 if (fod->active) {
719 spin_lock(&fod->flock);
720 fod->abort = true;
721 writedataactive = fod->writedataactive;
722 spin_unlock(&fod->flock);
723 /*
724 * only call lldd abort routine if waiting for
725 * writedata. other outstanding ops should finish
726 * on their own.
727 */
728 if (writedataactive) {
729 spin_lock(&fod->flock);
730 fod->aborted = true;
731 spin_unlock(&fod->flock);
732 tgtport->ops->fcp_abort(
733 &tgtport->fc_target_port, fod->fcpreq);
734 }
735 }
736 }
737
738 /* Cleanup defer'ed IOs in queue */
739 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
740 req_list) {
741 list_del(&deferfcp->req_list);
742 kfree(deferfcp);
743 }
744
745 for (;;) {
746 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
747 struct nvmet_fc_defer_fcp_req, req_list);
748 if (!deferfcp)
749 break;
750
751 list_del(&deferfcp->req_list);
752 spin_unlock_irqrestore(&queue->qlock, flags);
753
754 tgtport->ops->defer_rcv(&tgtport->fc_target_port,
755 deferfcp->fcp_req);
756
757 tgtport->ops->fcp_abort(&tgtport->fc_target_port,
758 deferfcp->fcp_req);
759
760 tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
761 deferfcp->fcp_req);
762
763 /* release the queue lookup reference */
764 nvmet_fc_tgt_q_put(queue);
765
766 kfree(deferfcp);
767
768 spin_lock_irqsave(&queue->qlock, flags);
769 }
770 spin_unlock_irqrestore(&queue->qlock, flags);
771
772 flush_workqueue(queue->work_q);
773
774 if (disconnect)
775 nvmet_sq_destroy(&queue->nvme_sq);
776
777 nvmet_fc_tgt_q_put(queue);
778 }
779
780 static struct nvmet_fc_tgt_queue *
781 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
782 u64 connection_id)
783 {
784 struct nvmet_fc_tgt_assoc *assoc;
785 struct nvmet_fc_tgt_queue *queue;
786 u64 association_id = nvmet_fc_getassociationid(connection_id);
787 u16 qid = nvmet_fc_getqueueid(connection_id);
788 unsigned long flags;
789
790 if (qid > NVMET_NR_QUEUES)
791 return NULL;
792
793 spin_lock_irqsave(&tgtport->lock, flags);
794 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
795 if (association_id == assoc->association_id) {
796 queue = assoc->queues[qid];
797 if (queue &&
798 (!atomic_read(&queue->connected) ||
799 !nvmet_fc_tgt_q_get(queue)))
800 queue = NULL;
801 spin_unlock_irqrestore(&tgtport->lock, flags);
802 return queue;
803 }
804 }
805 spin_unlock_irqrestore(&tgtport->lock, flags);
806 return NULL;
807 }
808
809 static void
810 nvmet_fc_delete_assoc(struct work_struct *work)
811 {
812 struct nvmet_fc_tgt_assoc *assoc =
813 container_of(work, struct nvmet_fc_tgt_assoc, del_work);
814
815 nvmet_fc_delete_target_assoc(assoc);
816 nvmet_fc_tgt_a_put(assoc);
817 }
818
819 static struct nvmet_fc_tgt_assoc *
820 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
821 {
822 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
823 unsigned long flags;
824 u64 ran;
825 int idx;
826 bool needrandom = true;
827
828 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
829 if (!assoc)
830 return NULL;
831
832 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
833 if (idx < 0)
834 goto out_free_assoc;
835
836 if (!nvmet_fc_tgtport_get(tgtport))
837 goto out_ida_put;
838
839 assoc->tgtport = tgtport;
840 assoc->a_id = idx;
841 INIT_LIST_HEAD(&assoc->a_list);
842 kref_init(&assoc->ref);
843 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
844
845 while (needrandom) {
846 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
847 ran = ran << BYTES_FOR_QID_SHIFT;
848
849 spin_lock_irqsave(&tgtport->lock, flags);
850 needrandom = false;
851 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
852 if (ran == tmpassoc->association_id) {
853 needrandom = true;
854 break;
855 }
856 if (!needrandom) {
857 assoc->association_id = ran;
858 list_add_tail(&assoc->a_list, &tgtport->assoc_list);
859 }
860 spin_unlock_irqrestore(&tgtport->lock, flags);
861 }
862
863 return assoc;
864
865 out_ida_put:
866 ida_simple_remove(&tgtport->assoc_cnt, idx);
867 out_free_assoc:
868 kfree(assoc);
869 return NULL;
870 }
871
872 static void
873 nvmet_fc_target_assoc_free(struct kref *ref)
874 {
875 struct nvmet_fc_tgt_assoc *assoc =
876 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
877 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
878 unsigned long flags;
879
880 spin_lock_irqsave(&tgtport->lock, flags);
881 list_del(&assoc->a_list);
882 spin_unlock_irqrestore(&tgtport->lock, flags);
883 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
884 kfree(assoc);
885 nvmet_fc_tgtport_put(tgtport);
886 }
887
888 static void
889 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
890 {
891 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
892 }
893
894 static int
895 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
896 {
897 return kref_get_unless_zero(&assoc->ref);
898 }
899
900 static void
901 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
902 {
903 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
904 struct nvmet_fc_tgt_queue *queue;
905 unsigned long flags;
906 int i;
907
908 spin_lock_irqsave(&tgtport->lock, flags);
909 for (i = NVMET_NR_QUEUES; i >= 0; i--) {
910 queue = assoc->queues[i];
911 if (queue) {
912 if (!nvmet_fc_tgt_q_get(queue))
913 continue;
914 spin_unlock_irqrestore(&tgtport->lock, flags);
915 nvmet_fc_delete_target_queue(queue);
916 nvmet_fc_tgt_q_put(queue);
917 spin_lock_irqsave(&tgtport->lock, flags);
918 }
919 }
920 spin_unlock_irqrestore(&tgtport->lock, flags);
921
922 nvmet_fc_tgt_a_put(assoc);
923 }
924
925 static struct nvmet_fc_tgt_assoc *
926 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
927 u64 association_id)
928 {
929 struct nvmet_fc_tgt_assoc *assoc;
930 struct nvmet_fc_tgt_assoc *ret = NULL;
931 unsigned long flags;
932
933 spin_lock_irqsave(&tgtport->lock, flags);
934 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
935 if (association_id == assoc->association_id) {
936 ret = assoc;
937 nvmet_fc_tgt_a_get(assoc);
938 break;
939 }
940 }
941 spin_unlock_irqrestore(&tgtport->lock, flags);
942
943 return ret;
944 }
945
946
947 /**
948 * nvmet_fc_register_targetport - transport entry point called by an
949 * LLDD to register the existence of a local
950 * NVME subsystem FC port.
951 * @pinfo: pointer to information about the port to be registered
952 * @template: LLDD entrypoints and operational parameters for the port
953 * @dev: physical hardware device node port corresponds to. Will be
954 * used for DMA mappings
955 * @portptr: pointer to a target port pointer. Upon success, the routine
956 * will allocate an nvmet_fc_target_port structure and place its
957 * address in the target port pointer. Upon failure, the target
958 * port pointer will be set to NULL.
959 *
960 * Returns:
961 * a completion status. Must be 0 upon success; a negative errno
962 * (ex: -ENXIO) upon failure.
963 */
964 int
965 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
966 struct nvmet_fc_target_template *template,
967 struct device *dev,
968 struct nvmet_fc_target_port **portptr)
969 {
970 struct nvmet_fc_tgtport *newrec;
971 unsigned long flags;
972 int ret, idx;
973
974 if (!template->xmt_ls_rsp || !template->fcp_op ||
975 !template->fcp_abort ||
976 !template->fcp_req_release || !template->targetport_delete ||
977 !template->max_hw_queues || !template->max_sgl_segments ||
978 !template->max_dif_sgl_segments || !template->dma_boundary) {
979 ret = -EINVAL;
980 goto out_regtgt_failed;
981 }
982
983 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
984 GFP_KERNEL);
985 if (!newrec) {
986 ret = -ENOMEM;
987 goto out_regtgt_failed;
988 }
989
990 idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
991 if (idx < 0) {
992 ret = -ENOSPC;
993 goto out_fail_kfree;
994 }
995
996 if (!get_device(dev) && dev) {
997 ret = -ENODEV;
998 goto out_ida_put;
999 }
1000
1001 newrec->fc_target_port.node_name = pinfo->node_name;
1002 newrec->fc_target_port.port_name = pinfo->port_name;
1003 newrec->fc_target_port.private = &newrec[1];
1004 newrec->fc_target_port.port_id = pinfo->port_id;
1005 newrec->fc_target_port.port_num = idx;
1006 INIT_LIST_HEAD(&newrec->tgt_list);
1007 newrec->dev = dev;
1008 newrec->ops = template;
1009 spin_lock_init(&newrec->lock);
1010 INIT_LIST_HEAD(&newrec->ls_list);
1011 INIT_LIST_HEAD(&newrec->ls_busylist);
1012 INIT_LIST_HEAD(&newrec->assoc_list);
1013 kref_init(&newrec->ref);
1014 ida_init(&newrec->assoc_cnt);
1015 newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
1016 template->max_sgl_segments);
1017
1018 ret = nvmet_fc_alloc_ls_iodlist(newrec);
1019 if (ret) {
1020 ret = -ENOMEM;
1021 goto out_free_newrec;
1022 }
1023
1024 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1025 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
1026 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1027
1028 *portptr = &newrec->fc_target_port;
1029 return 0;
1030
1031 out_free_newrec:
1032 put_device(dev);
1033 out_ida_put:
1034 ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
1035 out_fail_kfree:
1036 kfree(newrec);
1037 out_regtgt_failed:
1038 *portptr = NULL;
1039 return ret;
1040 }
1041 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
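/*
 * Illustrative LLDD usage (hypothetical names, not part of this file):
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name = lport_wwnn,
 *		.port_name = lport_wwpn,
 *		.port_id   = fabric_port_id,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int err;
 *
 *	err = nvmet_fc_register_targetport(&pinfo, &my_lldd_tgt_template,
 *					   &pdev->dev, &targetport);
 *
 * where my_lldd_tgt_template supplies the mandatory ops checked above
 * (xmt_ls_rsp, fcp_op, fcp_abort, fcp_req_release, targetport_delete)
 * and the sizing parameters (max_hw_queues, max_sgl_segments, etc.).
 */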
1042
1043
1044 static void
1045 nvmet_fc_free_tgtport(struct kref *ref)
1046 {
1047 struct nvmet_fc_tgtport *tgtport =
1048 container_of(ref, struct nvmet_fc_tgtport, ref);
1049 struct device *dev = tgtport->dev;
1050 unsigned long flags;
1051
1052 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1053 list_del(&tgtport->tgt_list);
1054 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1055
1056 nvmet_fc_free_ls_iodlist(tgtport);
1057
1058 /* let the LLDD know we've finished tearing it down */
1059 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1060
1061 ida_simple_remove(&nvmet_fc_tgtport_cnt,
1062 tgtport->fc_target_port.port_num);
1063
1064 ida_destroy(&tgtport->assoc_cnt);
1065
1066 kfree(tgtport);
1067
1068 put_device(dev);
1069 }
1070
1071 static void
1072 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1073 {
1074 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1075 }
1076
1077 static int
1078 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1079 {
1080 return kref_get_unless_zero(&tgtport->ref);
1081 }
1082
1083 static void
1084 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1085 {
1086 struct nvmet_fc_tgt_assoc *assoc, *next;
1087 unsigned long flags;
1088
1089 spin_lock_irqsave(&tgtport->lock, flags);
1090 list_for_each_entry_safe(assoc, next,
1091 &tgtport->assoc_list, a_list) {
1092 if (!nvmet_fc_tgt_a_get(assoc))
1093 continue;
1094 spin_unlock_irqrestore(&tgtport->lock, flags);
1095 nvmet_fc_delete_target_assoc(assoc);
1096 nvmet_fc_tgt_a_put(assoc);
1097 spin_lock_irqsave(&tgtport->lock, flags);
1098 }
1099 spin_unlock_irqrestore(&tgtport->lock, flags);
1100 }
1101
1102 /*
1103 * nvmet layer has called to terminate an association
1104 */
1105 static void
1106 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
1107 {
1108 struct nvmet_fc_tgtport *tgtport, *next;
1109 struct nvmet_fc_tgt_assoc *assoc;
1110 struct nvmet_fc_tgt_queue *queue;
1111 unsigned long flags;
1112 bool found_ctrl = false;
1113
1114 /* this is a bit ugly, but don't want to make locks layered */
1115 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1116 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1117 tgt_list) {
1118 if (!nvmet_fc_tgtport_get(tgtport))
1119 continue;
1120 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1121
1122 spin_lock_irqsave(&tgtport->lock, flags);
1123 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1124 queue = assoc->queues[0];
1125 if (queue && queue->nvme_sq.ctrl == ctrl) {
1126 if (nvmet_fc_tgt_a_get(assoc))
1127 found_ctrl = true;
1128 break;
1129 }
1130 }
1131 spin_unlock_irqrestore(&tgtport->lock, flags);
1132
1133 nvmet_fc_tgtport_put(tgtport);
1134
1135 if (found_ctrl) {
1136 schedule_work(&assoc->del_work);
1137 return;
1138 }
1139
1140 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1141 }
1142 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1143 }
1144
1145 /**
1146 * nvmet_fc_unregister_targetport - transport entry point called by an
1147 * LLDD to deregister/remove a previously
1148 * registered local NVME subsystem FC port.
1149 * @target_port: pointer to the (registered) target port that is to be
1150 * deregistered.
1151 *
1152 * Returns:
1153 * a completion status. Must be 0 upon success; a negative errno
1154 * (ex: -ENXIO) upon failure.
1155 */
1156 int
1157 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1158 {
1159 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1160
1161 /* terminate any outstanding associations */
1162 __nvmet_fc_free_assocs(tgtport);
1163
1164 nvmet_fc_tgtport_put(tgtport);
1165
1166 return 0;
1167 }
1168 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
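/*
 * Illustrative teardown (hypothetical caller): an LLDD typically calls
 * nvmet_fc_unregister_targetport(targetport) from its remove/teardown
 * path; the final reference drop then invokes targetport_delete() so the
 * LLDD knows when the port is fully torn down.
 */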
1169
1170
1171 /* *********************** FC-NVME LS Handling **************************** */
1172
1173
1174 static void
1175 nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
1176 {
1177 struct fcnvme_ls_acc_hdr *acc = buf;
1178
1179 acc->w0.ls_cmd = ls_cmd;
1180 acc->desc_list_len = desc_len;
1181 acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1182 acc->rqst.desc_len =
1183 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1184 acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1185 }
1186
1187 static int
1188 nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1189 u8 reason, u8 explanation, u8 vendor)
1190 {
1191 struct fcnvme_ls_rjt *rjt = buf;
1192
1193 nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1194 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1195 ls_cmd);
1196 rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1197 rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1198 rjt->rjt.reason_code = reason;
1199 rjt->rjt.reason_explanation = explanation;
1200 rjt->rjt.vendor = vendor;
1201
1202 return sizeof(struct fcnvme_ls_rjt);
1203 }
1204
1205 /* Validation Error indexes into the string table below */
1206 enum {
1207 VERR_NO_ERROR = 0,
1208 VERR_CR_ASSOC_LEN = 1,
1209 VERR_CR_ASSOC_RQST_LEN = 2,
1210 VERR_CR_ASSOC_CMD = 3,
1211 VERR_CR_ASSOC_CMD_LEN = 4,
1212 VERR_ERSP_RATIO = 5,
1213 VERR_ASSOC_ALLOC_FAIL = 6,
1214 VERR_QUEUE_ALLOC_FAIL = 7,
1215 VERR_CR_CONN_LEN = 8,
1216 VERR_CR_CONN_RQST_LEN = 9,
1217 VERR_ASSOC_ID = 10,
1218 VERR_ASSOC_ID_LEN = 11,
1219 VERR_NO_ASSOC = 12,
1220 VERR_CONN_ID = 13,
1221 VERR_CONN_ID_LEN = 14,
1222 VERR_NO_CONN = 15,
1223 VERR_CR_CONN_CMD = 16,
1224 VERR_CR_CONN_CMD_LEN = 17,
1225 VERR_DISCONN_LEN = 18,
1226 VERR_DISCONN_RQST_LEN = 19,
1227 VERR_DISCONN_CMD = 20,
1228 VERR_DISCONN_CMD_LEN = 21,
1229 VERR_DISCONN_SCOPE = 22,
1230 VERR_RS_LEN = 23,
1231 VERR_RS_RQST_LEN = 24,
1232 VERR_RS_CMD = 25,
1233 VERR_RS_CMD_LEN = 26,
1234 VERR_RS_RCTL = 27,
1235 VERR_RS_RO = 28,
1236 };
1237
1238 static char *validation_errors[] = {
1239 "OK",
1240 "Bad CR_ASSOC Length",
1241 "Bad CR_ASSOC Rqst Length",
1242 "Not CR_ASSOC Cmd",
1243 "Bad CR_ASSOC Cmd Length",
1244 "Bad Ersp Ratio",
1245 "Association Allocation Failed",
1246 "Queue Allocation Failed",
1247 "Bad CR_CONN Length",
1248 "Bad CR_CONN Rqst Length",
1249 "Not Association ID",
1250 "Bad Association ID Length",
1251 "No Association",
1252 "Not Connection ID",
1253 "Bad Connection ID Length",
1254 "No Connection",
1255 "Not CR_CONN Cmd",
1256 "Bad CR_CONN Cmd Length",
1257 "Bad DISCONN Length",
1258 "Bad DISCONN Rqst Length",
1259 "Not DISCONN Cmd",
1260 "Bad DISCONN Cmd Length",
1261 "Bad Disconnect Scope",
1262 "Bad RS Length",
1263 "Bad RS Rqst Length",
1264 "Not RS Cmd",
1265 "Bad RS Cmd Length",
1266 "Bad RS R_CTL",
1267 "Bad RS Relative Offset",
1268 };
1269
1270 static void
1271 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1272 struct nvmet_fc_ls_iod *iod)
1273 {
1274 struct fcnvme_ls_cr_assoc_rqst *rqst =
1275 (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1276 struct fcnvme_ls_cr_assoc_acc *acc =
1277 (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1278 struct nvmet_fc_tgt_queue *queue;
1279 int ret = 0;
1280
1281 memset(acc, 0, sizeof(*acc));
1282
1283 /*
1284 * FC-NVME spec changes. Some initiators send different lengths, as
1285 * the padding size for the Create Association Cmd descriptor was
1286 * incorrect in earlier spec revisions.
1287 * Accept anything of "minimum" length. Assume the format per the 1.15
1288 * spec (with HOSTID reduced to 16 bytes), and ignore how long the
1289 * trailing pad is.
1290 */
1291 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1292 ret = VERR_CR_ASSOC_LEN;
1293 else if (be32_to_cpu(rqst->desc_list_len) <
1294 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1295 ret = VERR_CR_ASSOC_RQST_LEN;
1296 else if (rqst->assoc_cmd.desc_tag !=
1297 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1298 ret = VERR_CR_ASSOC_CMD;
1299 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
1300 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1301 ret = VERR_CR_ASSOC_CMD_LEN;
1302 else if (!rqst->assoc_cmd.ersp_ratio ||
1303 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1304 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1305 ret = VERR_ERSP_RATIO;
1306
1307 else {
1308 /* new association w/ admin queue */
1309 iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1310 if (!iod->assoc)
1311 ret = VERR_ASSOC_ALLOC_FAIL;
1312 else {
1313 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1314 be16_to_cpu(rqst->assoc_cmd.sqsize));
1315 if (!queue)
1316 ret = VERR_QUEUE_ALLOC_FAIL;
1317 }
1318 }
1319
1320 if (ret) {
1321 dev_err(tgtport->dev,
1322 "Create Association LS failed: %s\n",
1323 validation_errors[ret]);
1324 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1325 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1326 FCNVME_RJT_RC_LOGIC,
1327 FCNVME_RJT_EXP_NONE, 0);
1328 return;
1329 }
1330
1331 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1332 atomic_set(&queue->connected, 1);
1333 queue->sqhd = 0; /* best place to init value */
1334
1335 /* format a response */
1336
1337 iod->lsreq->rsplen = sizeof(*acc);
1338
1339 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1340 fcnvme_lsdesc_len(
1341 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1342 FCNVME_LS_CREATE_ASSOCIATION);
1343 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1344 acc->associd.desc_len =
1345 fcnvme_lsdesc_len(
1346 sizeof(struct fcnvme_lsdesc_assoc_id));
1347 acc->associd.association_id =
1348 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1349 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1350 acc->connectid.desc_len =
1351 fcnvme_lsdesc_len(
1352 sizeof(struct fcnvme_lsdesc_conn_id));
1353 acc->connectid.connection_id = acc->associd.association_id;
1354 }
1355
1356 static void
1357 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1358 struct nvmet_fc_ls_iod *iod)
1359 {
1360 struct fcnvme_ls_cr_conn_rqst *rqst =
1361 (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1362 struct fcnvme_ls_cr_conn_acc *acc =
1363 (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1364 struct nvmet_fc_tgt_queue *queue;
1365 int ret = 0;
1366
1367 memset(acc, 0, sizeof(*acc));
1368
1369 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1370 ret = VERR_CR_CONN_LEN;
1371 else if (rqst->desc_list_len !=
1372 fcnvme_lsdesc_len(
1373 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1374 ret = VERR_CR_CONN_RQST_LEN;
1375 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1376 ret = VERR_ASSOC_ID;
1377 else if (rqst->associd.desc_len !=
1378 fcnvme_lsdesc_len(
1379 sizeof(struct fcnvme_lsdesc_assoc_id)))
1380 ret = VERR_ASSOC_ID_LEN;
1381 else if (rqst->connect_cmd.desc_tag !=
1382 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1383 ret = VERR_CR_CONN_CMD;
1384 else if (rqst->connect_cmd.desc_len !=
1385 fcnvme_lsdesc_len(
1386 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1387 ret = VERR_CR_CONN_CMD_LEN;
1388 else if (!rqst->connect_cmd.ersp_ratio ||
1389 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1390 be16_to_cpu(rqst->connect_cmd.sqsize)))
1391 ret = VERR_ERSP_RATIO;
1392
1393 else {
1394 /* new io queue */
1395 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1396 be64_to_cpu(rqst->associd.association_id));
1397 if (!iod->assoc)
1398 ret = VERR_NO_ASSOC;
1399 else {
1400 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1401 be16_to_cpu(rqst->connect_cmd.qid),
1402 be16_to_cpu(rqst->connect_cmd.sqsize));
1403 if (!queue)
1404 ret = VERR_QUEUE_ALLOC_FAIL;
1405
1406 /* release get taken in nvmet_fc_find_target_assoc */
1407 nvmet_fc_tgt_a_put(iod->assoc);
1408 }
1409 }
1410
1411 if (ret) {
1412 dev_err(tgtport->dev,
1413 "Create Connection LS failed: %s\n",
1414 validation_errors[ret]);
1415 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1416 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1417 (ret == VERR_NO_ASSOC) ?
1418 FCNVME_RJT_RC_INV_ASSOC :
1419 FCNVME_RJT_RC_LOGIC,
1420 FCNVME_RJT_EXP_NONE, 0);
1421 return;
1422 }
1423
1424 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1425 atomic_set(&queue->connected, 1);
1426 queue->sqhd = 0; /* best place to init value */
1427
1428 /* format a response */
1429
1430 iod->lsreq->rsplen = sizeof(*acc);
1431
1432 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1433 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1434 FCNVME_LS_CREATE_CONNECTION);
1435 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1436 acc->connectid.desc_len =
1437 fcnvme_lsdesc_len(
1438 sizeof(struct fcnvme_lsdesc_conn_id));
1439 acc->connectid.connection_id =
1440 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1441 be16_to_cpu(rqst->connect_cmd.qid)));
1442 }
1443
1444 static void
1445 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1446 struct nvmet_fc_ls_iod *iod)
1447 {
1448 struct fcnvme_ls_disconnect_rqst *rqst =
1449 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1450 struct fcnvme_ls_disconnect_acc *acc =
1451 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1452 struct nvmet_fc_tgt_queue *queue = NULL;
1453 struct nvmet_fc_tgt_assoc *assoc;
1454 int ret = 0;
1455 bool del_assoc = false;
1456
1457 memset(acc, 0, sizeof(*acc));
1458
1459 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1460 ret = VERR_DISCONN_LEN;
1461 else if (rqst->desc_list_len !=
1462 fcnvme_lsdesc_len(
1463 sizeof(struct fcnvme_ls_disconnect_rqst)))
1464 ret = VERR_DISCONN_RQST_LEN;
1465 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1466 ret = VERR_ASSOC_ID;
1467 else if (rqst->associd.desc_len !=
1468 fcnvme_lsdesc_len(
1469 sizeof(struct fcnvme_lsdesc_assoc_id)))
1470 ret = VERR_ASSOC_ID_LEN;
1471 else if (rqst->discon_cmd.desc_tag !=
1472 cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1473 ret = VERR_DISCONN_CMD;
1474 else if (rqst->discon_cmd.desc_len !=
1475 fcnvme_lsdesc_len(
1476 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1477 ret = VERR_DISCONN_CMD_LEN;
1478 else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1479 (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1480 ret = VERR_DISCONN_SCOPE;
1481 else {
1482 /* match an active association */
1483 assoc = nvmet_fc_find_target_assoc(tgtport,
1484 be64_to_cpu(rqst->associd.association_id));
1485 iod->assoc = assoc;
1486 if (assoc) {
1487 if (rqst->discon_cmd.scope ==
1488 FCNVME_DISCONN_CONNECTION) {
1489 queue = nvmet_fc_find_target_queue(tgtport,
1490 be64_to_cpu(
1491 rqst->discon_cmd.id));
1492 if (!queue) {
1493 nvmet_fc_tgt_a_put(assoc);
1494 ret = VERR_NO_CONN;
1495 }
1496 }
1497 } else
1498 ret = VERR_NO_ASSOC;
1499 }
1500
1501 if (ret) {
1502 dev_err(tgtport->dev,
1503 "Disconnect LS failed: %s\n",
1504 validation_errors[ret]);
1505 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1506 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1507 (ret == VERR_NO_ASSOC) ?
1508 FCNVME_RJT_RC_INV_ASSOC :
1509 (ret == VERR_NO_CONN) ?
1510 FCNVME_RJT_RC_INV_CONN :
1511 FCNVME_RJT_RC_LOGIC,
1512 FCNVME_RJT_EXP_NONE, 0);
1513 return;
1514 }
1515
1516 /* format a response */
1517
1518 iod->lsreq->rsplen = sizeof(*acc);
1519
1520 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1521 fcnvme_lsdesc_len(
1522 sizeof(struct fcnvme_ls_disconnect_acc)),
1523 FCNVME_LS_DISCONNECT);
1524
1525
1526 /* are we to delete a Connection ID (queue) */
1527 if (queue) {
1528 int qid = queue->qid;
1529
1530 nvmet_fc_delete_target_queue(queue);
1531
1532 /* release the get taken by find_target_queue */
1533 nvmet_fc_tgt_q_put(queue);
1534
1535 /* tear association down if admin queue (qid 0) terminated */
1536 if (!qid)
1537 del_assoc = true;
1538 }
1539
1540 /* release get taken in nvmet_fc_find_target_assoc */
1541 nvmet_fc_tgt_a_put(iod->assoc);
1542
1543 if (del_assoc)
1544 nvmet_fc_delete_target_assoc(iod->assoc);
1545 }
1546
1547
1548 /* *********************** NVME Ctrl Routines **************************** */
1549
1550
1551 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1552
1553 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1554
1555 static void
1556 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1557 {
1558 struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1559 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1560
1561 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1562 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1563 nvmet_fc_free_ls_iod(tgtport, iod);
1564 nvmet_fc_tgtport_put(tgtport);
1565 }
1566
1567 static void
1568 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1569 struct nvmet_fc_ls_iod *iod)
1570 {
1571 int ret;
1572
1573 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1574 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1575
1576 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1577 if (ret)
1578 nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1579 }
1580
1581 /*
1582 * Actual processing routine for received FC-NVME LS Requests from the LLDD
1583 */
1584 static void
1585 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1586 struct nvmet_fc_ls_iod *iod)
1587 {
1588 struct fcnvme_ls_rqst_w0 *w0 =
1589 (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1590
1591 iod->lsreq->nvmet_fc_private = iod;
1592 iod->lsreq->rspbuf = iod->rspbuf;
1593 iod->lsreq->rspdma = iod->rspdma;
1594 iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1595 /* Be preventative: handlers will later set a valid length */
1596 iod->lsreq->rsplen = 0;
1597
1598 iod->assoc = NULL;
1599
1600 /*
1601 * handlers:
1602 * parse request input, execute the request, and format the
1603 * LS response
1604 */
1605 switch (w0->ls_cmd) {
1606 case FCNVME_LS_CREATE_ASSOCIATION:
1607 /* Creates Association and initial Admin Queue/Connection */
1608 nvmet_fc_ls_create_association(tgtport, iod);
1609 break;
1610 case FCNVME_LS_CREATE_CONNECTION:
1611 /* Creates an IO Queue/Connection */
1612 nvmet_fc_ls_create_connection(tgtport, iod);
1613 break;
1614 case FCNVME_LS_DISCONNECT:
1615 /* Terminate a Queue/Connection or the Association */
1616 nvmet_fc_ls_disconnect(tgtport, iod);
1617 break;
1618 default:
1619 iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1620 NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1621 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1622 }
1623
1624 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1625 }
1626
1627 /*
1628 * Work-context wrapper that performs the actual LS request processing above
1629 */
1630 static void
1631 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1632 {
1633 struct nvmet_fc_ls_iod *iod =
1634 container_of(work, struct nvmet_fc_ls_iod, work);
1635 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1636
1637 nvmet_fc_handle_ls_rqst(tgtport, iod);
1638 }
1639
1640
1641 /**
1642 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1643 * upon the reception of an NVME LS request.
1644 *
1645 * The nvmet-fc layer will copy payload to an internal structure for
1646 * processing. As such, upon completion of the routine, the LLDD may
1647 * immediately free/reuse the LS request buffer passed in the call.
1648 *
1649 * If this routine returns error, the LLDD should abort the exchange.
1650 *
1651 * @target_port: pointer to the (registered) target port the LS was
1652 * received on.
1653 * @lsreq: pointer to a lsreq request structure to be used to reference
1654 * the exchange corresponding to the LS.
1655 * @lsreqbuf: pointer to the buffer containing the LS Request
1656 * @lsreqbuf_len: length, in bytes, of the received LS request
1657 */
1658 int
1659 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1660 struct nvmefc_tgt_ls_req *lsreq,
1661 void *lsreqbuf, u32 lsreqbuf_len)
1662 {
1663 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1664 struct nvmet_fc_ls_iod *iod;
1665
1666 if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1667 return -E2BIG;
1668
1669 if (!nvmet_fc_tgtport_get(tgtport))
1670 return -ESHUTDOWN;
1671
1672 iod = nvmet_fc_alloc_ls_iod(tgtport);
1673 if (!iod) {
1674 nvmet_fc_tgtport_put(tgtport);
1675 return -ENOENT;
1676 }
1677
1678 iod->lsreq = lsreq;
1679 iod->fcpreq = NULL;
1680 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1681 iod->rqstdatalen = lsreqbuf_len;
1682
1683 schedule_work(&iod->work);
1684
1685 return 0;
1686 }
1687 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
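/*
 * Illustrative LLDD call (hypothetical names): from the LLDD's LS receive
 * path, once the LS payload sits in a CPU-addressable buffer:
 *
 *	ret = nvmet_fc_rcv_ls_req(targetport, &ls_ctx->lsreq,
 *				  ls_payload, ls_payload_len);
 *
 * The payload buffer may be freed or reused as soon as this returns,
 * since the request is copied into an internal iod; a non-zero return
 * means the LLDD should abort the exchange.
 */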
1688
1689
1690 /*
1691 * **********************
1692 * Start of FCP handling
1693 * **********************
1694 */
1695
1696 static int
1697 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1698 {
1699 struct scatterlist *sg;
1700 struct page *page;
1701 unsigned int nent;
1702 u32 page_len, length;
1703 int i = 0;
1704
1705 length = fod->req.transfer_len;
1706 nent = DIV_ROUND_UP(length, PAGE_SIZE);
1707 sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
1708 if (!sg)
1709 goto out;
1710
1711 sg_init_table(sg, nent);
1712
1713 while (length) {
1714 page_len = min_t(u32, length, PAGE_SIZE);
1715
1716 page = alloc_page(GFP_KERNEL);
1717 if (!page)
1718 goto out_free_pages;
1719
1720 sg_set_page(&sg[i], page, page_len, 0);
1721 length -= page_len;
1722 i++;
1723 }
1724
1725 fod->data_sg = sg;
1726 fod->data_sg_cnt = nent;
1727 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1728 ((fod->io_dir == NVMET_FCP_WRITE) ?
1729 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1730 /* note: write from initiator perspective */
1731
1732 return 0;
1733
1734 out_free_pages:
1735 while (i > 0) {
1736 i--;
1737 __free_page(sg_page(&sg[i]));
1738 }
1739 kfree(sg);
1740 fod->data_sg = NULL;
1741 fod->data_sg_cnt = 0;
1742 out:
1743 return NVME_SC_INTERNAL;
1744 }
1745
1746 static void
1747 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1748 {
1749 struct scatterlist *sg;
1750 int count;
1751
1752 if (!fod->data_sg || !fod->data_sg_cnt)
1753 return;
1754
1755 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1756 ((fod->io_dir == NVMET_FCP_WRITE) ?
1757 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1758 for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
1759 __free_page(sg_page(sg));
1760 kfree(fod->data_sg);
1761 fod->data_sg = NULL;
1762 fod->data_sg_cnt = 0;
1763 }
1764
1765
1766 static bool
1767 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1768 {
1769 u32 sqtail, used;
1770
1771 /* egad, this is ugly. And sqtail is just a best guess */
1772 sqtail = atomic_read(&q->sqtail) % q->sqsize;
1773
1774 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1775 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
1776 }
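/*
 * Worked example for the check above: sqsize = 32, sqhd = 4, sqtail = 2
 * gives used = 2 + 32 - 4 = 30, and 30 * 10 = 300 >= (32 - 1) * 9 = 279,
 * so the queue is treated as 90% (or more) full.
 */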
1777
1778 /*
1779 * Prep RSP payload.
1780 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1781 */
1782 static void
1783 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1784 struct nvmet_fc_fcp_iod *fod)
1785 {
1786 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1787 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1788 struct nvme_completion *cqe = &ersp->cqe;
1789 u32 *cqewd = (u32 *)cqe;
1790 bool send_ersp = false;
1791 u32 rsn, rspcnt, xfr_length;
1792
1793 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1794 xfr_length = fod->req.transfer_len;
1795 else
1796 xfr_length = fod->offset;
1797
1798 /*
1799 * check to see if we can send a 0's rsp.
1800 * Note: to send a 0's response, the NVME-FC host transport will
1801 * recreate the CQE. The host transport knows: sq id, SQHD (last
1802 * seen in an ersp), and command_id. Thus it will create a
1803 * zero-filled CQE with those known fields filled in. Transport
1804 * must send an ersp for any condition where the cqe won't match
1805 * this.
1806 *
1807 * Here are the FC-NVME mandated cases where we must send an ersp:
1808 * every N responses, where N=ersp_ratio
1809 * force fabric commands to send ersp's (not in FC-NVME but good
1810 * practice)
1811 * normal cmds: any time status is non-zero, or status is zero
1812 * but words 0 or 1 are non-zero.
1813 * the SQ is 90% or more full
1814 * the cmd is a fused command
1815 * transferred data length not equal to cmd iu length
1816 */
1817 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1818 if (!(rspcnt % fod->queue->ersp_ratio) ||
1819 sqe->opcode == nvme_fabrics_command ||
1820 xfr_length != fod->req.transfer_len ||
1821 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1822 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1823 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
1824 send_ersp = true;
1825
1826 /* re-set the fields */
1827 fod->fcpreq->rspaddr = ersp;
1828 fod->fcpreq->rspdma = fod->rspdma;
1829
1830 if (!send_ersp) {
1831 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1832 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1833 } else {
1834 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1835 rsn = atomic_inc_return(&fod->queue->rsn);
1836 ersp->rsn = cpu_to_be32(rsn);
1837 ersp->xfrd_len = cpu_to_be32(xfr_length);
1838 fod->fcpreq->rsplen = sizeof(*ersp);
1839 }
1840
1841 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1842 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1843 }
1844
1845 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1846
1847 static void
1848 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
1849 struct nvmet_fc_fcp_iod *fod)
1850 {
1851 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1852
1853 /* data no longer needed */
1854 nvmet_fc_free_tgt_pgs(fod);
1855
1856 /*
1857 * if an ABTS was received or we issued the fcp_abort early
1858 * don't call abort routine again.
1859 */
1860 /* no need to take lock - lock was taken earlier to get here */
1861 if (!fod->aborted)
1862 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
1863
1864 nvmet_fc_free_fcp_iod(fod->queue, fod);
1865 }
1866
1867 static void
1868 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1869 struct nvmet_fc_fcp_iod *fod)
1870 {
1871 int ret;
1872
1873 fod->fcpreq->op = NVMET_FCOP_RSP;
1874 fod->fcpreq->timeout = 0;
1875
1876 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1877
1878 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1879 if (ret)
1880 nvmet_fc_abort_op(tgtport, fod);
1881 }
1882
1883 static void
1884 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1885 struct nvmet_fc_fcp_iod *fod, u8 op)
1886 {
1887 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1888 unsigned long flags;
1889 u32 tlen;
1890 int ret;
1891
1892 fcpreq->op = op;
1893 fcpreq->offset = fod->offset;
1894 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1895
1896 tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
1897 (fod->req.transfer_len - fod->offset));
1898 fcpreq->transfer_length = tlen;
1899 fcpreq->transferred_length = 0;
1900 fcpreq->fcp_error = 0;
1901 fcpreq->rsplen = 0;
1902
1903 fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
1904 fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
1905
1906 /*
1907 * If the last READDATA request: check if LLDD supports
1908 * combined xfr with response.
1909 */
1910 if ((op == NVMET_FCOP_READDATA) &&
1911 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
1912 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1913 fcpreq->op = NVMET_FCOP_READDATA_RSP;
1914 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1915 }
1916
1917 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1918 if (ret) {
1919 /*
1920 * should be ok to set w/o lock as it's in the thread of
1921 * execution (not an async timer routine) and doesn't
1922 * contend with any clearing action
1923 */
1924 fod->abort = true;
1925
1926 if (op == NVMET_FCOP_WRITEDATA) {
1927 spin_lock_irqsave(&fod->flock, flags);
1928 fod->writedataactive = false;
1929 spin_unlock_irqrestore(&fod->flock, flags);
1930 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1931 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1932 fcpreq->fcp_error = ret;
1933 fcpreq->transferred_length = 0;
1934 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1935 }
1936 }
1937 }
1938
1939 static inline bool
1940 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
1941 {
1942 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1943 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1944
1945 /* if in the middle of an io and we need to tear down */
1946 if (abort) {
1947 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
1948 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1949 return true;
1950 }
1951
1952 nvmet_fc_abort_op(tgtport, fod);
1953 return true;
1954 }
1955
1956 return false;
1957 }
1958
1959 /*
1960 * actual done handler for FCP operations when completed by the lldd
1961 */
1962 static void
1963 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1964 {
1965 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1966 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1967 unsigned long flags;
1968 bool abort;
1969
1970 spin_lock_irqsave(&fod->flock, flags);
1971 abort = fod->abort;
1972 fod->writedataactive = false;
1973 spin_unlock_irqrestore(&fod->flock, flags);
1974
1975 switch (fcpreq->op) {
1976
1977 case NVMET_FCOP_WRITEDATA:
1978 if (__nvmet_fc_fod_op_abort(fod, abort))
1979 return;
1980 if (fcpreq->fcp_error ||
1981 fcpreq->transferred_length != fcpreq->transfer_length) {
1982 spin_lock(&fod->flock);
1983 fod->abort = true;
1984 spin_unlock(&fod->flock);
1985
1986 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1987 return;
1988 }
1989
1990 fod->offset += fcpreq->transferred_length;
1991 if (fod->offset != fod->req.transfer_len) {
1992 spin_lock_irqsave(&fod->flock, flags);
1993 fod->writedataactive = true;
1994 spin_unlock_irqrestore(&fod->flock, flags);
1995
1996 /* transfer the next chunk */
1997 nvmet_fc_transfer_fcp_data(tgtport, fod,
1998 NVMET_FCOP_WRITEDATA);
1999 return;
2000 }
2001
2002 /* data transfer complete, resume with nvmet layer */
2003 nvmet_req_execute(&fod->req);
2004 break;
2005
2006 case NVMET_FCOP_READDATA:
2007 case NVMET_FCOP_READDATA_RSP:
2008 if (__nvmet_fc_fod_op_abort(fod, abort))
2009 return;
2010 if (fcpreq->fcp_error ||
2011 fcpreq->transferred_length != fcpreq->transfer_length) {
2012 nvmet_fc_abort_op(tgtport, fod);
2013 return;
2014 }
2015
2016 /* success */
2017
2018 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
2019 /* data no longer needed */
2020 nvmet_fc_free_tgt_pgs(fod);
2021 nvmet_fc_free_fcp_iod(fod->queue, fod);
2022 return;
2023 }
2024
2025 fod->offset += fcpreq->transferred_length;
2026 if (fod->offset != fod->req.transfer_len) {
2027 /* transfer the next chunk */
2028 nvmet_fc_transfer_fcp_data(tgtport, fod,
2029 NVMET_FCOP_READDATA);
2030 return;
2031 }
2032
2033 /* data transfer complete, send response */
2034
2035 /* data no longer needed */
2036 nvmet_fc_free_tgt_pgs(fod);
2037
2038 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2039
2040 break;
2041
2042 case NVMET_FCOP_RSP:
2043 if (__nvmet_fc_fod_op_abort(fod, abort))
2044 return;
2045 nvmet_fc_free_fcp_iod(fod->queue, fod);
2046 break;
2047
2048 default:
2049 break;
2050 }
2051 }
2052
2053 static void
2054 nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
2055 {
2056 struct nvmet_fc_fcp_iod *fod =
2057 container_of(work, struct nvmet_fc_fcp_iod, done_work);
2058
2059 nvmet_fc_fod_op_done(fod);
2060 }
2061
2062 static void
2063 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
2064 {
2065 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2066 struct nvmet_fc_tgt_queue *queue = fod->queue;
2067
2068 if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
2069 /* context switch so completion is not in ISR context */
2070 queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
2071 else
2072 nvmet_fc_fod_op_done(fod);
2073 }
2074
2075 /*
2076 * actual completion handler after execution by the nvmet layer
2077 */
2078 static void
2079 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2080 struct nvmet_fc_fcp_iod *fod, int status)
2081 {
2082 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2083 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2084 unsigned long flags;
2085 bool abort;
2086
2087 spin_lock_irqsave(&fod->flock, flags);
2088 abort = fod->abort;
2089 spin_unlock_irqrestore(&fod->flock, flags);
2090
2091 /* if we have a CQE, snoop the last sq_head value */
2092 if (!status)
2093 fod->queue->sqhd = cqe->sq_head;
2094
2095 if (abort) {
2096 nvmet_fc_abort_op(tgtport, fod);
2097 return;
2098 }
2099
2100 /* if an error occurred handling the cmd after initial parsing */
2101 if (status) {
2102 /* fudge up a failed CQE status for our transport error */
2103 memset(cqe, 0, sizeof(*cqe));
2104 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
2105 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2106 cqe->command_id = sqe->command_id;
2107 cqe->status = cpu_to_le16(status);
2108 } else {
2109
2110 /*
2111 * try to push the data even if the SQE status is non-zero.
2112 * There may be a status for which data was still intended to
2113 * be moved.
2114 */
2115 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2116 /* push the data over before sending rsp */
2117 nvmet_fc_transfer_fcp_data(tgtport, fod,
2118 NVMET_FCOP_READDATA);
2119 return;
2120 }
2121
2122 /* writes & no data - fall thru */
2123 }
2124
2125 /* data no longer needed */
2126 nvmet_fc_free_tgt_pgs(fod);
2127
2128 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2129 }
2130
2131
2132 static void
2133 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2134 {
2135 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2136 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2137
2138 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2139 }
2140
2141
2142 /*
2143 * Actual processing routine for a received FC-NVME FCP command from the LLDD
2144 */
2145 static void
2146 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2147 struct nvmet_fc_fcp_iod *fod)
2148 {
2149 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2150 u32 xfrlen = be32_to_cpu(cmdiu->data_len);
2151 int ret;
2152
2153 /*
2154 * Fused commands are currently not supported in the Linux
2155 * implementation.
2156 *
2157 * As such, the FC transport implementation does not look at
2158 * fused commands, nor does it order their delivery to the upper
2159 * layer (based on csn) until both commands have been received.
2160 */
2161
2162 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2163
2164 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2165 fod->io_dir = NVMET_FCP_WRITE;
2166 if (!nvme_is_write(&cmdiu->sqe))
2167 goto transport_error;
2168 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2169 fod->io_dir = NVMET_FCP_READ;
2170 if (nvme_is_write(&cmdiu->sqe))
2171 goto transport_error;
2172 } else {
2173 fod->io_dir = NVMET_FCP_NODATA;
2174 if (xfrlen)
2175 goto transport_error;
2176 }
2177
2178 fod->req.cmd = &fod->cmdiubuf.sqe;
2179 fod->req.rsp = &fod->rspiubuf.cqe;
2180 fod->req.port = fod->queue->port;
2181
2182 /* clear any response payload */
2183 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2184
2185 fod->data_sg = NULL;
2186 fod->data_sg_cnt = 0;
2187
2188 ret = nvmet_req_init(&fod->req,
2189 &fod->queue->nvme_cq,
2190 &fod->queue->nvme_sq,
2191 &nvmet_fc_tgt_fcp_ops);
2192 if (!ret) {
2193 /* bad SQE content or invalid ctrl state */
2194 /* nvmet layer has already called op done to send rsp. */
2195 return;
2196 }
2197
2198 fod->req.transfer_len = xfrlen;
2199
2200 /* keep a running counter of tail position */
2201 atomic_inc(&fod->queue->sqtail);
2202
2203 if (fod->req.transfer_len) {
2204 ret = nvmet_fc_alloc_tgt_pgs(fod);
2205 if (ret) {
2206 nvmet_req_complete(&fod->req, ret);
2207 return;
2208 }
2209 }
2210 fod->req.sg = fod->data_sg;
2211 fod->req.sg_cnt = fod->data_sg_cnt;
2212 fod->offset = 0;
2213
2214 if (fod->io_dir == NVMET_FCP_WRITE) {
2215 /* pull the data over before invoking nvmet layer */
2216 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2217 return;
2218 }
2219
2220 /*
2221 * Reads or no data:
2222 *
2223 * can invoke the nvmet layer now. If read data, cmd completion will
2224 * push the data
2225 */
2226 nvmet_req_execute(&fod->req);
2227 return;
2228
2229 transport_error:
2230 nvmet_fc_abort_op(tgtport, fod);
2231 }
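
/*
 * Editorial summary of the FCP i/o flow implemented above (no new behavior):
 *   write:  rcv CMD IU -> nvmet_req_init() -> WRITEDATA op(s) to pull data
 *           -> nvmet_req_execute() -> RSP op (ERSP or zero-filled rsp)
 *   read:   rcv CMD IU -> nvmet_req_init() -> nvmet_req_execute() ->
 *           READDATA op(s), the last possibly READDATA_RSP -> RSP if needed
 *   nodata: rcv CMD IU -> nvmet_req_init() -> nvmet_req_execute() -> RSP op
 * Errors either complete the nvmet request with NVME_SC_INTERNAL or funnel
 * into nvmet_fc_abort_op().
 */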
2232
2233 /*
2234 * Work context handler for processing a received FC-NVME FCP command from the LLDD
2235 */
2236 static void
2237 nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2238 {
2239 struct nvmet_fc_fcp_iod *fod =
2240 container_of(work, struct nvmet_fc_fcp_iod, work);
2241 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2242
2243 nvmet_fc_handle_fcp_rqst(tgtport, fod);
2244 }
2245
2246 /**
2247 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2248 * upon the reception of an NVME FCP CMD IU.
2249 *
2250 * Pass an FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2251 * layer for processing.
2252 *
2253 * The nvmet_fc layer allocates a local job structure (struct
2254 * nvmet_fc_fcp_iod) from the queue for the io and copies the
2255 * CMD IU buffer to the job structure. As such, on a successful
2256 * completion (returns 0), the LLDD may immediately free/reuse
2257 * the CMD IU buffer passed in the call.
2258 *
2259 * However, due to the packetized nature of FC, and because the FC
2260 * LLDD api may issue a hw command to send the response but not
2261 * receive the hw completion for that command (and thus not upcall
2262 * the nvmet_fc layer) before a new command is asynchronously
2263 * received, it is possible for a command to be received before the
2264 * LLDD and nvmet_fc have recycled the job structure. This gives the
2265 * appearance of more commands received than fit in the sq.
2266 * To alleviate this scenario, a temporary queue is maintained in the
2267 * transport for pending LLDD requests waiting for a queue job structure.
2268 * In these "overrun" cases, a temporary queue element is allocated,
2269 * the LLDD request and CMD IU buffer information are remembered, and
2270 * the routine returns an -EOVERFLOW status. Subsequently, when a queue
2271 * job structure is freed, it is immediately reallocated for anything on
2272 * the pending request list. The LLDD's defer_rcv() callback is called,
2273 * informing the LLDD that it may reuse the CMD IU buffer, and the io
2274 * is then started normally with the transport.
2275 *
2276 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2277 * the completion as successful but must not reuse the CMD IU buffer
2278 * until the LLDD's defer_rcv() callback has been called for the
2279 * corresponding struct nvmefc_tgt_fcp_req pointer.
2280 *
2281 * If there is any other condition in which an error occurs, the
2282 * transport will return a non-zero status indicating the error.
2283 * In all cases other than -EOVERFLOW, the transport has not accepted the
2284 * request and the LLDD should abort the exchange.
2285 *
2286 * @target_port: pointer to the (registered) target port the FCP CMD IU
2287 * was received on.
2288 * @fcpreq: pointer to a fcpreq request structure to be used to reference
2289 * the exchange corresponding to the FCP Exchange.
2290 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
2291 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2292 */
2293 int
2294 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2295 struct nvmefc_tgt_fcp_req *fcpreq,
2296 void *cmdiubuf, u32 cmdiubuf_len)
2297 {
2298 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2299 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2300 struct nvmet_fc_tgt_queue *queue;
2301 struct nvmet_fc_fcp_iod *fod;
2302 struct nvmet_fc_defer_fcp_req *deferfcp;
2303 unsigned long flags;
2304
2305 /* validate iu, so the connection id can be used to find the queue */
2306 if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2307 (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2308 (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2309 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2310 return -EIO;
2311
2312 queue = nvmet_fc_find_target_queue(tgtport,
2313 be64_to_cpu(cmdiu->connection_id));
2314 if (!queue)
2315 return -ENOTCONN;
2316
2317 /*
2318 * note: reference taken by find_target_queue
2319 * After successful fod allocation, the fod will inherit the
2320 * ownership of that reference and will remove the reference
2321 * when the fod is freed.
2322 */
2323
2324 spin_lock_irqsave(&queue->qlock, flags);
2325
2326 fod = nvmet_fc_alloc_fcp_iod(queue);
2327 if (fod) {
2328 spin_unlock_irqrestore(&queue->qlock, flags);
2329
2330 fcpreq->nvmet_fc_private = fod;
2331 fod->fcpreq = fcpreq;
2332
2333 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2334
2335 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2336
2337 return 0;
2338 }
2339
2340 if (!tgtport->ops->defer_rcv) {
2341 spin_unlock_irqrestore(&queue->qlock, flags);
2342 /* release the queue lookup reference */
2343 nvmet_fc_tgt_q_put(queue);
2344 return -ENOENT;
2345 }
2346
2347 deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2348 struct nvmet_fc_defer_fcp_req, req_list);
2349 if (deferfcp) {
2350 /* Just re-use one that was previously allocated */
2351 list_del(&deferfcp->req_list);
2352 } else {
2353 spin_unlock_irqrestore(&queue->qlock, flags);
2354
2355 /* Now we need to dynamically allocate one */
2356 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2357 if (!deferfcp) {
2358 /* release the queue lookup reference */
2359 nvmet_fc_tgt_q_put(queue);
2360 return -ENOMEM;
2361 }
2362 spin_lock_irqsave(&queue->qlock, flags);
2363 }
2364
2365 /* For now, use rspaddr / rsplen to save payload information */
2366 fcpreq->rspaddr = cmdiubuf;
2367 fcpreq->rsplen = cmdiubuf_len;
2368 deferfcp->fcp_req = fcpreq;
2369
2370 /* defer processing till a fod becomes available */
2371 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2372
2373 /* NOTE: the queue lookup reference is still valid */
2374
2375 spin_unlock_irqrestore(&queue->qlock, flags);
2376
2377 return -EOVERFLOW;
2378 }
2379 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
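
/*
 * Illustrative sketch only (not part of this driver): roughly how an LLDD
 * might hand a received FCP CMD IU to the transport and react to the return
 * codes documented above.  struct lldd_exchange and lldd_abort_exchange()
 * are hypothetical LLDD-side names; only nvmet_fc_rcv_fcp_req() is real.
 */
#if 0
struct lldd_exchange {				/* hypothetical LLDD context */
	struct nvmefc_tgt_fcp_req tgt_fcp_req;	/* handed to the transport */
	/* ... LLDD hw exchange state ... */
};

static void lldd_recv_nvme_fcp_cmd(struct nvmet_fc_target_port *targetport,
				   struct lldd_exchange *exch,
				   void *cmdiubuf, u32 cmdiubuf_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(targetport, &exch->tgt_fcp_req,
				   cmdiubuf, cmdiubuf_len);
	switch (ret) {
	case 0:
		/* CMD IU copied into the fod; the buffer may be reused now */
		break;
	case -EOVERFLOW:
		/*
		 * Deferred: keep cmdiubuf untouched until the template's
		 * defer_rcv() callback is invoked for this tgt_fcp_req.
		 */
		break;
	default:
		/* not accepted by the transport: abort the exchange */
		lldd_abort_exchange(exch);
		break;
	}
}
#endif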
2380
2381 /**
2382 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2383 * upon the reception of an ABTS for an FCP command
2384 *
2385 * Notify the transport that an ABTS has been received for an FCP command
2386 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2387 * LLDD believes the command is still being worked on
2388 * (template_ops->fcp_req_release() has not been called).
2389 *
2390 * The transport will wait for any outstanding work (an op to the LLDD,
2391 * which the lldd should complete with error due to the ABTS; or the
2392 * completion from the nvmet layer of the nvme command), then will
2393 * stop processing and call the LLDD's fcp_req_release() callback to
2394 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
2395 * to the ABTS either after return from this function (assuming any
2396 * outstanding op work has been terminated) or upon the callback being
2397 * called.
2398 *
2399 * @target_port: pointer to the (registered) target port the FCP CMD IU
2400 * was received on.
2401 * @fcpreq: pointer to the fcpreq request structure that corresponds
2402 * to the exchange that received the ABTS.
2403 */
2404 void
2405 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2406 struct nvmefc_tgt_fcp_req *fcpreq)
2407 {
2408 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2409 struct nvmet_fc_tgt_queue *queue;
2410 unsigned long flags;
2411
2412 if (!fod || fod->fcpreq != fcpreq)
2413 /* job appears to have already completed, ignore abort */
2414 return;
2415
2416 queue = fod->queue;
2417
2418 spin_lock_irqsave(&queue->qlock, flags);
2419 if (fod->active) {
2420 /*
2421 * mark as abort. The abort handler, invoked upon completion
2422 * of any work, will detect the aborted status and do the
2423 * callback.
2424 */
2425 spin_lock(&fod->flock);
2426 fod->abort = true;
2427 fod->aborted = true;
2428 spin_unlock(&fod->flock);
2429 }
2430 spin_unlock_irqrestore(&queue->qlock, flags);
2431 }
2432 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
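
/*
 * Illustrative sketch only (not part of this driver): a hypothetical LLDD
 * ABTS path, using the lldd_exchange type from the sketch above.  The i/o
 * context is still owned by the transport (fcp_req_release() has not been
 * called), so the LLDD simply notifies the transport; the BA_ACC may be sent
 * after this returns or once fcp_req_release() hands the context back.
 * lldd_send_ba_acc() is a hypothetical LLDD helper.
 */
#if 0
static void lldd_handle_fcp_abts(struct nvmet_fc_target_port *targetport,
				 struct lldd_exchange *exch)
{
	/* mark the command aborted in the transport */
	nvmet_fc_rcv_fcp_abort(targetport, &exch->tgt_fcp_req);

	/* outstanding ops will complete with error; release follows later */
	lldd_send_ba_acc(exch);
}
#endif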
2433
2434
2435 struct nvmet_fc_traddr {
2436 u64 nn;
2437 u64 pn;
2438 };
2439
2440 static int
2441 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2442 {
2443 u64 token64;
2444
2445 if (match_u64(sstr, &token64))
2446 return -EINVAL;
2447 *val = token64;
2448
2449 return 0;
2450 }
2451
2452 /*
2453 * This routine validates and extracts the WWNs from the TRADDR string.
2454 * As kernel parsers need the 0x to determine number base, universally
2455 * build string to parse with 0x prefix before parsing name strings.
2456 */
2457 static int
2458 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2459 {
2460 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2461 substring_t wwn = { name, &name[sizeof(name)-1] };
2462 int nnoffset, pnoffset;
2463
2464 /* validate the string is one of the 2 allowed formats */
2465 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2466 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2467 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2468 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2469 nnoffset = NVME_FC_TRADDR_OXNNLEN;
2470 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2471 NVME_FC_TRADDR_OXNNLEN;
2472 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2473 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2474 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2475 "pn-", NVME_FC_TRADDR_NNLEN))) {
2476 nnoffset = NVME_FC_TRADDR_NNLEN;
2477 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2478 } else
2479 goto out_einval;
2480
2481 name[0] = '0';
2482 name[1] = 'x';
2483 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2484
2485 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2486 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2487 goto out_einval;
2488
2489 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2490 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2491 goto out_einval;
2492
2493 return 0;
2494
2495 out_einval:
2496 pr_warn("%s: bad traddr string\n", __func__);
2497 return -EINVAL;
2498 }
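
/*
 * Editorial example (WWN values are made up): the two traddr forms accepted
 * above are the 0x-prefixed and bare-hex variants, typically written with a
 * ':' separator between the node and port names, e.g.:
 *   nn-0x20000090fa945612:pn-0x10000090fa945612
 *   nn-20000090fa945612:pn-10000090fa945612
 * Both yield traddr->nn and traddr->pn as 64-bit WWN values.
 */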
2499
2500 static int
2501 nvmet_fc_add_port(struct nvmet_port *port)
2502 {
2503 struct nvmet_fc_tgtport *tgtport;
2504 struct nvmet_fc_traddr traddr = { 0L, 0L };
2505 unsigned long flags;
2506 int ret;
2507
2508 /* validate the address info */
2509 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2510 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2511 return -EINVAL;
2512
2513 /* map the traddr address info to a target port */
2514
2515 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2516 sizeof(port->disc_addr.traddr));
2517 if (ret)
2518 return ret;
2519
2520 ret = -ENXIO;
2521 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2522 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2523 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2524 (tgtport->fc_target_port.port_name == traddr.pn)) {
2525 /* an FC port can only map to one nvmet port id */
2526 if (!tgtport->port) {
2527 tgtport->port = port;
2528 port->priv = tgtport;
2529 nvmet_fc_tgtport_get(tgtport);
2530 ret = 0;
2531 } else
2532 ret = -EALREADY;
2533 break;
2534 }
2535 }
2536 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2537 return ret;
2538 }
2539
2540 static void
2541 nvmet_fc_remove_port(struct nvmet_port *port)
2542 {
2543 struct nvmet_fc_tgtport *tgtport = port->priv;
2544 unsigned long flags;
2545 bool matched = false;
2546
2547 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2548 if (tgtport->port == port) {
2549 matched = true;
2550 tgtport->port = NULL;
2551 }
2552 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2553
2554 if (matched)
2555 nvmet_fc_tgtport_put(tgtport);
2556 }
2557
2558 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2559 .owner = THIS_MODULE,
2560 .type = NVMF_TRTYPE_FC,
2561 .msdbd = 1,
2562 .add_port = nvmet_fc_add_port,
2563 .remove_port = nvmet_fc_remove_port,
2564 .queue_response = nvmet_fc_fcp_nvme_cmd_done,
2565 .delete_ctrl = nvmet_fc_delete_ctrl,
2566 };
2567
2568 static int __init nvmet_fc_init_module(void)
2569 {
2570 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2571 }
2572
2573 static void __exit nvmet_fc_exit_module(void)
2574 {
2575 /* sanity check - all targetports should be removed */
2576 if (!list_empty(&nvmet_fc_target_list))
2577 pr_warn("%s: targetport list not empty\n", __func__);
2578
2579 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2580
2581 ida_destroy(&nvmet_fc_tgtport_cnt);
2582 }
2583
2584 module_init(nvmet_fc_init_module);
2585 module_exit(nvmet_fc_exit_module);
2586
2587 MODULE_LICENSE("GPL v2");