git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git / blame / drivers/nvme/host/fc.c
8638b246 1// SPDX-License-Identifier: GPL-2.0
e399441d
JS
2/*
3 * Copyright (c) 2016 Avago Technologies. All rights reserved.
e399441d
JS
4 */
5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6#include <linux/module.h>
7#include <linux/parser.h>
8#include <uapi/scsi/fc/fc_fs.h>
9#include <uapi/scsi/fc/fc_els.h>
61bff8ef 10#include <linux/delay.h>
d3d0bc78 11#include <linux/overflow.h>
e399441d
JS
12
13#include "nvme.h"
14#include "fabrics.h"
15#include <linux/nvme-fc-driver.h>
16#include <linux/nvme-fc.h>
ca19bcd0 17#include "fc.h"
a6a6d058 18#include <scsi/scsi_transport_fc.h>
e399441d
JS
19
20/* *************************** Data Structures/Defines ****************** */
21
22
e399441d 23enum nvme_fc_queue_flags {
26c0a26d
JA
24 NVME_FC_Q_CONNECTED = 0,
25 NVME_FC_Q_LIVE,
e399441d
JS
26};
27
ac7fe82b
JS
28#define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */
29
e399441d
JS
30struct nvme_fc_queue {
31 struct nvme_fc_ctrl *ctrl;
32 struct device *dev;
33 struct blk_mq_hw_ctx *hctx;
34 void *lldd_handle;
e399441d
JS
35 size_t cmnd_capsule_len;
36 u32 qnum;
37 u32 rqcnt;
38 u32 seqno;
39
40 u64 connection_id;
41 atomic_t csn;
42
43 unsigned long flags;
44} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
45
8d64daf7
JS
46enum nvme_fcop_flags {
47 FCOP_FLAGS_TERMIO = (1 << 0),
c3aedd22 48 FCOP_FLAGS_AEN = (1 << 1),
8d64daf7
JS
49};
50
e399441d
JS
51struct nvmefc_ls_req_op {
52 struct nvmefc_ls_req ls_req;
53
c913a8b0 54 struct nvme_fc_rport *rport;
e399441d
JS
55 struct nvme_fc_queue *queue;
56 struct request *rq;
8d64daf7 57 u32 flags;
e399441d
JS
58
59 int ls_error;
60 struct completion ls_done;
c913a8b0 61 struct list_head lsreq_list; /* rport->ls_req_list */
e399441d
JS
62 bool req_queued;
63};
64
65enum nvme_fcpop_state {
66 FCPOP_STATE_UNINIT = 0,
67 FCPOP_STATE_IDLE = 1,
68 FCPOP_STATE_ACTIVE = 2,
69 FCPOP_STATE_ABORTED = 3,
78a7ac26 70 FCPOP_STATE_COMPLETE = 4,
e399441d
JS
71};
72
73struct nvme_fc_fcp_op {
74 struct nvme_request nreq; /*
75 * nvme/host/core.c
76 * requires this to be
77 * the 1st element in the
78 * private structure
79 * associated with the
80 * request.
81 */
82 struct nvmefc_fcp_req fcp_req;
83
84 struct nvme_fc_ctrl *ctrl;
85 struct nvme_fc_queue *queue;
86 struct request *rq;
87
88 atomic_t state;
78a7ac26 89 u32 flags;
e399441d
JS
90 u32 rqno;
91 u32 nents;
92
93 struct nvme_fc_cmd_iu cmd_iu;
94 struct nvme_fc_ersp_iu rsp_iu;
95};
96
d3d0bc78
BVA
97struct nvme_fcp_op_w_sgl {
98 struct nvme_fc_fcp_op op;
b1ae1a23 99 struct scatterlist sgl[NVME_INLINE_SG_CNT];
d3d0bc78
BVA
100 uint8_t priv[0];
101};
102
e399441d
JS
103struct nvme_fc_lport {
104 struct nvme_fc_local_port localport;
105
106 struct ida endp_cnt;
107 struct list_head port_list; /* nvme_fc_port_list */
108 struct list_head endp_list;
109 struct device *dev; /* physical device for dma */
110 struct nvme_fc_port_template *ops;
111 struct kref ref;
158bfb88 112 atomic_t act_rport_cnt;
e399441d
JS
113} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
114
115struct nvme_fc_rport {
116 struct nvme_fc_remote_port remoteport;
117
118 struct list_head endp_list; /* for lport->endp_list */
119 struct list_head ctrl_list;
c913a8b0 120 struct list_head ls_req_list;
97faec53 121 struct list_head disc_list;
c913a8b0
JS
122 struct device *dev; /* physical device for dma */
123 struct nvme_fc_lport *lport;
e399441d
JS
124 spinlock_t lock;
125 struct kref ref;
158bfb88 126 atomic_t act_ctrl_cnt;
2b632970 127 unsigned long dev_loss_end;
e399441d
JS
128} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
129
61bff8ef
JS
130enum nvme_fcctrl_flags {
131 FCCTRL_TERMIO = (1 << 0),
e399441d
JS
132};
133
134struct nvme_fc_ctrl {
135 spinlock_t lock;
136 struct nvme_fc_queue *queues;
e399441d
JS
137 struct device *dev;
138 struct nvme_fc_lport *lport;
139 struct nvme_fc_rport *rport;
140 u32 cnum;
141
4c984154 142 bool ioq_live;
158bfb88 143 bool assoc_active;
4cff280a 144 atomic_t err_work_active;
e399441d
JS
145 u64 association_id;
146
e399441d 147 struct list_head ctrl_list; /* rport->ctrl_list */
e399441d
JS
148
149 struct blk_mq_tag_set admin_tag_set;
150 struct blk_mq_tag_set tag_set;
151
61bff8ef 152 struct delayed_work connect_work;
4cff280a 153 struct work_struct err_work;
61bff8ef 154
e399441d 155 struct kref ref;
61bff8ef
JS
156 u32 flags;
157 u32 iocnt;
36715cf4 158 wait_queue_head_t ioabort_wait;
e399441d 159
38dabe21 160 struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS];
e399441d
JS
161
162 struct nvme_ctrl ctrl;
163};
164
165static inline struct nvme_fc_ctrl *
166to_fc_ctrl(struct nvme_ctrl *ctrl)
167{
168 return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
169}
170
171static inline struct nvme_fc_lport *
172localport_to_lport(struct nvme_fc_local_port *portptr)
173{
174 return container_of(portptr, struct nvme_fc_lport, localport);
175}
176
177static inline struct nvme_fc_rport *
178remoteport_to_rport(struct nvme_fc_remote_port *portptr)
179{
180 return container_of(portptr, struct nvme_fc_rport, remoteport);
181}
182
183static inline struct nvmefc_ls_req_op *
184ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
185{
186 return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
187}
188
189static inline struct nvme_fc_fcp_op *
190fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
191{
192 return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
193}
194
195
196
197/* *************************** Globals **************************** */
198
199
200static DEFINE_SPINLOCK(nvme_fc_lock);
201
202static LIST_HEAD(nvme_fc_lport_list);
203static DEFINE_IDA(nvme_fc_local_port_cnt);
204static DEFINE_IDA(nvme_fc_ctrl_cnt);
205
8730c1dd 206static struct workqueue_struct *nvme_fc_wq;
e399441d 207
4c73cbdf
JS
208static bool nvme_fc_waiting_to_unload;
209static DECLARE_COMPLETION(nvme_fc_unload_proceed);
210
5f568556
JS
211/*
212 * These items are short-term. They will eventually be moved into
213 * a generic FC class. See comments in module init.
214 */
5f568556
JS
215static struct device *fc_udev_device;
216
e399441d
JS
217
218/* *********************** FC-NVME Port Management ************************ */
219
e399441d
JS
220static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
221 struct nvme_fc_queue *, unsigned int);
222
5533d424
JS
223static void
224nvme_fc_free_lport(struct kref *ref)
225{
226 struct nvme_fc_lport *lport =
227 container_of(ref, struct nvme_fc_lport, ref);
228 unsigned long flags;
229
230 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
231 WARN_ON(!list_empty(&lport->endp_list));
232
233 /* remove from transport list */
234 spin_lock_irqsave(&nvme_fc_lock, flags);
235 list_del(&lport->port_list);
4c73cbdf
JS
236 if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
237 complete(&nvme_fc_unload_proceed);
5533d424
JS
238 spin_unlock_irqrestore(&nvme_fc_lock, flags);
239
5533d424
JS
240 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
241 ida_destroy(&lport->endp_cnt);
242
243 put_device(lport->dev);
244
245 kfree(lport);
246}
247
248static void
249nvme_fc_lport_put(struct nvme_fc_lport *lport)
250{
251 kref_put(&lport->ref, nvme_fc_free_lport);
252}
253
254static int
255nvme_fc_lport_get(struct nvme_fc_lport *lport)
256{
257 return kref_get_unless_zero(&lport->ref);
258}
259
260
261static struct nvme_fc_lport *
c5760f30
JS
262nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
263 struct nvme_fc_port_template *ops,
264 struct device *dev)
5533d424
JS
265{
266 struct nvme_fc_lport *lport;
267 unsigned long flags;
268
269 spin_lock_irqsave(&nvme_fc_lock, flags);
270
271 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
272 if (lport->localport.node_name != pinfo->node_name ||
273 lport->localport.port_name != pinfo->port_name)
274 continue;
275
c5760f30
JS
276 if (lport->dev != dev) {
277 lport = ERR_PTR(-EXDEV);
278 goto out_done;
279 }
280
5533d424
JS
281 if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
282 lport = ERR_PTR(-EEXIST);
283 goto out_done;
284 }
285
286 if (!nvme_fc_lport_get(lport)) {
287 /*
288 * fails if ref cnt already 0. If so,
289 * act as if lport already deleted
290 */
291 lport = NULL;
292 goto out_done;
293 }
294
295 /* resume the lport */
296
c5760f30 297 lport->ops = ops;
5533d424
JS
298 lport->localport.port_role = pinfo->port_role;
299 lport->localport.port_id = pinfo->port_id;
300 lport->localport.port_state = FC_OBJSTATE_ONLINE;
301
302 spin_unlock_irqrestore(&nvme_fc_lock, flags);
303
304 return lport;
305 }
306
307 lport = NULL;
308
309out_done:
310 spin_unlock_irqrestore(&nvme_fc_lock, flags);
311
312 return lport;
313}
e399441d
JS
314
315/**
316 * nvme_fc_register_localport - transport entry point called by an
 317 * LLDD to register the existence of an NVME
318 * host FC port.
319 * @pinfo: pointer to information about the port to be registered
320 * @template: LLDD entrypoints and operational parameters for the port
321 * @dev: physical hardware device node port corresponds to. Will be
322 * used for DMA mappings
76c910c7 323 * @portptr: pointer to a local port pointer. Upon success, the routine
e399441d
JS
324 * will allocate a nvme_fc_local_port structure and place its
325 * address in the local port pointer. Upon failure, local port
326 * pointer will be set to 0.
327 *
328 * Returns:
329 * a completion status. Must be 0 upon success; a negative errno
330 * (ex: -ENXIO) upon failure.
331 */
332int
333nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
334 struct nvme_fc_port_template *template,
335 struct device *dev,
336 struct nvme_fc_local_port **portptr)
337{
338 struct nvme_fc_lport *newrec;
339 unsigned long flags;
340 int ret, idx;
341
342 if (!template->localport_delete || !template->remoteport_delete ||
343 !template->ls_req || !template->fcp_io ||
344 !template->ls_abort || !template->fcp_abort ||
345 !template->max_hw_queues || !template->max_sgl_segments ||
8c5c6605 346 !template->max_dif_sgl_segments || !template->dma_boundary) {
e399441d
JS
347 ret = -EINVAL;
348 goto out_reghost_failed;
349 }
350
5533d424
JS
351 /*
352 * look to see if there is already a localport that had been
353 * deregistered and in the process of waiting for all the
354 * references to fully be removed. If the references haven't
355 * expired, we can simply re-enable the localport. Remoteports
356 * and controller reconnections should resume naturally.
357 */
c5760f30 358 newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);
5533d424
JS
359
360 /* found an lport, but something about its state is bad */
361 if (IS_ERR(newrec)) {
362 ret = PTR_ERR(newrec);
363 goto out_reghost_failed;
364
365 /* found existing lport, which was resumed */
366 } else if (newrec) {
367 *portptr = &newrec->localport;
368 return 0;
369 }
370
371 /* nothing found - allocate a new localport struct */
372
e399441d
JS
373 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
374 GFP_KERNEL);
375 if (!newrec) {
376 ret = -ENOMEM;
377 goto out_reghost_failed;
378 }
379
380 idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
381 if (idx < 0) {
382 ret = -ENOSPC;
383 goto out_fail_kfree;
384 }
385
386 if (!get_device(dev) && dev) {
387 ret = -ENODEV;
388 goto out_ida_put;
389 }
390
391 INIT_LIST_HEAD(&newrec->port_list);
392 INIT_LIST_HEAD(&newrec->endp_list);
393 kref_init(&newrec->ref);
158bfb88 394 atomic_set(&newrec->act_rport_cnt, 0);
e399441d
JS
395 newrec->ops = template;
396 newrec->dev = dev;
397 ida_init(&newrec->endp_cnt);
398 newrec->localport.private = &newrec[1];
399 newrec->localport.node_name = pinfo->node_name;
400 newrec->localport.port_name = pinfo->port_name;
401 newrec->localport.port_role = pinfo->port_role;
402 newrec->localport.port_id = pinfo->port_id;
403 newrec->localport.port_state = FC_OBJSTATE_ONLINE;
404 newrec->localport.port_num = idx;
405
406 spin_lock_irqsave(&nvme_fc_lock, flags);
407 list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
408 spin_unlock_irqrestore(&nvme_fc_lock, flags);
409
410 if (dev)
411 dma_set_seg_boundary(dev, template->dma_boundary);
412
413 *portptr = &newrec->localport;
414 return 0;
415
416out_ida_put:
417 ida_simple_remove(&nvme_fc_local_port_cnt, idx);
418out_fail_kfree:
419 kfree(newrec);
420out_reghost_failed:
421 *portptr = NULL;
422
423 return ret;
424}
425EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
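/*
 * Illustrative LLDD-side registration sketch (not part of this file): the
 * template name, WWN variables and device below are hypothetical, and
 * error handling is elided.
 *
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name = lldd_wwnn,
 *		.port_name = lldd_wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
 *	};
 *	struct nvme_fc_local_port *localport;
 *	int ret;
 *
 *	ret = nvme_fc_register_localport(&pinfo, &lldd_nvme_fc_template,
 *					 &pci_dev->dev, &localport);
 *	if (!ret)
 *		lldd_priv = localport->private;
 */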
426
e399441d
JS
427/**
428 * nvme_fc_unregister_localport - transport entry point called by an
429 * LLDD to deregister/remove a previously
 430 * registered NVME host FC port.
76c910c7 431 * @portptr: pointer to the (registered) local port that is to be deregistered.
e399441d
JS
432 *
433 * Returns:
434 * a completion status. Must be 0 upon success; a negative errno
435 * (ex: -ENXIO) upon failure.
436 */
437int
438nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
439{
440 struct nvme_fc_lport *lport = localport_to_lport(portptr);
441 unsigned long flags;
442
443 if (!portptr)
444 return -EINVAL;
445
446 spin_lock_irqsave(&nvme_fc_lock, flags);
447
448 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
449 spin_unlock_irqrestore(&nvme_fc_lock, flags);
450 return -EINVAL;
451 }
452 portptr->port_state = FC_OBJSTATE_DELETED;
453
454 spin_unlock_irqrestore(&nvme_fc_lock, flags);
455
158bfb88
JS
456 if (atomic_read(&lport->act_rport_cnt) == 0)
457 lport->ops->localport_delete(&lport->localport);
458
e399441d
JS
459 nvme_fc_lport_put(lport);
460
461 return 0;
462}
463EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
464
eaefd5ab
JS
465/*
466 * TRADDR strings, per FC-NVME are fixed format:
467 * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
468 * udev event will only differ by prefix of what field is
469 * being specified:
470 * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
471 * 19 + 43 + null_fudge = 64 characters
472 */
473#define FCNVME_TRADDR_LENGTH 64
474
475static void
476nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
477 struct nvme_fc_rport *rport)
478{
479 char hostaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_HOST_TRADDR=...*/
480 char tgtaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_TRADDR=...*/
481 char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };
482
483 if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
484 return;
485
486 snprintf(hostaddr, sizeof(hostaddr),
487 "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
488 lport->localport.node_name, lport->localport.port_name);
489 snprintf(tgtaddr, sizeof(tgtaddr),
490 "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
491 rport->remoteport.node_name, rport->remoteport.port_name);
492 kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
493}
494
469d0ef0
JS
495static void
496nvme_fc_free_rport(struct kref *ref)
497{
498 struct nvme_fc_rport *rport =
499 container_of(ref, struct nvme_fc_rport, ref);
500 struct nvme_fc_lport *lport =
501 localport_to_lport(rport->remoteport.localport);
502 unsigned long flags;
503
504 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
505 WARN_ON(!list_empty(&rport->ctrl_list));
506
507 /* remove from lport list */
508 spin_lock_irqsave(&nvme_fc_lock, flags);
509 list_del(&rport->endp_list);
510 spin_unlock_irqrestore(&nvme_fc_lock, flags);
511
97faec53 512 WARN_ON(!list_empty(&rport->disc_list));
469d0ef0
JS
513 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
514
515 kfree(rport);
516
517 nvme_fc_lport_put(lport);
518}
519
520static void
521nvme_fc_rport_put(struct nvme_fc_rport *rport)
522{
523 kref_put(&rport->ref, nvme_fc_free_rport);
524}
525
526static int
527nvme_fc_rport_get(struct nvme_fc_rport *rport)
528{
529 return kref_get_unless_zero(&rport->ref);
530}
531
2b632970
JS
532static void
533nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
534{
535 switch (ctrl->ctrl.state) {
536 case NVME_CTRL_NEW:
ad6a0a52 537 case NVME_CTRL_CONNECTING:
2b632970
JS
538 /*
539 * As all reconnects were suppressed, schedule a
540 * connect.
541 */
542 dev_info(ctrl->ctrl.device,
543 "NVME-FC{%d}: connectivity re-established. "
544 "Attempting reconnect\n", ctrl->cnum);
545
546 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
547 break;
548
549 case NVME_CTRL_RESETTING:
550 /*
551 * Controller is already in the process of terminating the
552 * association. No need to do anything further. The reconnect
553 * step will naturally occur after the reset completes.
554 */
555 break;
556
557 default:
558 /* no action to take - let it delete */
559 break;
560 }
561}
562
563static struct nvme_fc_rport *
564nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
565 struct nvme_fc_port_info *pinfo)
566{
567 struct nvme_fc_rport *rport;
568 struct nvme_fc_ctrl *ctrl;
569 unsigned long flags;
570
571 spin_lock_irqsave(&nvme_fc_lock, flags);
572
573 list_for_each_entry(rport, &lport->endp_list, endp_list) {
574 if (rport->remoteport.node_name != pinfo->node_name ||
575 rport->remoteport.port_name != pinfo->port_name)
576 continue;
577
578 if (!nvme_fc_rport_get(rport)) {
579 rport = ERR_PTR(-ENOLCK);
580 goto out_done;
581 }
582
583 spin_unlock_irqrestore(&nvme_fc_lock, flags);
584
585 spin_lock_irqsave(&rport->lock, flags);
586
587 /* has it been unregistered */
588 if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
589 /* means lldd called us twice */
590 spin_unlock_irqrestore(&rport->lock, flags);
591 nvme_fc_rport_put(rport);
592 return ERR_PTR(-ESTALE);
593 }
594
0cdd5fca
JS
595 rport->remoteport.port_role = pinfo->port_role;
596 rport->remoteport.port_id = pinfo->port_id;
2b632970
JS
597 rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
598 rport->dev_loss_end = 0;
599
600 /*
601 * kick off a reconnect attempt on all associations to the
 602 * remote port. A successful reconnect will resume i/o.
603 */
604 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
605 nvme_fc_resume_controller(ctrl);
606
607 spin_unlock_irqrestore(&rport->lock, flags);
608
609 return rport;
610 }
611
612 rport = NULL;
613
614out_done:
615 spin_unlock_irqrestore(&nvme_fc_lock, flags);
616
617 return rport;
618}
619
620static inline void
621__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
622 struct nvme_fc_port_info *pinfo)
623{
624 if (pinfo->dev_loss_tmo)
625 rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
626 else
627 rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
628}
629
e399441d
JS
630/**
631 * nvme_fc_register_remoteport - transport entry point called by an
632 * LLDD to register the existence of a NVME
633 * subsystem FC port on its fabric.
634 * @localport: pointer to the (registered) local port that the remote
635 * subsystem port is connected to.
636 * @pinfo: pointer to information about the port to be registered
76c910c7 637 * @portptr: pointer to a remote port pointer. Upon success, the routine
e399441d
JS
638 * will allocate a nvme_fc_remote_port structure and place its
639 * address in the remote port pointer. Upon failure, remote port
640 * pointer will be set to 0.
641 *
642 * Returns:
643 * a completion status. Must be 0 upon success; a negative errno
644 * (ex: -ENXIO) upon failure.
645 */
646int
647nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
648 struct nvme_fc_port_info *pinfo,
649 struct nvme_fc_remote_port **portptr)
650{
651 struct nvme_fc_lport *lport = localport_to_lport(localport);
652 struct nvme_fc_rport *newrec;
653 unsigned long flags;
654 int ret, idx;
655
2b632970
JS
656 if (!nvme_fc_lport_get(lport)) {
657 ret = -ESHUTDOWN;
658 goto out_reghost_failed;
659 }
660
661 /*
662 * look to see if there is already a remoteport that is waiting
663 * for a reconnect (within dev_loss_tmo) with the same WWN's.
664 * If so, transition to it and reconnect.
665 */
666 newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);
667
668 /* found an rport, but something about its state is bad */
669 if (IS_ERR(newrec)) {
670 ret = PTR_ERR(newrec);
671 goto out_lport_put;
672
673 /* found existing rport, which was resumed */
674 } else if (newrec) {
675 nvme_fc_lport_put(lport);
676 __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
677 nvme_fc_signal_discovery_scan(lport, newrec);
678 *portptr = &newrec->remoteport;
679 return 0;
680 }
681
682 /* nothing found - allocate a new remoteport struct */
683
e399441d
JS
684 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
685 GFP_KERNEL);
686 if (!newrec) {
687 ret = -ENOMEM;
2b632970 688 goto out_lport_put;
e399441d
JS
689 }
690
691 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
692 if (idx < 0) {
693 ret = -ENOSPC;
2b632970 694 goto out_kfree_rport;
e399441d
JS
695 }
696
697 INIT_LIST_HEAD(&newrec->endp_list);
698 INIT_LIST_HEAD(&newrec->ctrl_list);
c913a8b0 699 INIT_LIST_HEAD(&newrec->ls_req_list);
97faec53 700 INIT_LIST_HEAD(&newrec->disc_list);
e399441d 701 kref_init(&newrec->ref);
158bfb88 702 atomic_set(&newrec->act_ctrl_cnt, 0);
e399441d
JS
703 spin_lock_init(&newrec->lock);
704 newrec->remoteport.localport = &lport->localport;
c913a8b0
JS
705 newrec->dev = lport->dev;
706 newrec->lport = lport;
e399441d
JS
707 newrec->remoteport.private = &newrec[1];
708 newrec->remoteport.port_role = pinfo->port_role;
709 newrec->remoteport.node_name = pinfo->node_name;
710 newrec->remoteport.port_name = pinfo->port_name;
711 newrec->remoteport.port_id = pinfo->port_id;
712 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
713 newrec->remoteport.port_num = idx;
2b632970 714 __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
e399441d
JS
715
716 spin_lock_irqsave(&nvme_fc_lock, flags);
717 list_add_tail(&newrec->endp_list, &lport->endp_list);
718 spin_unlock_irqrestore(&nvme_fc_lock, flags);
719
eaefd5ab
JS
720 nvme_fc_signal_discovery_scan(lport, newrec);
721
e399441d
JS
722 *portptr = &newrec->remoteport;
723 return 0;
724
e399441d
JS
725out_kfree_rport:
726 kfree(newrec);
2b632970
JS
727out_lport_put:
728 nvme_fc_lport_put(lport);
e399441d
JS
729out_reghost_failed:
730 *portptr = NULL;
731 return ret;
e399441d
JS
732}
733EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
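/*
 * Illustrative follow-on sketch (hypothetical values): once a target port
 * is discovered on the fabric, the LLDD registers it against the localport
 * it was seen on (the localport obtained at registration time).
 *
 *	struct nvme_fc_port_info rinfo = {
 *		.node_name = target_wwnn,
 *		.port_name = target_wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_TARGET,
 *		.port_id = target_did,
 *	};
 *	struct nvme_fc_remote_port *remoteport;
 *
 *	ret = nvme_fc_register_remoteport(localport, &rinfo, &remoteport);
 */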
734
8d64daf7
JS
735static int
736nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
737{
738 struct nvmefc_ls_req_op *lsop;
739 unsigned long flags;
740
741restart:
742 spin_lock_irqsave(&rport->lock, flags);
743
744 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
745 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
746 lsop->flags |= FCOP_FLAGS_TERMIO;
747 spin_unlock_irqrestore(&rport->lock, flags);
748 rport->lport->ops->ls_abort(&rport->lport->localport,
749 &rport->remoteport,
750 &lsop->ls_req);
751 goto restart;
752 }
753 }
754 spin_unlock_irqrestore(&rport->lock, flags);
755
756 return 0;
757}
758
2b632970
JS
759static void
760nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
761{
762 dev_info(ctrl->ctrl.device,
763 "NVME-FC{%d}: controller connectivity lost. Awaiting "
764 "Reconnect", ctrl->cnum);
765
766 switch (ctrl->ctrl.state) {
767 case NVME_CTRL_NEW:
768 case NVME_CTRL_LIVE:
769 /*
770 * Schedule a controller reset. The reset will terminate the
771 * association and schedule the reconnect timer. Reconnects
772 * will be attempted until either the ctlr_loss_tmo
773 * (max_retries * connect_delay) expires or the remoteport's
774 * dev_loss_tmo expires.
775 */
776 if (nvme_reset_ctrl(&ctrl->ctrl)) {
777 dev_warn(ctrl->ctrl.device,
77d0612d 778 "NVME-FC{%d}: Couldn't schedule reset.\n",
2b632970
JS
779 ctrl->cnum);
780 nvme_delete_ctrl(&ctrl->ctrl);
781 }
782 break;
783
ad6a0a52 784 case NVME_CTRL_CONNECTING:
2b632970
JS
785 /*
786 * The association has already been terminated and the
787 * controller is attempting reconnects. No need to do anything
 788 * further. Reconnects will be attempted until either the
789 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
790 * remoteport's dev_loss_tmo expires.
791 */
792 break;
793
794 case NVME_CTRL_RESETTING:
795 /*
796 * Controller is already in the process of terminating the
797 * association. No need to do anything further. The reconnect
798 * step will kick in naturally after the association is
799 * terminated.
800 */
801 break;
802
803 case NVME_CTRL_DELETING:
804 default:
805 /* no action to take - let it delete */
806 break;
807 }
808}
809
e399441d
JS
810/**
811 * nvme_fc_unregister_remoteport - transport entry point called by an
812 * LLDD to deregister/remove a previously
 813 * registered NVME subsystem FC port.
76c910c7
BVA
814 * @portptr: pointer to the (registered) remote port that is to be
815 * deregistered.
e399441d
JS
816 *
817 * Returns:
818 * a completion status. Must be 0 upon success; a negative errno
819 * (ex: -ENXIO) upon failure.
820 */
821int
822nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
823{
824 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
825 struct nvme_fc_ctrl *ctrl;
826 unsigned long flags;
827
828 if (!portptr)
829 return -EINVAL;
830
831 spin_lock_irqsave(&rport->lock, flags);
832
833 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
834 spin_unlock_irqrestore(&rport->lock, flags);
835 return -EINVAL;
836 }
837 portptr->port_state = FC_OBJSTATE_DELETED;
838
2b632970
JS
839 rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);
840
841 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
842 /* if dev_loss_tmo==0, dev loss is immediate */
843 if (!portptr->dev_loss_tmo) {
844 dev_warn(ctrl->ctrl.device,
77d0612d 845 "NVME-FC{%d}: controller connectivity lost.\n",
2b632970
JS
846 ctrl->cnum);
847 nvme_delete_ctrl(&ctrl->ctrl);
848 } else
849 nvme_fc_ctrl_connectivity_loss(ctrl);
850 }
e399441d
JS
851
852 spin_unlock_irqrestore(&rport->lock, flags);
853
8d64daf7
JS
854 nvme_fc_abort_lsops(rport);
855
158bfb88
JS
856 if (atomic_read(&rport->act_ctrl_cnt) == 0)
857 rport->lport->ops->remoteport_delete(portptr);
858
2b632970
JS
859 /*
 860 * release the reference. Once all controllers have gone away
 861 * (which should only occur after dev_loss_tmo expires), the
 862 * rport can be torn down.
863 */
e399441d 864 nvme_fc_rport_put(rport);
2b632970 865
e399441d
JS
866 return 0;
867}
868EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
869
eaefd5ab
JS
870/**
871 * nvme_fc_rescan_remoteport - transport entry point called by an
872 * LLDD to request a nvme device rescan.
873 * @remoteport: pointer to the (registered) remote port that is to be
874 * rescanned.
875 *
876 * Returns: N/A
877 */
878void
879nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
880{
881 struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);
882
883 nvme_fc_signal_discovery_scan(rport->lport, rport);
884}
885EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
886
ac7fe82b
JS
887int
888nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
889 u32 dev_loss_tmo)
890{
891 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
ac7fe82b
JS
892 unsigned long flags;
893
894 spin_lock_irqsave(&rport->lock, flags);
895
896 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
897 spin_unlock_irqrestore(&rport->lock, flags);
898 return -EINVAL;
899 }
900
901 /* a dev_loss_tmo of 0 (immediate) is allowed to be set */
902 rport->remoteport.dev_loss_tmo = dev_loss_tmo;
903
904 spin_unlock_irqrestore(&rport->lock, flags);
905
906 return 0;
907}
908EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
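/*
 * Hypothetical usage sketch: an LLDD propagating a user-requested
 * dev_loss_tmo change (e.g. from its fc_remote_port sysfs handler) to the
 * FC-NVME layer; a value of 0 requests immediate device loss.
 *
 *	nvme_fc_set_remoteport_devloss(remoteport, new_dev_loss_tmo);
 */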
909
e399441d
JS
910
911/* *********************** FC-NVME DMA Handling **************************** */
912
913/*
914 * The fcloop device passes in a NULL device pointer. Real LLD's will
915 * pass in a valid device pointer. If NULL is passed to the dma mapping
916 * routines, depending on the platform, it may or may not succeed, and
917 * may crash.
918 *
919 * As such:
 920 * Wrap all the dma routines and check the dev pointer.
921 *
 922 * For simple mappings (those returning just a dma address), we'll
 923 * noop them, returning a dma address of 0.
924 *
925 * On more complex mappings (dma_map_sg), a pseudo routine fills
926 * in the scatter list, setting all dma addresses to 0.
927 */
928
929static inline dma_addr_t
930fc_dma_map_single(struct device *dev, void *ptr, size_t size,
931 enum dma_data_direction dir)
932{
933 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
934}
935
936static inline int
937fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
938{
939 return dev ? dma_mapping_error(dev, dma_addr) : 0;
940}
941
942static inline void
943fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
944 enum dma_data_direction dir)
945{
946 if (dev)
947 dma_unmap_single(dev, addr, size, dir);
948}
949
950static inline void
951fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
952 enum dma_data_direction dir)
953{
954 if (dev)
955 dma_sync_single_for_cpu(dev, addr, size, dir);
956}
957
958static inline void
959fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
960 enum dma_data_direction dir)
961{
962 if (dev)
963 dma_sync_single_for_device(dev, addr, size, dir);
964}
965
966/* pseudo dma_map_sg call */
967static int
968fc_map_sg(struct scatterlist *sg, int nents)
969{
970 struct scatterlist *s;
971 int i;
972
973 WARN_ON(nents == 0 || sg[0].length == 0);
974
975 for_each_sg(sg, s, nents, i) {
976 s->dma_address = 0L;
977#ifdef CONFIG_NEED_SG_DMA_LENGTH
978 s->dma_length = s->length;
979#endif
980 }
981 return nents;
982}
983
984static inline int
985fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
986 enum dma_data_direction dir)
987{
988 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
989}
990
991static inline void
992fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
993 enum dma_data_direction dir)
994{
995 if (dev)
996 dma_unmap_sg(dev, sg, nents, dir);
997}
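/*
 * Behavior sketch (illustrative): with the fcloop pseudo-LLDD, dev is NULL,
 * so e.g. fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE) returns a dma
 * address of 0 and fc_dma_mapping_error(NULL, 0) reports no error; real
 * LLDDs pass a valid device and hit the underlying dma_* routines.
 */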
998
e399441d
JS
999/* *********************** FC-NVME LS Handling **************************** */
1000
1001static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
1002static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
1003
1004
1005static void
c913a8b0 1006__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
e399441d 1007{
c913a8b0 1008 struct nvme_fc_rport *rport = lsop->rport;
e399441d
JS
1009 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1010 unsigned long flags;
1011
c913a8b0 1012 spin_lock_irqsave(&rport->lock, flags);
e399441d
JS
1013
1014 if (!lsop->req_queued) {
c913a8b0 1015 spin_unlock_irqrestore(&rport->lock, flags);
e399441d
JS
1016 return;
1017 }
1018
1019 list_del(&lsop->lsreq_list);
1020
1021 lsop->req_queued = false;
1022
c913a8b0 1023 spin_unlock_irqrestore(&rport->lock, flags);
e399441d 1024
c913a8b0 1025 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
e399441d
JS
1026 (lsreq->rqstlen + lsreq->rsplen),
1027 DMA_BIDIRECTIONAL);
1028
c913a8b0 1029 nvme_fc_rport_put(rport);
e399441d
JS
1030}
1031
1032static int
c913a8b0 1033__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
e399441d
JS
1034 struct nvmefc_ls_req_op *lsop,
1035 void (*done)(struct nvmefc_ls_req *req, int status))
1036{
1037 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1038 unsigned long flags;
c913a8b0 1039 int ret = 0;
e399441d 1040
c913a8b0
JS
1041 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
1042 return -ECONNREFUSED;
1043
1044 if (!nvme_fc_rport_get(rport))
e399441d
JS
1045 return -ESHUTDOWN;
1046
1047 lsreq->done = done;
c913a8b0 1048 lsop->rport = rport;
e399441d
JS
1049 lsop->req_queued = false;
1050 INIT_LIST_HEAD(&lsop->lsreq_list);
1051 init_completion(&lsop->ls_done);
1052
c913a8b0 1053 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
e399441d
JS
1054 lsreq->rqstlen + lsreq->rsplen,
1055 DMA_BIDIRECTIONAL);
c913a8b0
JS
1056 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
1057 ret = -EFAULT;
1058 goto out_putrport;
e399441d
JS
1059 }
1060 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
1061
c913a8b0 1062 spin_lock_irqsave(&rport->lock, flags);
e399441d 1063
c913a8b0 1064 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
e399441d
JS
1065
1066 lsop->req_queued = true;
1067
c913a8b0 1068 spin_unlock_irqrestore(&rport->lock, flags);
e399441d 1069
c913a8b0
JS
1070 ret = rport->lport->ops->ls_req(&rport->lport->localport,
1071 &rport->remoteport, lsreq);
e399441d 1072 if (ret)
c913a8b0
JS
1073 goto out_unlink;
1074
1075 return 0;
1076
1077out_unlink:
1078 lsop->ls_error = ret;
1079 spin_lock_irqsave(&rport->lock, flags);
1080 lsop->req_queued = false;
1081 list_del(&lsop->lsreq_list);
1082 spin_unlock_irqrestore(&rport->lock, flags);
1083 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
1084 (lsreq->rqstlen + lsreq->rsplen),
1085 DMA_BIDIRECTIONAL);
1086out_putrport:
1087 nvme_fc_rport_put(rport);
e399441d
JS
1088
1089 return ret;
1090}
1091
1092static void
1093nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
1094{
1095 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1096
1097 lsop->ls_error = status;
1098 complete(&lsop->ls_done);
1099}
1100
1101static int
c913a8b0 1102nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
e399441d
JS
1103{
1104 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1105 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
1106 int ret;
1107
c913a8b0 1108 ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
e399441d 1109
c913a8b0 1110 if (!ret) {
e399441d
JS
1111 /*
1112 * No timeout/not interruptible as we need the struct
1113 * to exist until the lldd calls us back. Thus mandate
 1114 * waiting until the driver calls back. The lldd is responsible for
 1115 * the timeout action.
1116 */
1117 wait_for_completion(&lsop->ls_done);
1118
c913a8b0 1119 __nvme_fc_finish_ls_req(lsop);
e399441d 1120
c913a8b0 1121 ret = lsop->ls_error;
e399441d
JS
1122 }
1123
c913a8b0
JS
1124 if (ret)
1125 return ret;
1126
e399441d
JS
1127 /* ACC or RJT payload ? */
1128 if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
1129 return -ENXIO;
1130
1131 return 0;
1132}
1133
c913a8b0
JS
1134static int
1135nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
e399441d
JS
1136 struct nvmefc_ls_req_op *lsop,
1137 void (*done)(struct nvmefc_ls_req *req, int status))
1138{
e399441d
JS
1139 /* don't wait for completion */
1140
c913a8b0 1141 return __nvme_fc_send_ls_req(rport, lsop, done);
e399441d
JS
1142}
1143
e399441d
JS
1144static int
1145nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
1146 struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
1147{
1148 struct nvmefc_ls_req_op *lsop;
1149 struct nvmefc_ls_req *lsreq;
1150 struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
1151 struct fcnvme_ls_cr_assoc_acc *assoc_acc;
1152 int ret, fcret = 0;
1153
1154 lsop = kzalloc((sizeof(*lsop) +
1155 ctrl->lport->ops->lsrqst_priv_sz +
1156 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
1157 if (!lsop) {
1158 ret = -ENOMEM;
1159 goto out_no_memory;
1160 }
1161 lsreq = &lsop->ls_req;
1162
1163 lsreq->private = (void *)&lsop[1];
1164 assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
1165 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1166 assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
1167
1168 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
1169 assoc_rqst->desc_list_len =
1170 cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1171
1172 assoc_rqst->assoc_cmd.desc_tag =
1173 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
1174 assoc_rqst->assoc_cmd.desc_len =
1175 fcnvme_lsdesc_len(
1176 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
1177
1178 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
d157e534 1179 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
e399441d
JS
1180 /* Linux supports only Dynamic controllers */
1181 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
8e412263 1182 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
e399441d
JS
1183 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
1184 min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
1185 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
1186 min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
1187
1188 lsop->queue = queue;
1189 lsreq->rqstaddr = assoc_rqst;
1190 lsreq->rqstlen = sizeof(*assoc_rqst);
1191 lsreq->rspaddr = assoc_acc;
1192 lsreq->rsplen = sizeof(*assoc_acc);
53b2b2f5 1193 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
e399441d 1194
c913a8b0 1195 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
e399441d
JS
1196 if (ret)
1197 goto out_free_buffer;
1198
1199 /* process connect LS completion */
1200
1201 /* validate the ACC response */
1202 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1203 fcret = VERR_LSACC;
f77fc87c 1204 else if (assoc_acc->hdr.desc_list_len !=
e399441d
JS
1205 fcnvme_lsdesc_len(
1206 sizeof(struct fcnvme_ls_cr_assoc_acc)))
1207 fcret = VERR_CR_ASSOC_ACC_LEN;
f77fc87c
JS
1208 else if (assoc_acc->hdr.rqst.desc_tag !=
1209 cpu_to_be32(FCNVME_LSDESC_RQST))
e399441d
JS
1210 fcret = VERR_LSDESC_RQST;
1211 else if (assoc_acc->hdr.rqst.desc_len !=
1212 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1213 fcret = VERR_LSDESC_RQST_LEN;
1214 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
1215 fcret = VERR_CR_ASSOC;
1216 else if (assoc_acc->associd.desc_tag !=
1217 cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1218 fcret = VERR_ASSOC_ID;
1219 else if (assoc_acc->associd.desc_len !=
1220 fcnvme_lsdesc_len(
1221 sizeof(struct fcnvme_lsdesc_assoc_id)))
1222 fcret = VERR_ASSOC_ID_LEN;
1223 else if (assoc_acc->connectid.desc_tag !=
1224 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1225 fcret = VERR_CONN_ID;
1226 else if (assoc_acc->connectid.desc_len !=
1227 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1228 fcret = VERR_CONN_ID_LEN;
1229
1230 if (fcret) {
1231 ret = -EBADF;
1232 dev_err(ctrl->dev,
7db39484 1233 "q %d Create Association LS failed: %s\n",
e399441d
JS
1234 queue->qnum, validation_errors[fcret]);
1235 } else {
1236 ctrl->association_id =
1237 be64_to_cpu(assoc_acc->associd.association_id);
1238 queue->connection_id =
1239 be64_to_cpu(assoc_acc->connectid.connection_id);
1240 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1241 }
1242
1243out_free_buffer:
1244 kfree(lsop);
1245out_no_memory:
1246 if (ret)
1247 dev_err(ctrl->dev,
1248 "queue %d connect admin queue failed (%d).\n",
1249 queue->qnum, ret);
1250 return ret;
1251}
1252
1253static int
1254nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1255 u16 qsize, u16 ersp_ratio)
1256{
1257 struct nvmefc_ls_req_op *lsop;
1258 struct nvmefc_ls_req *lsreq;
1259 struct fcnvme_ls_cr_conn_rqst *conn_rqst;
1260 struct fcnvme_ls_cr_conn_acc *conn_acc;
1261 int ret, fcret = 0;
1262
1263 lsop = kzalloc((sizeof(*lsop) +
1264 ctrl->lport->ops->lsrqst_priv_sz +
1265 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
1266 if (!lsop) {
1267 ret = -ENOMEM;
1268 goto out_no_memory;
1269 }
1270 lsreq = &lsop->ls_req;
1271
1272 lsreq->private = (void *)&lsop[1];
1273 conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
1274 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1275 conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
1276
1277 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
1278 conn_rqst->desc_list_len = cpu_to_be32(
1279 sizeof(struct fcnvme_lsdesc_assoc_id) +
1280 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1281
1282 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1283 conn_rqst->associd.desc_len =
1284 fcnvme_lsdesc_len(
1285 sizeof(struct fcnvme_lsdesc_assoc_id));
1286 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1287 conn_rqst->connect_cmd.desc_tag =
1288 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
1289 conn_rqst->connect_cmd.desc_len =
1290 fcnvme_lsdesc_len(
1291 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
1292 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
1293 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
d157e534 1294 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
e399441d
JS
1295
1296 lsop->queue = queue;
1297 lsreq->rqstaddr = conn_rqst;
1298 lsreq->rqstlen = sizeof(*conn_rqst);
1299 lsreq->rspaddr = conn_acc;
1300 lsreq->rsplen = sizeof(*conn_acc);
53b2b2f5 1301 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
e399441d 1302
c913a8b0 1303 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
e399441d
JS
1304 if (ret)
1305 goto out_free_buffer;
1306
1307 /* process connect LS completion */
1308
1309 /* validate the ACC response */
1310 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1311 fcret = VERR_LSACC;
f77fc87c 1312 else if (conn_acc->hdr.desc_list_len !=
e399441d
JS
1313 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
1314 fcret = VERR_CR_CONN_ACC_LEN;
f77fc87c 1315 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
e399441d
JS
1316 fcret = VERR_LSDESC_RQST;
1317 else if (conn_acc->hdr.rqst.desc_len !=
1318 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1319 fcret = VERR_LSDESC_RQST_LEN;
1320 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
1321 fcret = VERR_CR_CONN;
1322 else if (conn_acc->connectid.desc_tag !=
1323 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1324 fcret = VERR_CONN_ID;
1325 else if (conn_acc->connectid.desc_len !=
1326 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1327 fcret = VERR_CONN_ID_LEN;
1328
1329 if (fcret) {
1330 ret = -EBADF;
1331 dev_err(ctrl->dev,
7db39484 1332 "q %d Create I/O Connection LS failed: %s\n",
e399441d
JS
1333 queue->qnum, validation_errors[fcret]);
1334 } else {
1335 queue->connection_id =
1336 be64_to_cpu(conn_acc->connectid.connection_id);
1337 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1338 }
1339
1340out_free_buffer:
1341 kfree(lsop);
1342out_no_memory:
1343 if (ret)
1344 dev_err(ctrl->dev,
7db39484 1345 "queue %d connect I/O queue failed (%d).\n",
e399441d
JS
1346 queue->qnum, ret);
1347 return ret;
1348}
1349
1350static void
1351nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1352{
1353 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
e399441d 1354
c913a8b0 1355 __nvme_fc_finish_ls_req(lsop);
e399441d 1356
d4e4230c 1357 /* fc-nvme initiator doesn't care about success or failure of cmd */
e399441d
JS
1358
1359 kfree(lsop);
1360}
1361
1362/*
1363 * This routine sends a FC-NVME LS to disconnect (aka terminate)
1364 * the FC-NVME Association. Terminating the association also
1365 * terminates the FC-NVME connections (per queue, both admin and io
1366 * queues) that are part of the association. E.g. things are torn
1367 * down, and the related FC-NVME Association ID and Connection IDs
1368 * become invalid.
1369 *
1370 * The behavior of the fc-nvme initiator is such that it's
1371 * understanding of the association and connections will implicitly
1372 * be torn down. The action is implicit as it may be due to a loss of
1373 * connectivity with the fc-nvme target, so you may never get a
1374 * response even if you tried. As such, the action of this routine
1375 * is to asynchronously send the LS, ignore any results of the LS, and
1376 * continue on with terminating the association. If the fc-nvme target
1377 * is present and receives the LS, it too can tear down.
1378 */
1379static void
1380nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1381{
53b2b2f5
JS
1382 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
1383 struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
e399441d
JS
1384 struct nvmefc_ls_req_op *lsop;
1385 struct nvmefc_ls_req *lsreq;
c913a8b0 1386 int ret;
e399441d
JS
1387
1388 lsop = kzalloc((sizeof(*lsop) +
1389 ctrl->lport->ops->lsrqst_priv_sz +
1390 sizeof(*discon_rqst) + sizeof(*discon_acc)),
1391 GFP_KERNEL);
1392 if (!lsop)
 1393 /* couldn't send it... too bad */
1394 return;
1395
1396 lsreq = &lsop->ls_req;
1397
1398 lsreq->private = (void *)&lsop[1];
53b2b2f5 1399 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)
e399441d 1400 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
53b2b2f5 1401 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
e399441d 1402
53b2b2f5 1403 discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT_ASSOC;
e399441d
JS
1404 discon_rqst->desc_list_len = cpu_to_be32(
1405 sizeof(struct fcnvme_lsdesc_assoc_id) +
1406 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1407
1408 discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1409 discon_rqst->associd.desc_len =
1410 fcnvme_lsdesc_len(
1411 sizeof(struct fcnvme_lsdesc_assoc_id));
1412
1413 discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1414
1415 discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
1416 FCNVME_LSDESC_DISCONN_CMD);
1417 discon_rqst->discon_cmd.desc_len =
1418 fcnvme_lsdesc_len(
1419 sizeof(struct fcnvme_lsdesc_disconn_cmd));
e399441d
JS
1420
1421 lsreq->rqstaddr = discon_rqst;
1422 lsreq->rqstlen = sizeof(*discon_rqst);
1423 lsreq->rspaddr = discon_acc;
1424 lsreq->rsplen = sizeof(*discon_acc);
53b2b2f5 1425 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
e399441d 1426
c913a8b0
JS
1427 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1428 nvme_fc_disconnect_assoc_done);
1429 if (ret)
1430 kfree(lsop);
e399441d
JS
1431}
1432
72e6329f
JS
1433/**
1434 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
 1435 * upon the reception of an NVME LS request.
1436 *
1437 * The nvme-fc layer will copy payload to an internal structure for
1438 * processing. As such, upon completion of the routine, the LLDD may
1439 * immediately free/reuse the LS request buffer passed in the call.
1440 *
1441 * If this routine returns error, the LLDD should abort the exchange.
1442 *
 1443 * @portptr: pointer to the (registered) remote port that the LS
1444 * was received from. The remoteport is associated with
1445 * a specific localport.
1446 * @lsrsp: pointer to a nvmefc_ls_rsp response structure to be
1447 * used to reference the exchange corresponding to the LS
1448 * when issuing an ls response.
1449 * @lsreqbuf: pointer to the buffer containing the LS Request
1450 * @lsreqbuf_len: length, in bytes, of the received LS request
1451 */
1452int
1453nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
1454 struct nvmefc_ls_rsp *lsrsp,
1455 void *lsreqbuf, u32 lsreqbuf_len)
1456{
1457 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
1458 struct nvme_fc_lport *lport = rport->lport;
1459
1460 /* validate there's a routine to transmit a response */
1461 if (!lport->ops->xmt_ls_rsp)
 1462 return -EINVAL;
1463
1464 return 0;
1465}
1466EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);
1467
e399441d
JS
1468
1469/* *********************** NVME Ctrl Routines **************************** */
1470
f874d5d0 1471static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
e399441d 1472
e399441d
JS
1473static void
1474__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1475 struct nvme_fc_fcp_op *op)
1476{
1477 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1478 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1479 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1480 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1481
1482 atomic_set(&op->state, FCPOP_STATE_UNINIT);
1483}
1484
1485static void
d6296d39
CH
1486nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1487 unsigned int hctx_idx)
e399441d
JS
1488{
1489 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1490
d6296d39 1491 return __nvme_fc_exit_request(set->driver_data, op);
e399441d
JS
1492}
1493
78a7ac26
JS
1494static int
1495__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1496{
3efd6e8e
JS
1497 unsigned long flags;
1498 int opstate;
1499
1500 spin_lock_irqsave(&ctrl->lock, flags);
1501 opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1502 if (opstate != FCPOP_STATE_ACTIVE)
1503 atomic_set(&op->state, opstate);
1504 else if (ctrl->flags & FCCTRL_TERMIO)
1505 ctrl->iocnt++;
1506 spin_unlock_irqrestore(&ctrl->lock, flags);
78a7ac26 1507
3efd6e8e 1508 if (opstate != FCPOP_STATE_ACTIVE)
78a7ac26 1509 return -ECANCELED;
78a7ac26
JS
1510
1511 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1512 &ctrl->rport->remoteport,
1513 op->queue->lldd_handle,
1514 &op->fcp_req);
1515
1516 return 0;
1517}
1518
e399441d 1519static void
78a7ac26 1520nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
e399441d
JS
1521{
1522 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
3efd6e8e 1523 int i;
78a7ac26 1524
4cff280a
JS
1525 /* ensure we've initialized the ops once */
1526 if (!(aen_op->flags & FCOP_FLAGS_AEN))
1527 return;
1528
3efd6e8e
JS
1529 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
1530 __nvme_fc_abort_op(ctrl, aen_op);
e399441d
JS
1531}
1532
c3aedd22 1533static inline void
78a7ac26 1534__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
3efd6e8e 1535 struct nvme_fc_fcp_op *op, int opstate)
78a7ac26
JS
1536{
1537 unsigned long flags;
78a7ac26 1538
c3aedd22
JS
1539 if (opstate == FCPOP_STATE_ABORTED) {
1540 spin_lock_irqsave(&ctrl->lock, flags);
1541 if (ctrl->flags & FCCTRL_TERMIO) {
1542 if (!--ctrl->iocnt)
1543 wake_up(&ctrl->ioabort_wait);
1544 }
1545 spin_unlock_irqrestore(&ctrl->lock, flags);
61bff8ef 1546 }
78a7ac26
JS
1547}
1548
baee29ac 1549static void
e399441d
JS
1550nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1551{
1552 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1553 struct request *rq = op->rq;
1554 struct nvmefc_fcp_req *freq = &op->fcp_req;
1555 struct nvme_fc_ctrl *ctrl = op->ctrl;
1556 struct nvme_fc_queue *queue = op->queue;
1557 struct nvme_completion *cqe = &op->rsp_iu.cqe;
458f280d 1558 struct nvme_command *sqe = &op->cmd_iu.sqe;
d663b69f 1559 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
27fa9bc5 1560 union nvme_result result;
0a02e39f 1561 bool terminate_assoc = true;
3efd6e8e 1562 int opstate;
e399441d
JS
1563
1564 /*
1565 * WARNING:
1566 * The current linux implementation of a nvme controller
1567 * allocates a single tag set for all io queues and sizes
1568 * the io queues to fully hold all possible tags. Thus, the
1569 * implementation does not reference or care about the sqhd
1570 * value as it never needs to use the sqhd/sqtail pointers
1571 * for submission pacing.
1572 *
1573 * This affects the FC-NVME implementation in two ways:
1574 * 1) As the value doesn't matter, we don't need to waste
1575 * cycles extracting it from ERSPs and stamping it in the
1576 * cases where the transport fabricates CQEs on successful
1577 * completions.
1578 * 2) The FC-NVME implementation requires that delivery of
1579 * ERSP completions are to go back to the nvme layer in order
1580 * relative to the rsn, such that the sqhd value will always
1581 * be "in order" for the nvme layer. As the nvme layer in
1582 * linux doesn't care about sqhd, there's no need to return
1583 * them in order.
1584 *
1585 * Additionally:
1586 * As the core nvme layer in linux currently does not look at
1587 * every field in the cqe - in cases where the FC transport must
1588 * fabricate a CQE, the following fields will not be set as they
1589 * are not referenced:
1590 * cqe.sqid, cqe.sqhd, cqe.command_id
f874d5d0
JS
1591 *
 1592 * Failure or error of an individual i/o, detected by the
 1593 * transport in a fashion unrelated to the nvme completion status,
 1594 * can cause the initiator and target sides to get out
1595 * of sync on SQ head/tail (aka outstanding io count allowed).
1596 * Per FC-NVME spec, failure of an individual command requires
1597 * the connection to be terminated, which in turn requires the
1598 * association to be terminated.
e399441d
JS
1599 */
1600
3efd6e8e
JS
1601 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
1602
e399441d
JS
1603 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1604 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1605
3efd6e8e 1606 if (opstate == FCPOP_STATE_ABORTED)
74bd8cbe
JS
1607 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1608 else if (freq->status) {
1609 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1610 dev_info(ctrl->ctrl.device,
1611 "NVME-FC{%d}: io failed due to lldd error %d\n",
1612 ctrl->cnum, freq->status);
1613 }
e399441d
JS
1614
1615 /*
 1616 * For the linux implementation, if we have an unsuccessful
 1617 * status, the blk-mq layer can typically be called with the
 1618 * non-zero status and the content of the cqe isn't important.
1619 */
1620 if (status)
1621 goto done;
1622
1623 /*
1624 * command completed successfully relative to the wire
1625 * protocol. However, validate anything received and
1626 * extract the status and result from the cqe (create it
1627 * where necessary).
1628 */
1629
1630 switch (freq->rcv_rsplen) {
1631
1632 case 0:
1633 case NVME_FC_SIZEOF_ZEROS_RSP:
1634 /*
1635 * No response payload or 12 bytes of payload (which
1636 * should all be zeros) are considered successful and
1637 * no payload in the CQE by the transport.
1638 */
1639 if (freq->transferred_length !=
74bd8cbe
JS
1640 be32_to_cpu(op->cmd_iu.data_len)) {
1641 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1642 dev_info(ctrl->ctrl.device,
1643 "NVME-FC{%d}: io failed due to bad transfer "
1644 "length: %d vs expected %d\n",
1645 ctrl->cnum, freq->transferred_length,
1646 be32_to_cpu(op->cmd_iu.data_len));
e399441d
JS
1647 goto done;
1648 }
27fa9bc5 1649 result.u64 = 0;
e399441d
JS
1650 break;
1651
1652 case sizeof(struct nvme_fc_ersp_iu):
1653 /*
1654 * The ERSP IU contains a full completion with CQE.
1655 * Validate ERSP IU and look at cqe.
1656 */
1657 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
1658 (freq->rcv_rsplen / 4) ||
1659 be32_to_cpu(op->rsp_iu.xfrd_len) !=
1660 freq->transferred_length ||
53b2b2f5 1661 op->rsp_iu.ersp_result ||
458f280d 1662 sqe->common.command_id != cqe->command_id)) {
74bd8cbe
JS
1663 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1664 dev_info(ctrl->ctrl.device,
1665 "NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
1666 "iu len %d, xfr len %d vs %d, status code "
1667 "%d, cmdid %d vs %d\n",
1668 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
1669 be32_to_cpu(op->rsp_iu.xfrd_len),
1670 freq->transferred_length,
53b2b2f5 1671 op->rsp_iu.ersp_result,
74bd8cbe
JS
1672 sqe->common.command_id,
1673 cqe->command_id);
e399441d
JS
1674 goto done;
1675 }
27fa9bc5 1676 result = cqe->result;
d663b69f 1677 status = cqe->status;
e399441d
JS
1678 break;
1679
1680 default:
74bd8cbe
JS
1681 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1682 dev_info(ctrl->ctrl.device,
1683 "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
1684 "len %d\n",
1685 ctrl->cnum, freq->rcv_rsplen);
e399441d
JS
1686 goto done;
1687 }
1688
f874d5d0
JS
1689 terminate_assoc = false;
1690
e399441d 1691done:
78a7ac26 1692 if (op->flags & FCOP_FLAGS_AEN) {
27fa9bc5 1693 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
3efd6e8e 1694 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
78a7ac26
JS
1695 atomic_set(&op->state, FCPOP_STATE_IDLE);
1696 op->flags = FCOP_FLAGS_AEN; /* clear other flags */
e399441d 1697 nvme_fc_ctrl_put(ctrl);
f874d5d0 1698 goto check_error;
e399441d
JS
1699 }
1700
c3aedd22
JS
1701 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
1702 nvme_end_request(rq, status, result);
f874d5d0
JS
1703
1704check_error:
1705 if (terminate_assoc)
1706 nvme_fc_error_recovery(ctrl, "transport detected io error");
e399441d
JS
1707}
1708
1709static int
1710__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
1711 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
1712 struct request *rq, u32 rqno)
1713{
d3d0bc78
BVA
1714 struct nvme_fcp_op_w_sgl *op_w_sgl =
1715 container_of(op, typeof(*op_w_sgl), op);
e399441d
JS
1716 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1717 int ret = 0;
1718
1719 memset(op, 0, sizeof(*op));
1720 op->fcp_req.cmdaddr = &op->cmd_iu;
1721 op->fcp_req.cmdlen = sizeof(op->cmd_iu);
1722 op->fcp_req.rspaddr = &op->rsp_iu;
1723 op->fcp_req.rsplen = sizeof(op->rsp_iu);
1724 op->fcp_req.done = nvme_fc_fcpio_done;
e399441d
JS
1725 op->ctrl = ctrl;
1726 op->queue = queue;
1727 op->rq = rq;
1728 op->rqno = rqno;
1729
53b2b2f5 1730 cmdiu->format_id = NVME_CMD_FORMAT_ID;
e399441d
JS
1731 cmdiu->fc_id = NVME_CMD_FC_ID;
1732 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
44fbf3bb
JS
1733 if (queue->qnum)
1734 cmdiu->rsv_cat = fccmnd_set_cat_css(0,
1735 (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
1736 else
1737 cmdiu->rsv_cat = fccmnd_set_cat_admin(0);
e399441d
JS
1738
1739 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
1740 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
1741 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
1742 dev_err(ctrl->dev,
1743 "FCP Op failed - cmdiu dma mapping failed.\n");
 1744 ret = -EFAULT;
1745 goto out_on_error;
1746 }
1747
1748 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
1749 &op->rsp_iu, sizeof(op->rsp_iu),
1750 DMA_FROM_DEVICE);
1751 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
1752 dev_err(ctrl->dev,
1753 "FCP Op failed - rspiu dma mapping failed.\n");
 1754 ret = -EFAULT;
1755 }
1756
1757 atomic_set(&op->state, FCPOP_STATE_IDLE);
1758out_on_error:
1759 return ret;
1760}
1761
1762static int
d6296d39
CH
1763nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
1764 unsigned int hctx_idx, unsigned int numa_node)
e399441d 1765{
d6296d39 1766 struct nvme_fc_ctrl *ctrl = set->driver_data;
d3d0bc78 1767 struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
76f983cb
CH
1768 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
1769 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
0d2bdf9f 1770 int res;
e399441d 1771
0d2bdf9f
BVA
1772 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
1773 if (res)
1774 return res;
1775 op->op.fcp_req.first_sgl = &op->sgl[0];
d19b8bc8 1776 op->op.fcp_req.private = &op->priv[0];
dfa74422 1777 nvme_req(rq)->ctrl = &ctrl->ctrl;
0d2bdf9f 1778 return res;
e399441d
JS
1779}
1780
1781static int
1782nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
1783{
1784 struct nvme_fc_fcp_op *aen_op;
1785 struct nvme_fc_cmd_iu *cmdiu;
1786 struct nvme_command *sqe;
61bff8ef 1787 void *private;
e399441d
JS
1788 int i, ret;
1789
1790 aen_op = ctrl->aen_ops;
38dabe21 1791 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
61bff8ef
JS
1792 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
1793 GFP_KERNEL);
1794 if (!private)
1795 return -ENOMEM;
1796
e399441d
JS
1797 cmdiu = &aen_op->cmd_iu;
1798 sqe = &cmdiu->sqe;
1799 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
1800 aen_op, (struct request *)NULL,
38dabe21 1801 (NVME_AQ_BLK_MQ_DEPTH + i));
61bff8ef
JS
1802 if (ret) {
1803 kfree(private);
e399441d 1804 return ret;
61bff8ef 1805 }
e399441d 1806
78a7ac26 1807 aen_op->flags = FCOP_FLAGS_AEN;
61bff8ef 1808 aen_op->fcp_req.private = private;
78a7ac26 1809
e399441d
JS
1810 memset(sqe, 0, sizeof(*sqe));
1811 sqe->common.opcode = nvme_admin_async_event;
78a7ac26 1812 /* Note: core layer may overwrite the sqe.command_id value */
38dabe21 1813 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
e399441d
JS
1814 }
1815 return 0;
1816}
1817
61bff8ef
JS
1818static void
1819nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
1820{
1821 struct nvme_fc_fcp_op *aen_op;
1822 int i;
1823
1824 aen_op = ctrl->aen_ops;
38dabe21 1825 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
61bff8ef
JS
1826 if (!aen_op->fcp_req.private)
1827 continue;
1828
1829 __nvme_fc_exit_request(ctrl, aen_op);
1830
1831 kfree(aen_op->fcp_req.private);
1832 aen_op->fcp_req.private = NULL;
1833 }
1834}
e399441d
JS
1835
1836static inline void
1837__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
1838 unsigned int qidx)
1839{
1840 struct nvme_fc_queue *queue = &ctrl->queues[qidx];
1841
1842 hctx->driver_data = queue;
1843 queue->hctx = hctx;
1844}
1845
1846static int
1847nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1848 unsigned int hctx_idx)
1849{
1850 struct nvme_fc_ctrl *ctrl = data;
1851
1852 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
1853
1854 return 0;
1855}
1856
1857static int
1858nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1859 unsigned int hctx_idx)
1860{
1861 struct nvme_fc_ctrl *ctrl = data;
1862
1863 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
1864
1865 return 0;
1866}
1867
1868static void
08e15075 1869nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
e399441d
JS
1870{
1871 struct nvme_fc_queue *queue;
1872
1873 queue = &ctrl->queues[idx];
1874 memset(queue, 0, sizeof(*queue));
1875 queue->ctrl = ctrl;
1876 queue->qnum = idx;
67f471b6 1877 atomic_set(&queue->csn, 0);
e399441d
JS
1878 queue->dev = ctrl->dev;
1879
1880 if (idx > 0)
1881 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1882 else
1883 queue->cmnd_capsule_len = sizeof(struct nvme_command);
1884
e399441d
JS
1885 /*
1886 * Considered whether we should allocate buffers for all SQEs
1887 * and CQEs and dma map them - mapping their respective entries
1888 * into the request structures (kernel vm addr and dma address)
1889 * so that the driver could use the buffers/mappings directly.
1890 * It only makes sense if the LLDD would use them for its
1891 * messaging api. It's very unlikely most adapter apis would use
1892 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
1893 * structures were used instead.
1894 */
1895}
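A short worked example of the capsule sizing above, assuming the usual NVMe-oF convention that ioccsz is reported in 16-byte units:

/*
 * Example: a controller reporting ioccsz = 4 gives
 *	cmnd_capsule_len = 4 * 16 = 64 bytes
 * i.e. just the 64-byte SQE with no in-capsule data, which is
 * consistent with the icdoff == 0 check enforced at connect time.
 */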
1896
1897/*
1898 * This routine terminates a queue at the transport level.
1899 * The transport has already ensured that all outstanding ios on
1900 * the queue have been terminated.
1901 * The transport will send a Disconnect LS request to terminate
1902 * the queue's connection. Termination of the admin queue will also
1903 * terminate the association at the target.
1904 */
1905static void
1906nvme_fc_free_queue(struct nvme_fc_queue *queue)
1907{
1908 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
1909 return;
1910
9e0ed16a 1911 clear_bit(NVME_FC_Q_LIVE, &queue->flags);
e399441d
JS
1912 /*
1913 * Current implementation never disconnects a single queue.
1914 * It always terminates a whole association. So there is never
1915 * a disconnect(queue) LS sent to the target.
1916 */
1917
1918 queue->connection_id = 0;
67f471b6 1919 atomic_set(&queue->csn, 0);
e399441d
JS
1920}
1921
1922static void
1923__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
1924 struct nvme_fc_queue *queue, unsigned int qidx)
1925{
1926 if (ctrl->lport->ops->delete_queue)
1927 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
1928 queue->lldd_handle);
1929 queue->lldd_handle = NULL;
1930}
1931
e399441d
JS
1932static void
1933nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
1934{
1935 int i;
1936
d858e5f0 1937 for (i = 1; i < ctrl->ctrl.queue_count; i++)
e399441d
JS
1938 nvme_fc_free_queue(&ctrl->queues[i]);
1939}
1940
1941static int
1942__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
1943 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
1944{
1945 int ret = 0;
1946
1947 queue->lldd_handle = NULL;
1948 if (ctrl->lport->ops->create_queue)
1949 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
1950 qidx, qsize, &queue->lldd_handle);
1951
1952 return ret;
1953}
1954
1955static void
1956nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
1957{
d858e5f0 1958 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
e399441d
JS
1959 int i;
1960
d858e5f0 1961 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
e399441d
JS
1962 __nvme_fc_delete_hw_queue(ctrl, queue, i);
1963}
1964
1965static int
1966nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1967{
1968 struct nvme_fc_queue *queue = &ctrl->queues[1];
17a1ec08 1969 int i, ret;
e399441d 1970
d858e5f0 1971 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
e399441d 1972 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
17a1ec08
JT
1973 if (ret)
1974 goto delete_queues;
e399441d
JS
1975 }
1976
1977 return 0;
17a1ec08
JT
1978
1979delete_queues:
1980 for (; i >= 1; i--)
1981 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
1982 return ret;
e399441d
JS
1983}
1984
1985static int
1986nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1987{
1988 int i, ret = 0;
1989
d858e5f0 1990 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
e399441d
JS
1991 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
1992 (qsize / 5));
1993 if (ret)
1994 break;
26c68227 1995 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
e399441d
JS
1996 if (ret)
1997 break;
9e0ed16a
SG
1998
1999 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
e399441d
JS
2000 }
2001
2002 return ret;
2003}
2004
2005static void
2006nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
2007{
2008 int i;
2009
d858e5f0 2010 for (i = 1; i < ctrl->ctrl.queue_count; i++)
08e15075 2011 nvme_fc_init_queue(ctrl, i);
e399441d
JS
2012}
2013
2014static void
2015nvme_fc_ctrl_free(struct kref *ref)
2016{
2017 struct nvme_fc_ctrl *ctrl =
2018 container_of(ref, struct nvme_fc_ctrl, ref);
2019 unsigned long flags;
2020
61bff8ef
JS
2021 if (ctrl->ctrl.tagset) {
2022 blk_cleanup_queue(ctrl->ctrl.connect_q);
2023 blk_mq_free_tag_set(&ctrl->tag_set);
e399441d
JS
2024 }
2025
61bff8ef
JS
2026 /* remove from rport list */
2027 spin_lock_irqsave(&ctrl->rport->lock, flags);
2028 list_del(&ctrl->ctrl_list);
2029 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
2030
f9c5af5f 2031 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
61bff8ef 2032 blk_cleanup_queue(ctrl->ctrl.admin_q);
e7832cb4 2033 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
61bff8ef
JS
2034 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2035
2036 kfree(ctrl->queues);
2037
e399441d
JS
2038 put_device(ctrl->dev);
2039 nvme_fc_rport_put(ctrl->rport);
2040
e399441d 2041 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
de41447a
EM
2042 if (ctrl->ctrl.opts)
2043 nvmf_free_options(ctrl->ctrl.opts);
e399441d
JS
2044 kfree(ctrl);
2045}
2046
2047static void
2048nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2049{
2050 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2051}
2052
2053static int
2054nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2055{
2056 return kref_get_unless_zero(&ctrl->ref);
2057}
2058
2059/*
2060 * All accesses from nvme core layer done - can now free the
2061 * controller. Called after last nvme_put_ctrl() call
2062 */
2063static void
61bff8ef 2064nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
e399441d
JS
2065{
2066 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2067
2068 WARN_ON(nctrl != &ctrl->ctrl);
2069
61bff8ef
JS
2070 nvme_fc_ctrl_put(ctrl);
2071}
e399441d 2072
61bff8ef
JS
2073static void
2074nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2075{
4cff280a
JS
2076 int active;
2077
2078 /*
2079 * if an error (io timeout, etc) occurs while (re)connecting,
2080 * it's an error on creating the new association.
2081 * Start the error recovery thread if it hasn't already
2082 * been started. It is expected there could be multiple
2083 * ios hitting this path before things are cleaned up.
2084 */
2085 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2086 active = atomic_xchg(&ctrl->err_work_active, 1);
8730c1dd 2087 if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
4cff280a
JS
2088 atomic_set(&ctrl->err_work_active, 0);
2089 WARN_ON(1);
2090 }
2091 return;
2092 }
2093
2094 /* Otherwise, only proceed if in LIVE state - e.g. on first error */
69fa9646
JS
2095 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2096 return;
2097
61bff8ef
JS
2098 dev_warn(ctrl->ctrl.device,
2099 "NVME-FC{%d}: transport association error detected: %s\n",
2100 ctrl->cnum, errmsg);
589ff775 2101 dev_warn(ctrl->ctrl.device,
61bff8ef 2102 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
e399441d 2103
d86c4d8e 2104 nvme_reset_ctrl(&ctrl->ctrl);
e399441d
JS
2105}
2106
baee29ac 2107static enum blk_eh_timer_return
e399441d
JS
2108nvme_fc_timeout(struct request *rq, bool reserved)
2109{
2110 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2111 struct nvme_fc_ctrl *ctrl = op->ctrl;
e399441d
JS
2112
2113 /*
61bff8ef 2114 * we can't individually ABTS an io without affecting the queue,
041018c6 2115 * thus killing the queue, and thus the association.
61bff8ef
JS
2116 * So resolve by performing a controller reset, which will stop
2117 * the host/io stack, terminate the association on the link,
2118 * and recreate an association on the link.
e399441d 2119 */
61bff8ef 2120 nvme_fc_error_recovery(ctrl, "io timeout error");
e399441d 2121
134aedc9
JS
2122 /*
2123 * the io abort has been initiated. Have the reset timer
2124 * restarted and the abort completion will complete the io
2125 * shortly. Avoids a synchronous wait while the abort finishes.
2126 */
2127 return BLK_EH_RESET_TIMER;
e399441d
JS
2128}
2129
2130static int
2131nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2132 struct nvme_fc_fcp_op *op)
2133{
2134 struct nvmefc_fcp_req *freq = &op->fcp_req;
e399441d
JS
2135 int ret;
2136
2137 freq->sg_cnt = 0;
2138
9f7d8ae2 2139 if (!blk_rq_nr_phys_segments(rq))
e399441d
JS
2140 return 0;
2141
2142 freq->sg_table.sgl = freq->first_sgl;
19e420bb 2143 ret = sg_alloc_table_chained(&freq->sg_table,
4635873c 2144 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
b1ae1a23 2145 NVME_INLINE_SG_CNT);
e399441d
JS
2146 if (ret)
2147 return -ENOMEM;
2148
2149 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
19e420bb 2150 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
e399441d 2151 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
f15872c5 2152 op->nents, rq_dma_dir(rq));
e399441d 2153 if (unlikely(freq->sg_cnt <= 0)) {
b1ae1a23 2154 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
e399441d
JS
2155 freq->sg_cnt = 0;
2156 return -EFAULT;
2157 }
2158
2159 /*
2160 * TODO: blk_integrity_rq(rq) for DIF
2161 */
2162 return 0;
2163}
2164
2165static void
2166nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2167 struct nvme_fc_fcp_op *op)
2168{
2169 struct nvmefc_fcp_req *freq = &op->fcp_req;
2170
2171 if (!freq->sg_cnt)
2172 return;
2173
2174 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
f15872c5 2175 rq_dma_dir(rq));
e399441d 2176
b1ae1a23 2177 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
e399441d
JS
2178
2179 freq->sg_cnt = 0;
2180}
2181
2182/*
2183 * In FC, the queue is a logical thing. At transport connect, the target
2184 * creates its "queue" and returns a handle that is to be given to the
2185 * target whenever it posts something to the corresponding SQ. When an
2186 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
2187 * command contained within the SQE, an io, and assigns a FC exchange
2188 * to it. The SQE and the associated SQ handle are sent in the initial
2189 * CMD IU sent on the exchange. All transfers relative to the io occur
2190 * as part of the exchange. The CQE is the last thing for the io,
2191 * which is transferred (explicitly or implicitly) with the RSP IU
2192 * sent on the exchange. After the CQE is received, the FC exchange is
2193 * terminated and the exchange may be used for a different io.
2194 *
2195 * The transport-to-LLDD api has the transport hand a new fcp io
2196 * request to the LLDD. The LLDD then allocates a FC exchange
2197 * resource and transfers the command. The LLDD will then process all
2198 * steps to complete the io. Upon completion, the transport done routine
2199 * is called.
2200 *
2201 * So - while the operation is outstanding to the LLDD, there is a link
2202 * level FC exchange resource that is also outstanding. This must be
2203 * considered in all cleanup operations.
2204 */
fc17b653 2205static blk_status_t
e399441d
JS
2206nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2207 struct nvme_fc_fcp_op *op, u32 data_len,
2208 enum nvmefc_fcp_datadir io_dir)
2209{
2210 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2211 struct nvme_command *sqe = &cmdiu->sqe;
b12740d3 2212 int ret, opstate;
e399441d 2213
61bff8ef
JS
2214 /*
2215 * before attempting to send the io, check to see if we believe
2216 * the target device is present
2217 */
2218 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
86ff7c2a 2219 return BLK_STS_RESOURCE;
61bff8ef 2220
e399441d 2221 if (!nvme_fc_ctrl_get(ctrl))
fc17b653 2222 return BLK_STS_IOERR;
e399441d
JS
2223
2224 /* format the FC-NVME CMD IU and fcp_req */
2225 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
e399441d
JS
2226 cmdiu->data_len = cpu_to_be32(data_len);
2227 switch (io_dir) {
2228 case NVMEFC_FCP_WRITE:
2229 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
2230 break;
2231 case NVMEFC_FCP_READ:
2232 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
2233 break;
2234 case NVMEFC_FCP_NODATA:
2235 cmdiu->flags = 0;
2236 break;
2237 }
2238 op->fcp_req.payload_length = data_len;
2239 op->fcp_req.io_dir = io_dir;
2240 op->fcp_req.transferred_length = 0;
2241 op->fcp_req.rcv_rsplen = 0;
62eeacb0 2242 op->fcp_req.status = NVME_SC_SUCCESS;
e399441d
JS
2243 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
2244
2245 /*
2246 * validate per fabric rules, set fields mandated by fabric spec
2247 * as well as those by FC-NVME spec.
2248 */
2249 WARN_ON_ONCE(sqe->common.metadata);
e399441d
JS
2250 sqe->common.flags |= NVME_CMD_SGL_METABUF;
2251
2252 /*
d9d34c0b
JS
2253 * format SQE DPTR field per FC-NVME rules:
2254 * type=0x5 Transport SGL Data Block Descriptor
2255 * subtype=0xA Transport-specific value
2256 * address=0
2257 * length=length of the data series
e399441d 2258 */
d9d34c0b
JS
2259 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2260 NVME_SGL_FMT_TRANSPORT_A;
e399441d
JS
2261 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2262 sqe->rw.dptr.sgl.addr = 0;
2263
78a7ac26 2264 if (!(op->flags & FCOP_FLAGS_AEN)) {
e399441d
JS
2265 ret = nvme_fc_map_data(ctrl, op->rq, op);
2266 if (ret < 0) {
e399441d
JS
2267 nvme_cleanup_cmd(op->rq);
2268 nvme_fc_ctrl_put(ctrl);
fc17b653
CH
2269 if (ret == -ENOMEM || ret == -EAGAIN)
2270 return BLK_STS_RESOURCE;
2271 return BLK_STS_IOERR;
e399441d
JS
2272 }
2273 }
2274
2275 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2276 sizeof(op->cmd_iu), DMA_TO_DEVICE);
2277
2278 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2279
78a7ac26 2280 if (!(op->flags & FCOP_FLAGS_AEN))
e399441d
JS
2281 blk_mq_start_request(op->rq);
2282
67f471b6 2283 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
e399441d
JS
2284 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2285 &ctrl->rport->remoteport,
2286 queue->lldd_handle, &op->fcp_req);
2287
2288 if (ret) {
67f471b6
JS
2289 /*
2290 * If the lld fails to send the command, is there an issue with
2291 * the csn value? If the command that fails is the Connect,
2292 * no - as the connection won't be live. If it is a command
2293 * post-connect, it's possible a gap in csn may be created.
2294 * Does this matter? As Linux initiators don't send fused
2295 * commands, no. The gap would exist, but as there's nothing
2296 * that depends on csn order to be delivered on the target
2297 * side, it shouldn't hurt. It would be difficult for a
2298 * target to even detect the csn gap as it has no idea when the
2299 * cmd with the csn was supposed to arrive.
2300 */
b12740d3
JS
2301 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
2302 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2303
8b25f351 2304 if (!(op->flags & FCOP_FLAGS_AEN))
e399441d 2305 nvme_fc_unmap_data(ctrl, op->rq, op);
e399441d 2306
16686f3a 2307 nvme_cleanup_cmd(op->rq);
e399441d
JS
2308 nvme_fc_ctrl_put(ctrl);
2309
8b25f351
JS
2310 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2311 ret != -EBUSY)
fc17b653 2312 return BLK_STS_IOERR;
e399441d 2313
86ff7c2a 2314 return BLK_STS_RESOURCE;
e399441d
JS
2315 }
2316
fc17b653 2317 return BLK_STS_OK;
e399441d
JS
2318}
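To make the LLDD side of this contract concrete, a minimal hypothetical completion helper is sketched below; only the nvmefc_fcp_req fields already used in this file are assumed, everything else is invented for illustration.

/* Hypothetical LLDD-side sketch - not part of this driver. */
static void example_lldd_complete_fcp(struct nvmefc_fcp_req *freq,
		u32 xfrd_len, u16 ersp_len)
{
	/* bytes of payload that actually moved on the wire */
	freq->transferred_length = xfrd_len;

	/* 0 if the io completed without an ERSP IU, otherwise the ERSP
	 * length after it has been copied into freq->rspaddr
	 */
	freq->rcv_rsplen = ersp_len;

	/* transport-visible status; the done path treats a non-zero
	 * value as a transport-level error
	 */
	freq->status = 0;

	/* hand the io back to the transport (nvme_fc_fcpio_done) */
	freq->done(freq);
}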
2319
fc17b653 2320static blk_status_t
e399441d
JS
2321nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2322 const struct blk_mq_queue_data *bd)
2323{
2324 struct nvme_ns *ns = hctx->queue->queuedata;
2325 struct nvme_fc_queue *queue = hctx->driver_data;
2326 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2327 struct request *rq = bd->rq;
2328 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2329 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2330 struct nvme_command *sqe = &cmdiu->sqe;
2331 enum nvmefc_fcp_datadir io_dir;
3bc32bb1 2332 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
e399441d 2333 u32 data_len;
fc17b653 2334 blk_status_t ret;
e399441d 2335
3bc32bb1
CH
2336 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2337 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
6cdefc6e 2338 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
9e0ed16a 2339
e399441d
JS
2340 ret = nvme_setup_cmd(ns, rq, sqe);
2341 if (ret)
2342 return ret;
2343
9f7d8ae2
JS
2344 /*
2345 * nvme core doesn't quite treat the rq opaquely. Commands such
2346 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
2347 * there is no actual payload to be transferred.
2348 * To get it right, key data transmission on there being 1 or
2349 * more physical segments in the sg list. If there is no
2350 * physical segments, there is no payload.
2351 */
2352 if (blk_rq_nr_phys_segments(rq)) {
2353 data_len = blk_rq_payload_bytes(rq);
e399441d
JS
2354 io_dir = ((rq_data_dir(rq) == WRITE) ?
2355 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
9f7d8ae2
JS
2356 } else {
2357 data_len = 0;
e399441d 2358 io_dir = NVMEFC_FCP_NODATA;
9f7d8ae2
JS
2359 }
2360
e399441d
JS
2361
2362 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2363}
2364
e399441d 2365static void
ad22c355 2366nvme_fc_submit_async_event(struct nvme_ctrl *arg)
e399441d
JS
2367{
2368 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2369 struct nvme_fc_fcp_op *aen_op;
61bff8ef
JS
2370 unsigned long flags;
2371 bool terminating = false;
fc17b653 2372 blk_status_t ret;
e399441d 2373
61bff8ef
JS
2374 spin_lock_irqsave(&ctrl->lock, flags);
2375 if (ctrl->flags & FCCTRL_TERMIO)
2376 terminating = true;
2377 spin_unlock_irqrestore(&ctrl->lock, flags);
2378
2379 if (terminating)
2380 return;
2381
ad22c355 2382 aen_op = &ctrl->aen_ops[0];
e399441d
JS
2383
2384 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2385 NVMEFC_FCP_NODATA);
2386 if (ret)
2387 dev_err(ctrl->ctrl.device,
ad22c355 2388 "failed async event work\n");
e399441d
JS
2389}
2390
2391static void
c3aedd22 2392nvme_fc_complete_rq(struct request *rq)
e399441d
JS
2393{
2394 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2395 struct nvme_fc_ctrl *ctrl = op->ctrl;
e399441d 2396
78a7ac26 2397 atomic_set(&op->state, FCPOP_STATE_IDLE);
e399441d 2398
e399441d 2399 nvme_fc_unmap_data(ctrl, rq, op);
77f02a7a 2400 nvme_complete_rq(rq);
e399441d 2401 nvme_fc_ctrl_put(ctrl);
78a7ac26
JS
2402}
2403
e399441d
JS
2404/*
2405 * This routine is used by the transport when it needs to find active
2406 * io on a queue that is to be terminated. The transport uses
2407 * blk_mq_tagset_busy_iter() to find the busy requests; the iterator
2408 * then invokes this routine on each of them to kill them one by one.
2409 *
2410 * As FC allocates FC exchange for each io, the transport must contact
2411 * the LLDD to terminate the exchange, thus releasing the FC exchange.
2412 * After terminating the exchange the LLDD will call the transport's
2413 * normal io done path for the request, but it will have an aborted
2414 * status. The done path will return the io request back to the block
2415 * layer with an error status.
2416 */
7baa8572 2417static bool
e399441d
JS
2418nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2419{
2420 struct nvme_ctrl *nctrl = data;
2421 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2422 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
e399441d 2423
3efd6e8e 2424 __nvme_fc_abort_op(ctrl, op);
7baa8572 2425 return true;
e399441d
JS
2426}
2427
78a7ac26 2428
61bff8ef
JS
2429static const struct blk_mq_ops nvme_fc_mq_ops = {
2430 .queue_rq = nvme_fc_queue_rq,
2431 .complete = nvme_fc_complete_rq,
2432 .init_request = nvme_fc_init_request,
2433 .exit_request = nvme_fc_exit_request,
61bff8ef 2434 .init_hctx = nvme_fc_init_hctx,
61bff8ef
JS
2435 .timeout = nvme_fc_timeout,
2436};
e399441d 2437
61bff8ef
JS
2438static int
2439nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
e399441d 2440{
61bff8ef 2441 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
7314183d 2442 unsigned int nr_io_queues;
61bff8ef 2443 int ret;
e399441d 2444
7314183d
SG
2445 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2446 ctrl->lport->ops->max_hw_queues);
2447 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
61bff8ef
JS
2448 if (ret) {
2449 dev_info(ctrl->ctrl.device,
2450 "set_queue_count failed: %d\n", ret);
2451 return ret;
2452 }
e399441d 2453
7314183d
SG
2454 ctrl->ctrl.queue_count = nr_io_queues + 1;
2455 if (!nr_io_queues)
61bff8ef 2456 return 0;
e399441d 2457
61bff8ef 2458 nvme_fc_init_io_queues(ctrl);
e399441d 2459
61bff8ef
JS
2460 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2461 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2462 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2463 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
103e515e 2464 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
61bff8ef 2465 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
d3d0bc78
BVA
2466 ctrl->tag_set.cmd_size =
2467 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
2468 ctrl->lport->ops->fcprqst_priv_sz);
61bff8ef 2469 ctrl->tag_set.driver_data = ctrl;
d858e5f0 2470 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
61bff8ef 2471 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
e399441d 2472
61bff8ef
JS
2473 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2474 if (ret)
2475 return ret;
e399441d 2476
61bff8ef 2477 ctrl->ctrl.tagset = &ctrl->tag_set;
e399441d 2478
61bff8ef
JS
2479 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2480 if (IS_ERR(ctrl->ctrl.connect_q)) {
2481 ret = PTR_ERR(ctrl->ctrl.connect_q);
2482 goto out_free_tag_set;
2483 }
e399441d 2484
d157e534 2485 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
e399441d 2486 if (ret)
61bff8ef 2487 goto out_cleanup_blk_queue;
e399441d 2488
d157e534 2489 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
61bff8ef
JS
2490 if (ret)
2491 goto out_delete_hw_queues;
e399441d 2492
4c984154
JS
2493 ctrl->ioq_live = true;
2494
e399441d 2495 return 0;
e399441d 2496
61bff8ef
JS
2497out_delete_hw_queues:
2498 nvme_fc_delete_hw_io_queues(ctrl);
2499out_cleanup_blk_queue:
61bff8ef
JS
2500 blk_cleanup_queue(ctrl->ctrl.connect_q);
2501out_free_tag_set:
2502 blk_mq_free_tag_set(&ctrl->tag_set);
2503 nvme_fc_free_io_queues(ctrl);
e399441d 2504
61bff8ef
JS
2505 /* force put free routine to ignore io queues */
2506 ctrl->ctrl.tagset = NULL;
2507
2508 return ret;
2509}
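A hedged reading of the cmd_size expression above: for a flexible u8 priv[] tail, struct_size() reduces to plain addition (plus overflow saturation), so the per-request PDU is laid out as sketched here.

/*
 * Sketch only:
 *	cmd_size == sizeof(struct nvme_fcp_op_w_sgl) +
 *		    ctrl->lport->ops->fcprqst_priv_sz;
 * Each blk-mq request therefore carries the transport op state, the
 * inline scatterlist, and the LLDD's private bytes, which
 * nvme_fc_init_request() exposes as fcp_req.first_sgl and
 * fcp_req.private.
 */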
e399441d
JS
2510
2511static int
3e493c00 2512nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
e399441d
JS
2513{
2514 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
834d3710 2515 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
7314183d 2516 unsigned int nr_io_queues;
e399441d
JS
2517 int ret;
2518
7314183d
SG
2519 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2520 ctrl->lport->ops->max_hw_queues);
2521 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
e399441d
JS
2522 if (ret) {
2523 dev_info(ctrl->ctrl.device,
2524 "set_queue_count failed: %d\n", ret);
2525 return ret;
2526 }
2527
834d3710
JS
2528 if (!nr_io_queues && prior_ioq_cnt) {
2529 dev_info(ctrl->ctrl.device,
2530 "Fail Reconnect: At least 1 io queue "
2531 "required (was %d)\n", prior_ioq_cnt);
2532 return -ENOSPC;
2533 }
2534
7314183d 2535 ctrl->ctrl.queue_count = nr_io_queues + 1;
61bff8ef 2536 /* check for io queues existing */
d858e5f0 2537 if (ctrl->ctrl.queue_count == 1)
e399441d
JS
2538 return 0;
2539
d157e534 2540 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
e399441d 2541 if (ret)
61bff8ef 2542 goto out_free_io_queues;
e399441d 2543
d157e534 2544 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
e399441d
JS
2545 if (ret)
2546 goto out_delete_hw_queues;
2547
834d3710
JS
2548 if (prior_ioq_cnt != nr_io_queues)
2549 dev_info(ctrl->ctrl.device,
2550 "reconnect: revising io queue count from %d to %d\n",
2551 prior_ioq_cnt, nr_io_queues);
cda5fd1a
SG
2552 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2553
e399441d
JS
2554 return 0;
2555
2556out_delete_hw_queues:
2557 nvme_fc_delete_hw_io_queues(ctrl);
61bff8ef 2558out_free_io_queues:
e399441d 2559 nvme_fc_free_io_queues(ctrl);
61bff8ef
JS
2560 return ret;
2561}
e399441d 2562
158bfb88
JS
2563static void
2564nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
2565{
2566 struct nvme_fc_lport *lport = rport->lport;
2567
2568 atomic_inc(&lport->act_rport_cnt);
2569}
2570
2571static void
2572nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
2573{
2574 struct nvme_fc_lport *lport = rport->lport;
2575 u32 cnt;
2576
2577 cnt = atomic_dec_return(&lport->act_rport_cnt);
2578 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
2579 lport->ops->localport_delete(&lport->localport);
2580}
2581
2582static int
2583nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
2584{
2585 struct nvme_fc_rport *rport = ctrl->rport;
2586 u32 cnt;
2587
2588 if (ctrl->assoc_active)
2589 return 1;
2590
2591 ctrl->assoc_active = true;
2592 cnt = atomic_inc_return(&rport->act_ctrl_cnt);
2593 if (cnt == 1)
2594 nvme_fc_rport_active_on_lport(rport);
2595
2596 return 0;
2597}
2598
2599static int
2600nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
2601{
2602 struct nvme_fc_rport *rport = ctrl->rport;
2603 struct nvme_fc_lport *lport = rport->lport;
2604 u32 cnt;
2605
2606 /* ctrl->assoc_active=false will be set independently */
2607
2608 cnt = atomic_dec_return(&rport->act_ctrl_cnt);
2609 if (cnt == 0) {
2610 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
2611 lport->ops->remoteport_delete(&rport->remoteport);
2612 nvme_fc_rport_inactive_on_lport(rport);
2613 }
2614
2615 return 0;
2616}
2617
61bff8ef
JS
2618/*
2619 * This routine restarts the controller on the host side, and
2620 * on the link side, recreates the controller association.
2621 */
2622static int
2623nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2624{
2625 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
61bff8ef
JS
2626 int ret;
2627 bool changed;
2628
fdf9dfa8 2629 ++ctrl->ctrl.nr_reconnects;
61bff8ef 2630
96e24801
JS
2631 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2632 return -ENODEV;
2633
158bfb88
JS
2634 if (nvme_fc_ctlr_active_on_rport(ctrl))
2635 return -ENOTUNIQ;
2636
4bea364f
JS
2637 dev_info(ctrl->ctrl.device,
2638 "NVME-FC{%d}: create association : host wwpn 0x%016llx "
2639 " rport wwpn 0x%016llx: NQN \"%s\"\n",
2640 ctrl->cnum, ctrl->lport->localport.port_name,
2641 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
2642
61bff8ef
JS
2643 /*
2644 * Create the admin queue
2645 */
2646
61bff8ef 2647 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
d157e534 2648 NVME_AQ_DEPTH);
61bff8ef
JS
2649 if (ret)
2650 goto out_free_queue;
2651
2652 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
d157e534 2653 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
61bff8ef
JS
2654 if (ret)
2655 goto out_delete_hw_queue;
2656
61bff8ef
JS
2657 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
2658 if (ret)
2659 goto out_disconnect_admin_queue;
2660
9e0ed16a
SG
2661 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2662
61bff8ef
JS
2663 /*
2664 * Check controller capabilities
2665 *
2666 * todo:- add code to check if ctrl attributes changed from
2667 * prior connection values
2668 */
2669
c0f2f45b 2670 ret = nvme_enable_ctrl(&ctrl->ctrl);
61bff8ef
JS
2671 if (ret)
2672 goto out_disconnect_admin_queue;
2673
ecad0d2c
JS
2674 ctrl->ctrl.max_hw_sectors =
2675 (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);
61bff8ef 2676
e7832cb4
SG
2677 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2678
61bff8ef
JS
2679 ret = nvme_init_identify(&ctrl->ctrl);
2680 if (ret)
2681 goto out_disconnect_admin_queue;
2682
2683 /* sanity checks */
2684
2685 /* FC-NVME does not have other data in the capsule */
2686 if (ctrl->ctrl.icdoff) {
2687 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
2688 ctrl->ctrl.icdoff);
2689 goto out_disconnect_admin_queue;
2690 }
2691
61bff8ef
JS
2692 /* FC-NVME supports normal SGL Data Block Descriptors */
2693
2694 if (opts->queue_size > ctrl->ctrl.maxcmd) {
2695 /* warn if maxcmd is lower than queue_size */
2696 dev_warn(ctrl->ctrl.device,
2697 "queue_size %zu > ctrl maxcmd %u, reducing "
7db39484 2698 "to maxcmd\n",
61bff8ef
JS
2699 opts->queue_size, ctrl->ctrl.maxcmd);
2700 opts->queue_size = ctrl->ctrl.maxcmd;
2701 }
2702
d157e534
JS
2703 if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
2704 /* warn if sqsize is lower than queue_size */
2705 dev_warn(ctrl->ctrl.device,
7db39484
JS
2706 "queue_size %zu > ctrl sqsize %u, reducing "
2707 "to sqsize\n",
d157e534
JS
2708 opts->queue_size, ctrl->ctrl.sqsize + 1);
2709 opts->queue_size = ctrl->ctrl.sqsize + 1;
2710 }
2711
61bff8ef
JS
2712 ret = nvme_fc_init_aen_ops(ctrl);
2713 if (ret)
2714 goto out_term_aen_ops;
2715
2716 /*
2717 * Create the io queues
2718 */
2719
d858e5f0 2720 if (ctrl->ctrl.queue_count > 1) {
4c984154 2721 if (!ctrl->ioq_live)
61bff8ef
JS
2722 ret = nvme_fc_create_io_queues(ctrl);
2723 else
3e493c00 2724 ret = nvme_fc_recreate_io_queues(ctrl);
61bff8ef
JS
2725 if (ret)
2726 goto out_term_aen_ops;
2727 }
2728
2729 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
61bff8ef 2730
fdf9dfa8 2731 ctrl->ctrl.nr_reconnects = 0;
61bff8ef 2732
44c6ec77
JS
2733 if (changed)
2734 nvme_start_ctrl(&ctrl->ctrl);
61bff8ef
JS
2735
2736 return 0; /* Success */
2737
2738out_term_aen_ops:
2739 nvme_fc_term_aen_ops(ctrl);
61bff8ef
JS
2740out_disconnect_admin_queue:
2741 /* send a Disconnect(association) LS to fc-nvme target */
2742 nvme_fc_xmt_disconnect_assoc(ctrl);
bcde5f0f 2743 ctrl->association_id = 0;
61bff8ef
JS
2744out_delete_hw_queue:
2745 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2746out_free_queue:
2747 nvme_fc_free_queue(&ctrl->queues[0]);
158bfb88
JS
2748 ctrl->assoc_active = false;
2749 nvme_fc_ctlr_inactive_on_rport(ctrl);
e399441d
JS
2750
2751 return ret;
2752}
2753
61bff8ef
JS
2754/*
2755 * This routine stops operation of the controller on the host side.
2756 * On the host os stack side: Admin and IO queues are stopped,
2757 * outstanding ios on them terminated via FC ABTS.
2758 * On the link side: the association is terminated.
2759 */
2760static void
2761nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
2762{
2763 unsigned long flags;
2764
158bfb88
JS
2765 if (!ctrl->assoc_active)
2766 return;
2767 ctrl->assoc_active = false;
2768
61bff8ef
JS
2769 spin_lock_irqsave(&ctrl->lock, flags);
2770 ctrl->flags |= FCCTRL_TERMIO;
2771 ctrl->iocnt = 0;
2772 spin_unlock_irqrestore(&ctrl->lock, flags);
2773
2774 /*
2775 * If io queues are present, stop them and terminate all outstanding
2776 * ios on them. As FC allocates FC exchange for each io, the
2777 * transport must contact the LLDD to terminate the exchange,
2778 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
2779 * to tell us which ios are busy and invoke a transport routine
2780 * to kill them with the LLDD. After terminating the exchange
2781 * the LLDD will call the transport's normal io done path, but it
2782 * will have an aborted status. The done path will return the
2783 * io requests back to the block layer as part of normal completions
2784 * (but with error status).
2785 */
d858e5f0 2786 if (ctrl->ctrl.queue_count > 1) {
61bff8ef
JS
2787 nvme_stop_queues(&ctrl->ctrl);
2788 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2789 nvme_fc_terminate_exchange, &ctrl->ctrl);
622b8b68 2790 blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
61bff8ef
JS
2791 }
2792
2793 /*
2794 * Other transports, which don't have link-level contexts bound
2795 * to sqe's, would try to gracefully shutdown the controller by
2796 * writing the registers for shutdown and polling (call
2797 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
2798 * just aborted and we will wait on those contexts, and given
2799 * there was no indication of how live the controller is on the
2800 * link, don't send more io to create more contexts for the
2801 * shutdown. Let the controller fail via keepalive failure if
2802 * its still present.
2803 */
2804
2805 /*
2806 * clean up the admin queue. Same thing as above.
2807 * use blk_mq_tagset_busy_iter() and the transport routine to
2808 * terminate the exchanges.
2809 */
4c984154 2810 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
61bff8ef
JS
2811 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2812 nvme_fc_terminate_exchange, &ctrl->ctrl);
622b8b68 2813 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
61bff8ef
JS
2814
2815 /* kill the aens as they are a separate path */
2816 nvme_fc_abort_aen_ops(ctrl);
2817
2818 /* wait for all io that had to be aborted */
8a82dbf1 2819 spin_lock_irq(&ctrl->lock);
36715cf4 2820 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
61bff8ef 2821 ctrl->flags &= ~FCCTRL_TERMIO;
8a82dbf1 2822 spin_unlock_irq(&ctrl->lock);
61bff8ef
JS
2823
2824 nvme_fc_term_aen_ops(ctrl);
2825
2826 /*
2827 * send a Disconnect(association) LS to fc-nvme target
2828 * Note: could have been sent at top of process, but
2829 * cleaner on link traffic if after the aborts complete.
2830 * Note: if association doesn't exist, association_id will be 0
2831 */
2832 if (ctrl->association_id)
2833 nvme_fc_xmt_disconnect_assoc(ctrl);
2834
bcde5f0f
JS
2835 ctrl->association_id = 0;
2836
61bff8ef
JS
2837 if (ctrl->ctrl.tagset) {
2838 nvme_fc_delete_hw_io_queues(ctrl);
2839 nvme_fc_free_io_queues(ctrl);
2840 }
2841
2842 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2843 nvme_fc_free_queue(&ctrl->queues[0]);
158bfb88 2844
d625d05e
JS
2845 /* re-enable the admin_q so anything new can fast fail */
2846 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2847
02d62a8b
JS
2848 /* resume the io queues so that things will fast fail */
2849 nvme_start_queues(&ctrl->ctrl);
2850
158bfb88 2851 nvme_fc_ctlr_inactive_on_rport(ctrl);
61bff8ef
JS
2852}
2853
2854static void
c5017e85 2855nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
61bff8ef 2856{
c5017e85 2857 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
61bff8ef 2858
4cff280a 2859 cancel_work_sync(&ctrl->err_work);
61bff8ef 2860 cancel_delayed_work_sync(&ctrl->connect_work);
61bff8ef
JS
2861 /*
2862 * kill the association on the link side. this will block
2863 * waiting for io to terminate
2864 */
2865 nvme_fc_delete_association(ctrl);
61bff8ef
JS
2866}
2867
5bbecdbc
JS
2868static void
2869nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
2870{
2b632970
JS
2871 struct nvme_fc_rport *rport = ctrl->rport;
2872 struct nvme_fc_remote_port *portptr = &rport->remoteport;
2873 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
2874 bool recon = true;
5bbecdbc 2875
ad6a0a52 2876 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
5bbecdbc 2877 return;
5bbecdbc 2878
2b632970 2879 if (portptr->port_state == FC_OBJSTATE_ONLINE)
5bbecdbc 2880 dev_info(ctrl->ctrl.device,
2b632970
JS
2881 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
2882 ctrl->cnum, status);
2883 else if (time_after_eq(jiffies, rport->dev_loss_end))
2884 recon = false;
5bbecdbc 2885
2b632970
JS
2886 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
2887 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2888 dev_info(ctrl->ctrl.device,
2889 "NVME-FC{%d}: Reconnect attempt in %ld "
2890 "seconds\n",
2891 ctrl->cnum, recon_delay / HZ);
2892 else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
2893 recon_delay = rport->dev_loss_end - jiffies;
96e24801 2894
2b632970 2895 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
5bbecdbc 2896 } else {
2b632970
JS
2897 if (portptr->port_state == FC_OBJSTATE_ONLINE)
2898 dev_warn(ctrl->ctrl.device,
5bbecdbc 2899 "NVME-FC{%d}: Max reconnect attempts (%d) "
77d0612d 2900 "reached.\n",
fdf9dfa8 2901 ctrl->cnum, ctrl->ctrl.nr_reconnects);
2b632970
JS
2902 else
2903 dev_warn(ctrl->ctrl.device,
2904 "NVME-FC{%d}: dev_loss_tmo (%d) expired "
77d0612d
MG
2905 "while waiting for remoteport connectivity.\n",
2906 ctrl->cnum, portptr->dev_loss_tmo);
c5017e85 2907 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
5bbecdbc
JS
2908 }
2909}
2910
61bff8ef 2911static void
4cff280a 2912__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
61bff8ef 2913{
c869e494
JS
2914 /*
2915 * if state is connecting - the error occurred as part of a
2916 * reconnect attempt. The create_association error paths will
2917 * clean up any outstanding io.
2918 *
2919 * if it's a different state - ensure all pending io is
2920 * terminated. Given this can delay while waiting for the
2921 * aborted io to return, we recheck adapter state below
2922 * before changing state.
2923 */
2924 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
2925 nvme_stop_keep_alive(&ctrl->ctrl);
44c6ec77 2926
c869e494
JS
2927 /* will block while waiting for io to terminate */
2928 nvme_fc_delete_association(ctrl);
2929 }
61bff8ef 2930
4cff280a
JS
2931 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
2932 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
44c6ec77
JS
2933 dev_err(ctrl->ctrl.device,
2934 "NVME-FC{%d}: error_recovery: Couldn't change state "
ad6a0a52 2935 "to CONNECTING\n", ctrl->cnum);
4cff280a
JS
2936}
2937
2938static void
2939nvme_fc_reset_ctrl_work(struct work_struct *work)
2940{
2941 struct nvme_fc_ctrl *ctrl =
2942 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
2943 int ret;
2944
2945 __nvme_fc_terminate_io(ctrl);
2946
2947 nvme_stop_ctrl(&ctrl->ctrl);
44c6ec77 2948
2b632970 2949 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
96e24801 2950 ret = nvme_fc_create_association(ctrl);
2b632970
JS
2951 else
2952 ret = -ENOTCONN;
2953
5bbecdbc
JS
2954 if (ret)
2955 nvme_fc_reconnect_or_delete(ctrl, ret);
2956 else
61bff8ef 2957 dev_info(ctrl->ctrl.device,
2b632970
JS
2958 "NVME-FC{%d}: controller reset complete\n",
2959 ctrl->cnum);
61bff8ef
JS
2960}
2961
4cff280a
JS
2962static void
2963nvme_fc_connect_err_work(struct work_struct *work)
2964{
2965 struct nvme_fc_ctrl *ctrl =
2966 container_of(work, struct nvme_fc_ctrl, err_work);
2967
2968 __nvme_fc_terminate_io(ctrl);
2969
2970 atomic_set(&ctrl->err_work_active, 0);
2971
2972 /*
2973 * Rescheduling the connection after recovering
2974 * from the io error is left to the reconnect work
2975 * item, which is what should have stalled waiting on
2976 * the io that had the error that scheduled this work.
2977 */
2978}
2979
61bff8ef
JS
2980static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2981 .name = "fc",
2982 .module = THIS_MODULE,
d3d5b87d 2983 .flags = NVME_F_FABRICS,
61bff8ef
JS
2984 .reg_read32 = nvmf_reg_read32,
2985 .reg_read64 = nvmf_reg_read64,
2986 .reg_write32 = nvmf_reg_write32,
61bff8ef
JS
2987 .free_ctrl = nvme_fc_nvme_ctrl_freed,
2988 .submit_async_event = nvme_fc_submit_async_event,
c5017e85 2989 .delete_ctrl = nvme_fc_delete_ctrl,
61bff8ef
JS
2990 .get_address = nvmf_get_address,
2991};
2992
2993static void
2994nvme_fc_connect_ctrl_work(struct work_struct *work)
2995{
2996 int ret;
2997
2998 struct nvme_fc_ctrl *ctrl =
2999 container_of(to_delayed_work(work),
3000 struct nvme_fc_ctrl, connect_work);
3001
3002 ret = nvme_fc_create_association(ctrl);
5bbecdbc
JS
3003 if (ret)
3004 nvme_fc_reconnect_or_delete(ctrl, ret);
3005 else
61bff8ef 3006 dev_info(ctrl->ctrl.device,
4c984154 3007 "NVME-FC{%d}: controller connect complete\n",
61bff8ef
JS
3008 ctrl->cnum);
3009}
3010
3011
3012static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
3013 .queue_rq = nvme_fc_queue_rq,
3014 .complete = nvme_fc_complete_rq,
76f983cb 3015 .init_request = nvme_fc_init_request,
61bff8ef 3016 .exit_request = nvme_fc_exit_request,
61bff8ef
JS
3017 .init_hctx = nvme_fc_init_admin_hctx,
3018 .timeout = nvme_fc_timeout,
3019};
3020
e399441d 3021
56d5f4f1
JS
3022/*
3023 * Fails a controller request if it matches an existing controller
3024 * (association) with the same tuple:
3025 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
3026 *
3027 * The ports don't need to be compared as they are intrinsically
3028 * already matched by the port pointers supplied.
3029 */
3030static bool
3031nvme_fc_existing_controller(struct nvme_fc_rport *rport,
3032 struct nvmf_ctrl_options *opts)
3033{
3034 struct nvme_fc_ctrl *ctrl;
3035 unsigned long flags;
3036 bool found = false;
3037
3038 spin_lock_irqsave(&rport->lock, flags);
3039 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3040 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
3041 if (found)
3042 break;
3043 }
3044 spin_unlock_irqrestore(&rport->lock, flags);
3045
3046 return found;
3047}
3048
e399441d 3049static struct nvme_ctrl *
61bff8ef 3050nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
e399441d
JS
3051 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
3052{
3053 struct nvme_fc_ctrl *ctrl;
3054 unsigned long flags;
4c984154 3055 int ret, idx;
e399441d 3056
85e6a6ad
JS
3057 if (!(rport->remoteport.port_role &
3058 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
3059 ret = -EBADR;
3060 goto out_fail;
3061 }
3062
56d5f4f1
JS
3063 if (!opts->duplicate_connect &&
3064 nvme_fc_existing_controller(rport, opts)) {
3065 ret = -EALREADY;
3066 goto out_fail;
3067 }
3068
e399441d
JS
3069 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
3070 if (!ctrl) {
3071 ret = -ENOMEM;
3072 goto out_fail;
3073 }
3074
3075 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
3076 if (idx < 0) {
3077 ret = -ENOSPC;
8c5c6605 3078 goto out_free_ctrl;
e399441d
JS
3079 }
3080
3081 ctrl->ctrl.opts = opts;
4c984154 3082 ctrl->ctrl.nr_reconnects = 0;
06f3d71e
JS
3083 if (lport->dev)
3084 ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3085 else
3086 ctrl->ctrl.numa_node = NUMA_NO_NODE;
e399441d 3087 INIT_LIST_HEAD(&ctrl->ctrl_list);
e399441d
JS
3088 ctrl->lport = lport;
3089 ctrl->rport = rport;
3090 ctrl->dev = lport->dev;
e399441d 3091 ctrl->cnum = idx;
4c984154 3092 ctrl->ioq_live = false;
158bfb88 3093 ctrl->assoc_active = false;
4cff280a 3094 atomic_set(&ctrl->err_work_active, 0);
8a82dbf1 3095 init_waitqueue_head(&ctrl->ioabort_wait);
e399441d 3096
e399441d
JS
3097 get_device(ctrl->dev);
3098 kref_init(&ctrl->ref);
3099
d86c4d8e 3100 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
61bff8ef 3101 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
4cff280a 3102 INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
e399441d
JS
3103 spin_lock_init(&ctrl->lock);
3104
3105 /* io queue count */
d858e5f0 3106 ctrl->ctrl.queue_count = min_t(unsigned int,
e399441d
JS
3107 opts->nr_io_queues,
3108 lport->ops->max_hw_queues);
d858e5f0 3109 ctrl->ctrl.queue_count++; /* +1 for admin queue */
e399441d
JS
3110
3111 ctrl->ctrl.sqsize = opts->queue_size - 1;
3112 ctrl->ctrl.kato = opts->kato;
4c984154 3113 ctrl->ctrl.cntlid = 0xffff;
e399441d
JS
3114
3115 ret = -ENOMEM;
d858e5f0
SG
3116 ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3117 sizeof(struct nvme_fc_queue), GFP_KERNEL);
e399441d 3118 if (!ctrl->queues)
61bff8ef 3119 goto out_free_ida;
e399441d 3120
3e493c00
JS
3121 nvme_fc_init_queue(ctrl, 0);
3122
61bff8ef
JS
3123 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
3124 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
38dabe21 3125 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
61bff8ef 3126 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
103e515e 3127 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
d3d0bc78
BVA
3128 ctrl->admin_tag_set.cmd_size =
3129 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
3130 ctrl->lport->ops->fcprqst_priv_sz);
61bff8ef
JS
3131 ctrl->admin_tag_set.driver_data = ctrl;
3132 ctrl->admin_tag_set.nr_hw_queues = 1;
3133 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
5a22e2bf 3134 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
e399441d 3135
61bff8ef 3136 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
e399441d 3137 if (ret)
61bff8ef 3138 goto out_free_queues;
34b6c231 3139 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
e399441d 3140
e7832cb4
SG
3141 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3142 if (IS_ERR(ctrl->ctrl.fabrics_q)) {
3143 ret = PTR_ERR(ctrl->ctrl.fabrics_q);
3144 goto out_free_admin_tag_set;
3145 }
3146
61bff8ef
JS
3147 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3148 if (IS_ERR(ctrl->ctrl.admin_q)) {
3149 ret = PTR_ERR(ctrl->ctrl.admin_q);
e7832cb4 3150 goto out_cleanup_fabrics_q;
e399441d
JS
3151 }
3152
61bff8ef
JS
3153 /*
3154 * Would have been nice to init io queues tag set as well.
3155 * However, we require interaction from the controller
3156 * for max io queue count before we can do so.
3157 * Defer this to the connect path.
3158 */
e399441d 3159
61bff8ef
JS
3160 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3161 if (ret)
3162 goto out_cleanup_admin_q;
e399441d 3163
61bff8ef 3164 /* at this point, teardown path changes to ref counting on nvme ctrl */
e399441d
JS
3165
3166 spin_lock_irqsave(&rport->lock, flags);
3167 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3168 spin_unlock_irqrestore(&rport->lock, flags);
3169
4c984154
JS
3170 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
3171 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
17c4dc6e 3172 dev_err(ctrl->ctrl.device,
4c984154
JS
3173 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
3174 goto fail_ctrl;
3175 }
17c4dc6e 3176
4c984154 3177 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
4c984154
JS
3178 dev_err(ctrl->ctrl.device,
3179 "NVME-FC{%d}: failed to schedule initial connect\n",
3180 ctrl->cnum);
3181 goto fail_ctrl;
e399441d
JS
3182 }
3183
4c984154 3184 flush_delayed_work(&ctrl->connect_work);
2cb657bc 3185
61bff8ef
JS
3186 dev_info(ctrl->ctrl.device,
3187 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3188 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
e399441d 3189
61bff8ef 3190 return &ctrl->ctrl;
e399441d 3191
4c984154
JS
3192fail_ctrl:
3193 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3194 cancel_work_sync(&ctrl->ctrl.reset_work);
4cff280a 3195 cancel_work_sync(&ctrl->err_work);
4c984154
JS
3196 cancel_delayed_work_sync(&ctrl->connect_work);
3197
3198 ctrl->ctrl.opts = NULL;
3199
3200 /* initiate nvme ctrl ref counting teardown */
3201 nvme_uninit_ctrl(&ctrl->ctrl);
3202
3203 /* Remove core ctrl ref. */
3204 nvme_put_ctrl(&ctrl->ctrl);
3205
3206 /* as we're past the point where we transition to the ref
3207 * counting teardown path, if we return a bad pointer here,
3208 * the calling routine, thinking it's prior to the
3209 * transition, will do an rport put. Since the teardown
3210 * path also does a rport put, we do an extra get here
3211 * so proper order/teardown happens.
3212 */
3213 nvme_fc_rport_get(rport);
3214
3215 return ERR_PTR(-EIO);
3216
61bff8ef
JS
3217out_cleanup_admin_q:
3218 blk_cleanup_queue(ctrl->ctrl.admin_q);
e7832cb4
SG
3219out_cleanup_fabrics_q:
3220 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
61bff8ef
JS
3221out_free_admin_tag_set:
3222 blk_mq_free_tag_set(&ctrl->admin_tag_set);
3223out_free_queues:
3224 kfree(ctrl->queues);
e399441d 3225out_free_ida:
61bff8ef 3226 put_device(ctrl->dev);
e399441d
JS
3227 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
3228out_free_ctrl:
3229 kfree(ctrl);
3230out_fail:
e399441d
JS
3231 /* exit via here doesn't follow ctlr ref points */
3232 return ERR_PTR(ret);
3233}
3234
e399441d
JS
3235
3236struct nvmet_fc_traddr {
3237 u64 nn;
3238 u64 pn;
3239};
3240
e399441d 3241static int
9c5358e1 3242__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
e399441d 3243{
e399441d
JS
3244 u64 token64;
3245
9c5358e1
JS
3246 if (match_u64(sstr, &token64))
3247 return -EINVAL;
3248 *val = token64;
e399441d 3249
9c5358e1
JS
3250 return 0;
3251}
e399441d 3252
9c5358e1
JS
3253/*
3254 * This routine validates and extracts the WWN's from the TRADDR string.
3255 * As kernel parsers need the 0x to determine number base, universally
3256 * build string to parse with 0x prefix before parsing name strings.
3257 */
3258static int
3259nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
3260{
3261 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
3262 substring_t wwn = { name, &name[sizeof(name)-1] };
3263 int nnoffset, pnoffset;
3264
d4e4230c 3265 /* validate if string is one of the 2 allowed formats */
9c5358e1
JS
3266 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
3267 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
3268 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
3269 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
3270 nnoffset = NVME_FC_TRADDR_OXNNLEN;
3271 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
3272 NVME_FC_TRADDR_OXNNLEN;
3273 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
3274 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
3275 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
3276 "pn-", NVME_FC_TRADDR_NNLEN))) {
3277 nnoffset = NVME_FC_TRADDR_NNLEN;
3278 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
3279 } else
3280 goto out_einval;
e399441d 3281
9c5358e1
JS
3282 name[0] = '0';
3283 name[1] = 'x';
3284 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
3285
3286 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3287 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
3288 goto out_einval;
3289
3290 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3291 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
3292 goto out_einval;
3293
3294 return 0;
3295
3296out_einval:
3297 pr_warn("%s: bad traddr string\n", __func__);
3298 return -EINVAL;
e399441d
JS
3299}
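For reference, two traddr spellings the parser above accepts, shown with hypothetical WWNs (the single character between the nn- and pn- fields is conventionally ':'):

/*
 * Hypothetical examples only:
 *	"nn-0x20000090fa942779:pn-0x10000090fa942779"	(0x-prefixed form)
 *	"nn-20000090fa942779:pn-10000090fa942779"	(bare-hex form)
 * In both cases the code above rebuilds "0x<16 hex digits>" before
 * calling match_u64(), since the kernel parser needs the 0x prefix to
 * pick the number base.
 */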
3300
3301static struct nvme_ctrl *
3302nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
3303{
3304 struct nvme_fc_lport *lport;
3305 struct nvme_fc_rport *rport;
61bff8ef 3306 struct nvme_ctrl *ctrl;
e399441d
JS
3307 struct nvmet_fc_traddr laddr = { 0L, 0L };
3308 struct nvmet_fc_traddr raddr = { 0L, 0L };
3309 unsigned long flags;
3310 int ret;
3311
9c5358e1 3312 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
e399441d
JS
3313 if (ret || !raddr.nn || !raddr.pn)
3314 return ERR_PTR(-EINVAL);
3315
9c5358e1 3316 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
e399441d
JS
3317 if (ret || !laddr.nn || !laddr.pn)
3318 return ERR_PTR(-EINVAL);
3319
3320 /* find the host and remote ports to connect together */
3321 spin_lock_irqsave(&nvme_fc_lock, flags);
3322 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3323 if (lport->localport.node_name != laddr.nn ||
3324 lport->localport.port_name != laddr.pn)
3325 continue;
3326
3327 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3328 if (rport->remoteport.node_name != raddr.nn ||
3329 rport->remoteport.port_name != raddr.pn)
3330 continue;
3331
3332 /* if fail to get reference fall through. Will error */
3333 if (!nvme_fc_rport_get(rport))
3334 break;
3335
3336 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3337
61bff8ef
JS
3338 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3339 if (IS_ERR(ctrl))
3340 nvme_fc_rport_put(rport);
3341 return ctrl;
e399441d
JS
3342 }
3343 }
3344 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3345
4fb135ad
JT
3346 pr_warn("%s: %s - %s combination not found\n",
3347 __func__, opts->traddr, opts->host_traddr);
e399441d
JS
3348 return ERR_PTR(-ENOENT);
3349}
3350
3351
3352static struct nvmf_transport_ops nvme_fc_transport = {
3353 .name = "fc",
0de5cd36 3354 .module = THIS_MODULE,
e399441d 3355 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
5bbecdbc 3356 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
e399441d
JS
3357 .create_ctrl = nvme_fc_create_ctrl,
3358};
3359
97faec53
JS
3360/* Arbitrary successive failures max. With lots of subsystems could be high */
3361#define DISCOVERY_MAX_FAIL 20
3362
3363static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
3364 struct device_attribute *attr, const char *buf, size_t count)
3365{
3366 unsigned long flags;
3367 LIST_HEAD(local_disc_list);
3368 struct nvme_fc_lport *lport;
3369 struct nvme_fc_rport *rport;
3370 int failcnt = 0;
3371
3372 spin_lock_irqsave(&nvme_fc_lock, flags);
3373restart:
3374 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3375 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3376 if (!nvme_fc_lport_get(lport))
3377 continue;
3378 if (!nvme_fc_rport_get(rport)) {
3379 /*
3380 * This is a temporary condition. Upon restart
3381 * this rport will be gone from the list.
3382 *
3383 * Revert the lport get and retry. Anything
3384 * added to the list already will be skipped (as
3385 * they are no longer list_empty). Loops should
3386 * resume at rports that were not yet seen.
3387 */
3388 nvme_fc_lport_put(lport);
3389
3390 if (failcnt++ < DISCOVERY_MAX_FAIL)
3391 goto restart;
3392
3393 pr_err("nvme_discovery: too many reference "
3394 "failures\n");
3395 goto process_local_list;
3396 }
3397 if (list_empty(&rport->disc_list))
3398 list_add_tail(&rport->disc_list,
3399 &local_disc_list);
3400 }
3401 }
3402
3403process_local_list:
3404 while (!list_empty(&local_disc_list)) {
3405 rport = list_first_entry(&local_disc_list,
3406 struct nvme_fc_rport, disc_list);
3407 list_del_init(&rport->disc_list);
3408 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3409
3410 lport = rport->lport;
3411 /* signal discovery. Won't hurt if it repeats */
3412 nvme_fc_signal_discovery_scan(lport, rport);
3413 nvme_fc_rport_put(rport);
3414 nvme_fc_lport_put(lport);
3415
3416 spin_lock_irqsave(&nvme_fc_lock, flags);
3417 }
3418 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3419
3420 return count;
3421}
3422static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
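Usage note (an assumption based on the "fc" class and "fc_udev_device" device registered later in this file): writing any value to /sys/class/fc/fc_udev_device/nvme_discovery from user space re-signals discovery udev events for every registered lport/rport pair, which boot-time FC autoconnect tooling can use to (re)establish connections.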
3423
3424static struct attribute *nvme_fc_attrs[] = {
3425 &dev_attr_nvme_discovery.attr,
3426 NULL
3427};
3428
3429static struct attribute_group nvme_fc_attr_group = {
3430 .attrs = nvme_fc_attrs,
3431};
3432
3433static const struct attribute_group *nvme_fc_attr_groups[] = {
3434 &nvme_fc_attr_group,
3435 NULL
3436};
3437
3438static struct class fc_class = {
3439 .name = "fc",
3440 .dev_groups = nvme_fc_attr_groups,
3441 .owner = THIS_MODULE,
3442};
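/*
 * Hedged note (the udev rule below is only a sketch, and the helper name is
 * a placeholder, not shipped by this file): discovery signalling posts a
 * "change" uevent on fc_udev_device carrying FC_EVENT=nvmediscovery plus
 * NVMEFC_HOST_TRADDR/NVMEFC_TRADDR, which an autoconnect rule can match
 * along these lines:
 *
 *   ACTION=="change", SUBSYSTEM=="fc", ENV{FC_EVENT}=="nvmediscovery", RUN+="<autoconnect helper>"
 */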
3443
3444static int __init nvme_fc_init_module(void)
3445{
3446 int ret;
3447
3448 nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
3449 if (!nvme_fc_wq)
3450 return -ENOMEM;
3451
3452 /*
3453 * NOTE:
3454 * It is expected that in the future the kernel will combine
3455 * the FC-isms that currently live under SCSI, and that NVMe is
3456 * now adding to, into a new standalone FC class. The SCSI and
3457 * NVMe protocols and their devices would then sit under this
3458 * new FC class.
3459 *
3460 * As we need something to post FC-specific udev events to,
3461 * specifically for nvme probe events, start by creating the
3462 * new device class. When the new standalone FC class is
3463 * put in place, this code will move to a more generic
3464 * location for the class.
3465 */
3466 ret = class_register(&fc_class);
3467 if (ret) {
3468 pr_err("couldn't register class fc\n");
3469 goto out_destroy_wq;
3470 }
3471
3472 /*
3473 * Create a device for the FC-centric udev events
3474 */
3475 fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
3476 "fc_udev_device");
3477 if (IS_ERR(fc_udev_device)) {
3478 pr_err("couldn't create fc_udev device!\n");
3479 ret = PTR_ERR(fc_udev_device);
3480 goto out_destroy_class;
3481 }
3482
3483 ret = nvmf_register_transport(&nvme_fc_transport);
3484 if (ret)
3485 goto out_destroy_device;
3486
3487 return 0;
3488
3489out_destroy_device:
3490 device_destroy(&fc_class, MKDEV(0, 0));
3491out_destroy_class:
3492 class_unregister(&fc_class);
3493out_destroy_wq:
3494 destroy_workqueue(nvme_fc_wq);
3495
3496 return ret;
3497}
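/*
 * Added note (hedged): WQ_MEM_RECLAIM above asks the workqueue core to keep
 * a rescuer thread for nvme_fc_wq, so work queued on it can still make
 * progress under memory pressure; that matters for a queue used on a
 * storage error/teardown path.
 */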
3498
3499static void
3500nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
3501{
3502 struct nvme_fc_ctrl *ctrl;
3503
3504 spin_lock(&rport->lock);
3505 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3506 dev_warn(ctrl->ctrl.device,
3507 "NVME-FC{%d}: transport unloading: deleting ctrl\n",
3508 ctrl->cnum);
3509 nvme_delete_ctrl(&ctrl->ctrl);
3510 }
3511 spin_unlock(&rport->lock);
3512}
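/*
 * Added clarification (based on the generic nvme core behaviour):
 * nvme_delete_ctrl() only moves the controller to the DELETING state and
 * queues the deletion work; it does not block, which is why it is safe to
 * call for every controller while holding rport->lock here.
 */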
3513
3514static void
3515nvme_fc_cleanup_for_unload(void)
3516{
3517 struct nvme_fc_lport *lport;
3518 struct nvme_fc_rport *rport;
3519
3520 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3521 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3522 nvme_fc_delete_controllers(rport);
3523 }
3524 }
3525}
3526
3527static void __exit nvme_fc_exit_module(void)
3528{
3529 unsigned long flags;
3530 bool need_cleanup = false;
3531
3532 spin_lock_irqsave(&nvme_fc_lock, flags);
3533 nvme_fc_waiting_to_unload = true;
3534 if (!list_empty(&nvme_fc_lport_list)) {
3535 need_cleanup = true;
3536 nvme_fc_cleanup_for_unload();
3537 }
3538 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3539 if (need_cleanup) {
3540 pr_info("%s: waiting for ctrl deletes\n", __func__);
3541 wait_for_completion(&nvme_fc_unload_proceed);
3542 pr_info("%s: ctrl deletes complete\n", __func__);
3543 }
3544
3545 nvmf_unregister_transport(&nvme_fc_transport);
3546
3547 ida_destroy(&nvme_fc_local_port_cnt);
3548 ida_destroy(&nvme_fc_ctrl_cnt);
3549
3550 device_destroy(&fc_class, MKDEV(0, 0));
3551 class_unregister(&fc_class);
3552 destroy_workqueue(nvme_fc_wq);
3553}
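/*
 * Added note (hedged): nvme_fc_unload_proceed is expected to be completed
 * by the lport teardown path once the last local port is freed while
 * nvme_fc_waiting_to_unload is set; that is what allows the
 * wait_for_completion() above to return after the controller deletes
 * finish.
 */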
3554
3555module_init(nvme_fc_init_module);
3556module_exit(nvme_fc_exit_module);
3557
3558MODULE_LICENSE("GPL v2");