// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>

/* *************************** Data Structures/Defines ****************** */

enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */
#define NVME_FC_DEFAULT_RECONNECT_TMO	2	/* delay between reconnect
						 * attempts after a connection
						 * failure while connected
						 */

struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u32			seqno;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};

struct nvmefc_ls_rcv_op {
	struct nvme_fc_rport		*rport;
	struct nvmefc_ls_rsp		*lsrsp;
	union nvmefc_ls_requests	*rqstbuf;
	union nvmefc_ls_responses	*rspbuf;
	u16				rqstdatalen;
	bool				handled;
	dma_addr_t			rspdma;
	struct list_head		lsrcv_list;	/* rport->ls_rcv_list */
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};

struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};

struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op	op;
	struct scatterlist	sgl[NVME_INLINE_SG_CNT];
	uint8_t			priv[];
};

struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_lport_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list;	/* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct list_head		ls_rcv_list;
	struct list_head		disc_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
	struct work_struct		lsrcv_work;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

/* fc_ctrl flags values - specified as bit positions */
#define ASSOC_ACTIVE		0
#define ASSOC_FAILED		1
#define FCCTRL_TERMIO		2

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	bool			ioq_live;
	atomic_t		err_work_active;
	u64			association_id;
	struct nvmefc_ls_rcv_op	*rcv_disconn;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct delayed_work	connect_work;
	struct work_struct	err_work;

	struct kref		ref;
	unsigned long		flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};
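
/*
 * Object model (summary of the structures above): an nvme_fc_lport
 * represents an LLDD-registered local FC port and owns a list of
 * nvme_fc_rport remote ports (endp_list); each rport in turn carries
 * the list of nvme_fc_ctrl controllers associated with it. All three
 * are kref-counted, and the __aligned(sizeof(u64)) annotations keep
 * LLDD private areas allocated immediately after the structures
 * naturally aligned.
 */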

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}

/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;

static bool nvme_fc_waiting_to_unload;
static DECLARE_COMPLETION(nvme_fc_unload_proceed);

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;

static void nvme_fc_complete_rq(struct request *rq);

/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);


static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
		complete(&nvme_fc_unload_proceed);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}

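/*
 * Look for a previously deleted lport with matching WWNs whose
 * references have not yet expired. If one is found (and it was
 * registered against the same device), take a reference and bring it
 * back online rather than allocating a new lport.
 */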
static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:    pointer to information about the port to be registered
 * @template: LLDD entrypoints and operational parameters for the port
 * @dev:      physical hardware device node port corresponds to. Will be
 *            used for DMA mappings
 * @portptr:  pointer to a local port pointer. Upon success, the routine
 *            will allocate a nvme_fc_local_port structure and place its
 *            address in the local port pointer. Upon failure, local port
 *            pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed. If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	if (template->local_priv_sz)
		newrec->localport.private = &newrec[1];
	else
		newrec->localport.private = NULL;
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
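
/*
 * Illustrative registration sketch (not part of the original source;
 * the example_* names are hypothetical). An LLDD supplies a template
 * with at least the entry points and limits validated above, then
 * registers each host port it discovers:
 *
 *	static struct nvme_fc_port_template example_lldd_template = {
 *		.localport_delete	= example_localport_delete,
 *		.remoteport_delete	= example_remoteport_delete,
 *		.ls_req			= example_ls_req,
 *		.fcp_io			= example_fcp_io,
 *		.ls_abort		= example_ls_abort,
 *		.fcp_abort		= example_fcp_abort,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 64,
 *		.max_dif_sgl_segments	= 64,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.local_priv_sz		= sizeof(struct example_lport_priv),
 *	};
 *
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name = wwnn,
 *		.port_name = wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
 *		.port_id   = did,
 *	};
 *	struct nvme_fc_local_port *localport;
 *	int ret = nvme_fc_register_localport(&pinfo, &example_lldd_template,
 *					     &pdev->dev, &localport);
 */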

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                                LLDD to deregister/remove a previously
 *                                registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

/*
 * TRADDR strings, per FC-NVME are fixed format:
 *     "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * being specified:
 *     "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 * 19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64

static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}

static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}

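/*
 * Called with the rport lock held (see
 * nvme_fc_attach_to_suspended_rport()) when connectivity to the
 * remote port is re-established: schedules an immediate reconnect for
 * controllers that were waiting, and leaves resetting/deleting
 * controllers alone.
 */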
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}

589nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
590 struct nvme_fc_port_info *pinfo)
591{
592 struct nvme_fc_rport *rport;
593 struct nvme_fc_ctrl *ctrl;
594 unsigned long flags;
595
596 spin_lock_irqsave(&nvme_fc_lock, flags);
597
598 list_for_each_entry(rport, &lport->endp_list, endp_list) {
599 if (rport->remoteport.node_name != pinfo->node_name ||
600 rport->remoteport.port_name != pinfo->port_name)
601 continue;
602
603 if (!nvme_fc_rport_get(rport)) {
604 rport = ERR_PTR(-ENOLCK);
605 goto out_done;
606 }
607
608 spin_unlock_irqrestore(&nvme_fc_lock, flags);
609
610 spin_lock_irqsave(&rport->lock, flags);
611
612 /* has it been unregistered */
613 if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
614 /* means lldd called us twice */
615 spin_unlock_irqrestore(&rport->lock, flags);
616 nvme_fc_rport_put(rport);
617 return ERR_PTR(-ESTALE);
618 }
619
0cdd5fca
JS
620 rport->remoteport.port_role = pinfo->port_role;
621 rport->remoteport.port_id = pinfo->port_id;
2b632970
JS
622 rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
623 rport->dev_loss_end = 0;
624
625 /*
626 * kick off a reconnect attempt on all associations to the
627 * remote port. A successful reconnects will resume i/o.
628 */
629 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
630 nvme_fc_resume_controller(ctrl);
631
632 spin_unlock_irqrestore(&rport->lock, flags);
633
634 return rport;
635 }
636
637 rport = NULL;
638
639out_done:
640 spin_unlock_irqrestore(&nvme_fc_lock, flags);
641
642 return rport;
643}
644
static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                               LLDD to register the existence of a NVME
 *                               subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWNs.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	newrec->dev = lport->dev;
	newrec->lport = lport;
	if (lport->ops->remote_priv_sz)
		newrec->remoteport.private = &newrec[1];
	else
		newrec->remoteport.private = NULL;
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
	INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

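/*
 * Abort every outstanding LS on the rport. The lock is dropped around
 * each ls_abort() callout, so the scan restarts from the head each
 * time; FCOP_FLAGS_TERMIO marks entries already aborted so the loop
 * terminates.
 */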
static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}

static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer. Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects. No need to do anything
		 * further. Reconnects will be attempted until either the
		 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
		 * remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	default:
		/* no action to take - let it delete */
		break;
	}
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                                 LLDD to deregister/remove a previously
 *                                 registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	/*
	 * release the reference; once all controllers go away (which
	 * should only occur after dev_loss_tmo expires), the rport can
	 * be torn down.
	 */
	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);

/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}

/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
		 * wait until driver calls back. lldd responsible for
		 * the timeout action
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}

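/*
 * Send a Create Association LS for the admin queue and validate the
 * returned ACC. On success, record the association and connection IDs
 * under ctrl->lock and mark the queue connected.
 */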
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	unsigned long flags;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Association failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = &assoc_acc[1];
	else
		lsreq->private = NULL;

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create Association LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		spin_lock_irqsave(&ctrl->lock, flags);
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*conn_rqst) + sizeof(*conn_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&conn_acc[1];
	else
		lsreq->private = NULL;

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create I/O Connection LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect I/O queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

1396nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1397{
1398 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
e399441d 1399
c913a8b0 1400 __nvme_fc_finish_ls_req(lsop);
e399441d 1401
d4e4230c 1402 /* fc-nvme initiator doesn't care about success or failure of cmd */
e399441d
JS
1403
1404 kfree(lsop);
1405}
1406
1407/*
1408 * This routine sends a FC-NVME LS to disconnect (aka terminate)
1409 * the FC-NVME Association. Terminating the association also
1410 * terminates the FC-NVME connections (per queue, both admin and io
1411 * queues) that are part of the association. E.g. things are torn
1412 * down, and the related FC-NVME Association ID and Connection IDs
1413 * become invalid.
1414 *
1415 * The behavior of the fc-nvme initiator is such that it's
1416 * understanding of the association and connections will implicitly
1417 * be torn down. The action is implicit as it may be due to a loss of
1418 * connectivity with the fc-nvme target, so you may never get a
1419 * response even if you tried. As such, the action of this routine
1420 * is to asynchronously send the LS, ignore any results of the LS, and
1421 * continue on with terminating the association. If the fc-nvme target
1422 * is present and receives the LS, it too can tear down.
1423 */
1424static void
1425nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1426{
53b2b2f5
JS
1427 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
1428 struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
e399441d
JS
1429 struct nvmefc_ls_req_op *lsop;
1430 struct nvmefc_ls_req *lsreq;
c913a8b0 1431 int ret;
e399441d
JS
1432
1433 lsop = kzalloc((sizeof(*lsop) +
f56bf76f
JS
1434 sizeof(*discon_rqst) + sizeof(*discon_acc) +
1435 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1436 if (!lsop) {
1437 dev_info(ctrl->ctrl.device,
1438 "NVME-FC{%d}: send Disconnect Association "
1439 "failed: ENOMEM\n",
1440 ctrl->cnum);
e399441d 1441 return;
f56bf76f 1442 }
e399441d 1443
f56bf76f 1444 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
53b2b2f5 1445 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
f56bf76f
JS
1446 lsreq = &lsop->ls_req;
1447 if (ctrl->lport->ops->lsrqst_priv_sz)
1448 lsreq->private = (void *)&discon_acc[1];
1449 else
1450 lsreq->private = NULL;
e399441d 1451
fd5a5f22
JS
1452 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
1453 ctrl->association_id);
e399441d 1454
c913a8b0
JS
1455 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1456 nvme_fc_disconnect_assoc_done);
1457 if (ret)
1458 kfree(lsop);
e399441d
JS
1459}
1460
static void
nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
	struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);
	list_del(&lsop->lsrcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
				sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	kfree(lsop);

	nvme_fc_rport_put(rport);
}

static void
nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	int ret;

	fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
				  sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
				     lsop->lsrsp);
	if (ret) {
		dev_warn(lport->dev,
			"LLDD rejected LS RSP xmt: LS %d status %d\n",
			w0->ls_cmd, ret);
		nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
		return;
	}
}

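/*
 * Find the controller on this rport that owns the association named
 * in a received Disconnect Association LS. On a match, the lsop is
 * stashed in ctrl->rcv_disconn and the controller is returned with a
 * reference held; any previously pending disconnect LS for the same
 * association is answered with a reject.
 */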
static struct nvme_fc_ctrl *
nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
		      struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
					&lsop->rqstbuf->rq_dis_assoc;
	struct nvme_fc_ctrl *ctrl, *ret = NULL;
	struct nvmefc_ls_rcv_op *oldls = NULL;
	u64 association_id = be64_to_cpu(rqst->associd.association_id);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		if (!nvme_fc_ctrl_get(ctrl))
			continue;
		spin_lock(&ctrl->lock);
		if (association_id == ctrl->association_id) {
			oldls = ctrl->rcv_disconn;
			ctrl->rcv_disconn = lsop;
			ret = ctrl;
		}
		spin_unlock(&ctrl->lock);
		if (ret)
			/* leave the ctrl get reference */
			break;
		nvme_fc_ctrl_put(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	/* transmit a response for anything that was pending */
	if (oldls) {
		dev_info(rport->lport->dev,
			"NVME-FC{%d}: Multiple Disconnect Association "
			"LS's received\n", ctrl->cnum);
		/* overwrite good response with bogus failure */
		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
						sizeof(*oldls->rspbuf),
						rqst->w0.ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		nvme_fc_xmt_ls_rsp(oldls);
	}

	return ret;
}

/*
 * returns true to mean LS handled and ls_rsp can be sent
 * returns false to defer ls_rsp xmt (will be done as part of
 * association termination)
 */
static bool
nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
					&lsop->rqstbuf->rq_dis_assoc;
	struct fcnvme_ls_disconnect_assoc_acc *acc =
					&lsop->rspbuf->rsp_dis_assoc;
	struct nvme_fc_ctrl *ctrl = NULL;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
	if (!ret) {
		/* match an active association */
		ctrl = nvme_fc_match_disconn_ls(rport, lsop);
		if (!ctrl)
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_info(rport->lport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
					sizeof(*acc), rqst->w0.ls_cmd,
					(ret == VERR_NO_ASSOC) ?
						FCNVME_RJT_RC_INV_ASSOC :
						FCNVME_RJT_RC_LOGIC,
					FCNVME_RJT_EXP_NONE, 0);
		return true;
	}

	/* format an ACCept response */

	lsop->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);

	/*
	 * the transmit of the response will occur after the exchanges
	 * for the association have been ABTS'd by
	 * nvme_fc_delete_association().
	 */

	/* fail the association */
	nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");

	/* release the reference taken by nvme_fc_match_disconn_ls() */
	nvme_fc_ctrl_put(ctrl);

	return false;
}

/*
 * Actual Processing routine for received FC-NVME LS Requests from the LLD
 * returns true if a response should be sent afterward, false if rsp will
 * be sent asynchronously.
 */
static bool
nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	bool ret = true;

	lsop->lsrsp->nvme_fc_private = lsop;
	lsop->lsrsp->rspbuf = lsop->rspbuf;
	lsop->lsrsp->rspdma = lsop->rspdma;
	lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
	/* Be preventative. Handlers will later set a valid length. */
	lsop->lsrsp->rsplen = 0;

	/*
	 * handlers:
	 * parse request input, execute the request, and format the
	 * LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_DISCONNECT_ASSOC:
		ret = nvme_fc_ls_disconnect_assoc(lsop);
		break;
	case FCNVME_LS_DISCONNECT_CONN:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
		break;
	case FCNVME_LS_CREATE_ASSOCIATION:
	case FCNVME_LS_CREATE_CONNECTION:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
		break;
	default:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
		break;
	}

	return ret;
}

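/*
 * Work function that drains rport->ls_rcv_list. Each pass takes the
 * first unhandled op, marks it handled, and processes it with the
 * lock dropped; the scan then restarts from the head since the list
 * may have changed in the meantime.
 */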
static void
nvme_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvme_fc_rport *rport =
		container_of(work, struct nvme_fc_rport, lsrcv_work);
	struct fcnvme_ls_rqst_w0 *w0;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	bool sendrsp;

restart:
	sendrsp = true;
	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
		if (lsop->handled)
			continue;

		lsop->handled = true;
		if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
			spin_unlock_irqrestore(&rport->lock, flags);
			sendrsp = nvme_fc_handle_ls_rqst(lsop);
		} else {
			spin_unlock_irqrestore(&rport->lock, flags);
			w0 = &lsop->rqstbuf->w0;
			lsop->lsrsp->rsplen = nvme_fc_format_rjt(
						lsop->rspbuf,
						sizeof(*lsop->rspbuf),
						w0->ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		}
		if (sendrsp)
			nvme_fc_xmt_ls_rsp(lsop);
		goto restart;
	}
	spin_unlock_irqrestore(&rport->lock, flags);
}

72e6329f
JS
1702/**
1703 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
1704 * upon the reception of a NVME LS request.
1705 *
1706 * The nvme-fc layer will copy payload to an internal structure for
1707 * processing. As such, upon completion of the routine, the LLDD may
1708 * immediately free/reuse the LS request buffer passed in the call.
1709 *
1710 * If this routine returns error, the LLDD should abort the exchange.
1711 *
1712 * @remoteport: pointer to the (registered) remote port that the LS
1713 * was received from. The remoteport is associated with
1714 * a specific localport.
1715 * @lsrsp: pointer to a nvmefc_ls_rsp response structure to be
1716 * used to reference the exchange corresponding to the LS
1717 * when issuing an ls response.
1718 * @lsreqbuf: pointer to the buffer containing the LS Request
1719 * @lsreqbuf_len: length, in bytes, of the received LS request
1720 */
1721int
1722nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
1723 struct nvmefc_ls_rsp *lsrsp,
1724 void *lsreqbuf, u32 lsreqbuf_len)
1725{
1726 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
1727 struct nvme_fc_lport *lport = rport->lport;
14fd1e98
JS
1728 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
1729 struct nvmefc_ls_rcv_op *lsop;
1730 unsigned long flags;
1731 int ret;
1732
1733 nvme_fc_rport_get(rport);
72e6329f
JS
1734
1735 /* validate there's a routine to transmit a response */
14fd1e98
JS
1736 if (!lport->ops->xmt_ls_rsp) {
1737 dev_info(lport->dev,
1738 "RCV %s LS failed: no LLDD xmt_ls_rsp\n",
1739 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
1740 nvmefc_ls_names[w0->ls_cmd] : "");
1741 ret = -EINVAL;
1742 goto out_put;
1743 }
1744
1745 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
1746 dev_info(lport->dev,
1747 "RCV %s LS failed: payload too large\n",
1748 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
1749 nvmefc_ls_names[w0->ls_cmd] : "");
1750 ret = -E2BIG;
1751 goto out_put;
1752 }
1753
1754 lsop = kzalloc(sizeof(*lsop) +
1755 sizeof(union nvmefc_ls_requests) +
1756 sizeof(union nvmefc_ls_responses),
1757 GFP_KERNEL);
1758 if (!lsop) {
1759 dev_info(lport->dev,
1760 "RCV %s LS failed: No memory\n",
1761 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
1762 nvmefc_ls_names[w0->ls_cmd] : "");
1763 ret = -ENOMEM;
1764 goto out_put;
1765 }
1766 lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1];
1767 lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1];
1768
1769 lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
1770 sizeof(*lsop->rspbuf),
1771 DMA_TO_DEVICE);
1772 if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
1773 dev_info(lport->dev,
1774 "RCV %s LS failed: DMA mapping failure\n",
1775 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
1776 nvmefc_ls_names[w0->ls_cmd] : "");
1777 ret = -EFAULT;
1778 goto out_free;
1779 }
1780
1781 lsop->rport = rport;
1782 lsop->lsrsp = lsrsp;
1783
1784 memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
1785 lsop->rqstdatalen = lsreqbuf_len;
1786
1787 spin_lock_irqsave(&rport->lock, flags);
1788 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
1789 spin_unlock_irqrestore(&rport->lock, flags);
1790 ret = -ENOTCONN;
1791 goto out_unmap;
1792 }
1793 list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
1794 spin_unlock_irqrestore(&rport->lock, flags);
1795
1796 schedule_work(&rport->lsrcv_work);
1797
1798 return 0;
1799
1800out_unmap:
1801 fc_dma_unmap_single(lport->dev, lsop->rspdma,
1802 sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
1803out_free:
1804 kfree(lsop);
1805out_put:
1806 nvme_fc_rport_put(rport);
1807 return ret;
1808}
1809EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);
1810
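/*
 * Illustrative sketch (not part of the driver): how an LLDD might hand a
 * received LS frame to the transport entry point above. Only
 * nvme_fc_rcv_ls_req() and its argument types come from this API; the
 * lldd_handle_ls_frame() wrapper and lldd_abort_ls_exchange() helper are
 * hypothetical names standing in for LLDD-specific code.
 */
extern void lldd_abort_ls_exchange(struct nvmefc_ls_rsp *lsrsp); /* hypothetical */

static void lldd_handle_ls_frame(struct nvme_fc_remote_port *remoteport,
		struct nvmefc_ls_rsp *lsrsp, void *buf, u32 len)
{
	/* the payload is copied internally, so buf may be reused on return */
	if (nvme_fc_rcv_ls_req(remoteport, lsrsp, buf, len))
		lldd_abort_ls_exchange(lsrsp);	/* per the contract above */
}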
1811
1812/* *********************** NVME Ctrl Routines **************************** */
1813
1814static void
1815__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1816 struct nvme_fc_fcp_op *op)
1817{
1818 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1819 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1820 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1821 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1822
1823 atomic_set(&op->state, FCPOP_STATE_UNINIT);
1824}
1825
1826static void
1827nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1828 unsigned int hctx_idx)
1829{
1830 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1831
1832 return __nvme_fc_exit_request(set->driver_data, op);
1833}
1834
1835static int
1836__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1837{
1838 unsigned long flags;
1839 int opstate;
1840
1841 spin_lock_irqsave(&ctrl->lock, flags);
1842 opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1843 if (opstate != FCPOP_STATE_ACTIVE)
1844 atomic_set(&op->state, opstate);
1845 else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
1846 op->flags |= FCOP_FLAGS_TERMIO;
1847 		ctrl->iocnt++;
1848 	}
1849 	spin_unlock_irqrestore(&ctrl->lock, flags);
1850
1851 	if (opstate != FCPOP_STATE_ACTIVE)
1852 		return -ECANCELED;
1853
1854 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1855 &ctrl->rport->remoteport,
1856 op->queue->lldd_handle,
1857 &op->fcp_req);
1858
1859 return 0;
1860}
1861
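/*
 * Illustrative sketch (not part of the driver): the claim pattern used in
 * __nvme_fc_abort_op() above. atomic_xchg() marks the op ABORTED and hands
 * back the prior state; only the caller that displaced ACTIVE owns the
 * abort, everyone else restores what they saw and backs off with
 * -ECANCELED. The helper name is hypothetical.
 */
static int nvme_fc_claim_abort(struct nvme_fc_fcp_op *op)
{
	int opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);

	if (opstate != FCPOP_STATE_ACTIVE) {
		atomic_set(&op->state, opstate);	/* not ours to abort */
		return -ECANCELED;
	}
	return 0;
}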
1862static void
1863nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1864{
1865 	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1866 	int i;
1867
1868 /* ensure we've initialized the ops once */
1869 if (!(aen_op->flags & FCOP_FLAGS_AEN))
1870 return;
1871
1872 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
1873 __nvme_fc_abort_op(ctrl, aen_op);
1874}
1875
1876static inline void
1877__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1878 		struct nvme_fc_fcp_op *op, int opstate)
1879{
1880 	unsigned long flags;
1881
1882 if (opstate == FCPOP_STATE_ABORTED) {
1883 spin_lock_irqsave(&ctrl->lock, flags);
1884 if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
1885 op->flags & FCOP_FLAGS_TERMIO) {
1886 if (!--ctrl->iocnt)
1887 wake_up(&ctrl->ioabort_wait);
1888 }
1889 spin_unlock_irqrestore(&ctrl->lock, flags);
1890 	}
1891}
1892
1893static void
1894nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1895{
1896 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1897 struct request *rq = op->rq;
1898 struct nvmefc_fcp_req *freq = &op->fcp_req;
1899 struct nvme_fc_ctrl *ctrl = op->ctrl;
1900 struct nvme_fc_queue *queue = op->queue;
1901 struct nvme_completion *cqe = &op->rsp_iu.cqe;
1902 	struct nvme_command *sqe = &op->cmd_iu.sqe;
1903 	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1904 	union nvme_result result;
1905 	bool terminate_assoc = true;
1906 	int opstate;
1907
1908 /*
1909 * WARNING:
1910 * The current linux implementation of a nvme controller
1911 * allocates a single tag set for all io queues and sizes
1912 * the io queues to fully hold all possible tags. Thus, the
1913 * implementation does not reference or care about the sqhd
1914 * value as it never needs to use the sqhd/sqtail pointers
1915 * for submission pacing.
1916 *
1917 * This affects the FC-NVME implementation in two ways:
1918 * 1) As the value doesn't matter, we don't need to waste
1919 * cycles extracting it from ERSPs and stamping it in the
1920 * cases where the transport fabricates CQEs on successful
1921 * completions.
1922 * 2) The FC-NVME implementation requires that delivery of
1923 * ERSP completions are to go back to the nvme layer in order
1924 * relative to the rsn, such that the sqhd value will always
1925 * be "in order" for the nvme layer. As the nvme layer in
1926 * linux doesn't care about sqhd, there's no need to return
1927 * them in order.
1928 *
1929 * Additionally:
1930 * As the core nvme layer in linux currently does not look at
1931 * every field in the cqe - in cases where the FC transport must
1932 * fabricate a CQE, the following fields will not be set as they
1933 * are not referenced:
1934 * cqe.sqid, cqe.sqhd, cqe.command_id
1935 *
1936 * Failure or error of an individual i/o, in a transport
1937 * detected fashion unrelated to the nvme completion status,
1938 	 * can potentially cause the initiator and target sides to get out
1939 * of sync on SQ head/tail (aka outstanding io count allowed).
1940 * Per FC-NVME spec, failure of an individual command requires
1941 * the connection to be terminated, which in turn requires the
1942 * association to be terminated.
1943 */
1944
1945 	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
1946
1947 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1948 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1949
1950 	if (opstate == FCPOP_STATE_ABORTED)
1951 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1952 else if (freq->status) {
1953 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1954 dev_info(ctrl->ctrl.device,
1955 "NVME-FC{%d}: io failed due to lldd error %d\n",
1956 ctrl->cnum, freq->status);
1957 }
1958
1959 /*
1960 	 * For the linux implementation, if we have an unsuccessful
1961 	 * status, the blk-mq layer can typically be called with the
1962 * non-zero status and the content of the cqe isn't important.
1963 */
1964 if (status)
1965 goto done;
1966
1967 /*
1968 * command completed successfully relative to the wire
1969 * protocol. However, validate anything received and
1970 * extract the status and result from the cqe (create it
1971 * where necessary).
1972 */
1973
1974 switch (freq->rcv_rsplen) {
1975
1976 case 0:
1977 case NVME_FC_SIZEOF_ZEROS_RSP:
1978 /*
1979 * No response payload or 12 bytes of payload (which
1980 		 * should all be zeros) are considered successful; the
1981 		 * transport fabricates a CQE with no payload.
1982 */
1983 if (freq->transferred_length !=
1984 be32_to_cpu(op->cmd_iu.data_len)) {
1985 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
1986 dev_info(ctrl->ctrl.device,
1987 "NVME-FC{%d}: io failed due to bad transfer "
1988 "length: %d vs expected %d\n",
1989 ctrl->cnum, freq->transferred_length,
1990 be32_to_cpu(op->cmd_iu.data_len));
1991 			goto done;
1992 		}
1993 		result.u64 = 0;
1994 break;
1995
1996 case sizeof(struct nvme_fc_ersp_iu):
1997 /*
1998 * The ERSP IU contains a full completion with CQE.
1999 * Validate ERSP IU and look at cqe.
2000 */
2001 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
2002 (freq->rcv_rsplen / 4) ||
2003 be32_to_cpu(op->rsp_iu.xfrd_len) !=
2004 freq->transferred_length ||
2005 			op->rsp_iu.ersp_result ||
2006 			sqe->common.command_id != cqe->command_id)) {
2007 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
2008 dev_info(ctrl->ctrl.device,
2009 "NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
2010 "iu len %d, xfr len %d vs %d, status code "
2011 "%d, cmdid %d vs %d\n",
2012 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
2013 be32_to_cpu(op->rsp_iu.xfrd_len),
2014 freq->transferred_length,
2015 			op->rsp_iu.ersp_result,
2016 			sqe->common.command_id,
2017 			cqe->command_id);
2018 goto done;
2019 }
2020 		result = cqe->result;
2021 		status = cqe->status;
2022 break;
2023
2024 default:
2025 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
2026 dev_info(ctrl->ctrl.device,
2027 "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
2028 "len %d\n",
2029 ctrl->cnum, freq->rcv_rsplen);
2030 goto done;
2031 }
2032
2033 terminate_assoc = false;
2034
2035done:
2036 	if (op->flags & FCOP_FLAGS_AEN) {
2037 		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
2038 		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2039 		atomic_set(&op->state, FCPOP_STATE_IDLE);
2040 		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
2041 		nvme_fc_ctrl_put(ctrl);
2042 		goto check_error;
2043 	}
2044
2045 	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2046 	if (!nvme_try_complete_req(rq, status, result))
2047 		nvme_fc_complete_rq(rq);
2048
2049check_error:
2050 if (terminate_assoc)
2051 nvme_fc_error_recovery(ctrl, "transport detected io error");
2052}
2053
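/*
 * Illustrative sketch (not part of the driver): the only response lengths
 * the done path above accepts. Zero-length and NVME_FC_SIZEOF_ZEROS_RSP
 * (12-byte) responses cause the transport to fabricate the CQE; a full
 * ERSP IU carries the real CQE. The helper name is hypothetical.
 */
static inline bool nvme_fc_rsplen_is_valid(u32 rcv_rsplen)
{
	return rcv_rsplen == 0 ||
	       rcv_rsplen == NVME_FC_SIZEOF_ZEROS_RSP ||
	       rcv_rsplen == sizeof(struct nvme_fc_ersp_iu);
}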
2054static int
2055__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
2056 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
2057 struct request *rq, u32 rqno)
2058{
2059 struct nvme_fcp_op_w_sgl *op_w_sgl =
2060 container_of(op, typeof(*op_w_sgl), op);
2061 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2062 int ret = 0;
2063
2064 memset(op, 0, sizeof(*op));
2065 op->fcp_req.cmdaddr = &op->cmd_iu;
2066 op->fcp_req.cmdlen = sizeof(op->cmd_iu);
2067 op->fcp_req.rspaddr = &op->rsp_iu;
2068 op->fcp_req.rsplen = sizeof(op->rsp_iu);
2069 op->fcp_req.done = nvme_fc_fcpio_done;
2070 op->ctrl = ctrl;
2071 op->queue = queue;
2072 op->rq = rq;
2073 op->rqno = rqno;
2074
2075 	cmdiu->format_id = NVME_CMD_FORMAT_ID;
2076 cmdiu->fc_id = NVME_CMD_FC_ID;
2077 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
2078 if (queue->qnum)
2079 cmdiu->rsv_cat = fccmnd_set_cat_css(0,
2080 (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
2081 else
2082 cmdiu->rsv_cat = fccmnd_set_cat_admin(0);
2083
2084 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
2085 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
2086 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
2087 dev_err(ctrl->dev,
2088 "FCP Op failed - cmdiu dma mapping failed.\n");
2089 		ret = -EFAULT;
2090 goto out_on_error;
2091 }
2092
2093 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
2094 &op->rsp_iu, sizeof(op->rsp_iu),
2095 DMA_FROM_DEVICE);
2096 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
2097 dev_err(ctrl->dev,
2098 "FCP Op failed - rspiu dma mapping failed.\n");
2099 		ret = -EFAULT;
2100 }
2101
2102 atomic_set(&op->state, FCPOP_STATE_IDLE);
2103out_on_error:
2104 return ret;
2105}
2106
2107static int
2108nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
2109 unsigned int hctx_idx, unsigned int numa_node)
2110{
2111 	struct nvme_fc_ctrl *ctrl = set->driver_data;
2112 	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
2113 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
2114 	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
2115 	int res;
2116
2117 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
2118 if (res)
2119 return res;
2120 	op->op.fcp_req.first_sgl = op->sgl;
2121 	op->op.fcp_req.private = &op->priv[0];
2122 	nvme_req(rq)->ctrl = &ctrl->ctrl;
2123 	return res;
2124}
2125
2126static int
2127nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
2128{
2129 struct nvme_fc_fcp_op *aen_op;
2130 struct nvme_fc_cmd_iu *cmdiu;
2131 struct nvme_command *sqe;
2132 	void *private = NULL;
2133 int i, ret;
2134
2135 aen_op = ctrl->aen_ops;
2136 	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
2137 if (ctrl->lport->ops->fcprqst_priv_sz) {
2138 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
2139 				GFP_KERNEL);
2140 if (!private)
2141 return -ENOMEM;
2142 }
2143
2144 cmdiu = &aen_op->cmd_iu;
2145 sqe = &cmdiu->sqe;
2146 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
2147 aen_op, (struct request *)NULL,
2148 				(NVME_AQ_BLK_MQ_DEPTH + i));
2149 if (ret) {
2150 kfree(private);
2151 			return ret;
2152 		}
2153
2154 		aen_op->flags = FCOP_FLAGS_AEN;
2155 		aen_op->fcp_req.private = private;
2156
2157 memset(sqe, 0, sizeof(*sqe));
2158 sqe->common.opcode = nvme_admin_async_event;
2159 		/* Note: core layer may overwrite the sqe.command_id value */
2160 		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
2161 }
2162 return 0;
2163}
2164
2165static void
2166nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
2167{
2168 struct nvme_fc_fcp_op *aen_op;
2169 int i;
2170
2171 	cancel_work_sync(&ctrl->ctrl.async_event_work);
2172 	aen_op = ctrl->aen_ops;
2173 	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
2174 __nvme_fc_exit_request(ctrl, aen_op);
2175
2176 kfree(aen_op->fcp_req.private);
2177 aen_op->fcp_req.private = NULL;
2178 }
2179}
2180
2181static inline void
2182__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
2183 unsigned int qidx)
2184{
2185 struct nvme_fc_queue *queue = &ctrl->queues[qidx];
2186
2187 hctx->driver_data = queue;
2188 queue->hctx = hctx;
2189}
2190
2191static int
2192nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
2193 unsigned int hctx_idx)
2194{
2195 struct nvme_fc_ctrl *ctrl = data;
2196
2197 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
2198
2199 return 0;
2200}
2201
2202static int
2203nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
2204 unsigned int hctx_idx)
2205{
2206 struct nvme_fc_ctrl *ctrl = data;
2207
2208 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
2209
2210 return 0;
2211}
2212
2213static void
2214nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
2215{
2216 struct nvme_fc_queue *queue;
2217
2218 queue = &ctrl->queues[idx];
2219 memset(queue, 0, sizeof(*queue));
2220 queue->ctrl = ctrl;
2221 queue->qnum = idx;
2222 	atomic_set(&queue->csn, 0);
2223 queue->dev = ctrl->dev;
2224
2225 if (idx > 0)
2226 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
2227 else
2228 queue->cmnd_capsule_len = sizeof(struct nvme_command);
2229
2230 /*
2231 * Considered whether we should allocate buffers for all SQEs
2232 * and CQEs and dma map them - mapping their respective entries
2233 * into the request structures (kernel vm addr and dma address)
2234 * thus the driver could use the buffers/mappings directly.
2235 * It only makes sense if the LLDD would use them for its
2236 	 * messaging api. It's very unlikely most adapter apis would use
2237 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
2238 * structures were used instead.
2239 */
2240}
2241
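/*
 * Illustrative sketch (not part of the driver): the capsule sizing rule
 * applied above. ioccsz is expressed in 16-byte units by NVMe-oF, so io
 * queues get ioccsz * 16 bytes while the admin queue always uses a bare
 * 64-byte SQE. The helper name is hypothetical.
 */
static inline size_t nvme_fc_capsule_len(struct nvme_fc_ctrl *ctrl, int qnum)
{
	return qnum ? ctrl->ctrl.ioccsz * 16 : sizeof(struct nvme_command);
}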
2242/*
2243 * This routine terminates a queue at the transport level.
2244 * The transport has already ensured that all outstanding ios on
2245 * the queue have been terminated.
2246 * The transport will send a Disconnect LS request to terminate
2247 * the queue's connection. Termination of the admin queue will also
2248 * terminate the association at the target.
2249 */
2250static void
2251nvme_fc_free_queue(struct nvme_fc_queue *queue)
2252{
2253 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
2254 return;
2255
2256 	clear_bit(NVME_FC_Q_LIVE, &queue->flags);
2257 /*
2258 * Current implementation never disconnects a single queue.
2259 * It always terminates a whole association. So there is never
2260 * a disconnect(queue) LS sent to the target.
2261 */
2262
2263 queue->connection_id = 0;
2264 	atomic_set(&queue->csn, 0);
2265}
2266
2267static void
2268__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
2269 struct nvme_fc_queue *queue, unsigned int qidx)
2270{
2271 if (ctrl->lport->ops->delete_queue)
2272 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
2273 queue->lldd_handle);
2274 queue->lldd_handle = NULL;
2275}
2276
2277static void
2278nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
2279{
2280 int i;
2281
2282 	for (i = 1; i < ctrl->ctrl.queue_count; i++)
2283 nvme_fc_free_queue(&ctrl->queues[i]);
2284}
2285
2286static int
2287__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
2288 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
2289{
2290 int ret = 0;
2291
2292 queue->lldd_handle = NULL;
2293 if (ctrl->lport->ops->create_queue)
2294 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
2295 qidx, qsize, &queue->lldd_handle);
2296
2297 return ret;
2298}
2299
2300static void
2301nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
2302{
2303 	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
2304 int i;
2305
2306 	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
2307 __nvme_fc_delete_hw_queue(ctrl, queue, i);
2308}
2309
2310static int
2311nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2312{
2313 struct nvme_fc_queue *queue = &ctrl->queues[1];
2314 	int i, ret;
2315
2316 	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
2317 		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
2318 if (ret)
2319 goto delete_queues;
2320 }
2321
2322 return 0;
2323
2324delete_queues:
2325 	for (; i > 0; i--)
2326 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
2327 return ret;
2328}
2329
2330static int
2331nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2332{
2333 int i, ret = 0;
2334
2335 	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
2336 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
2337 (qsize / 5));
2338 if (ret)
2339 break;
2340 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
2341 if (ret)
2342 break;
2343
2344 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
2345 }
2346
2347 return ret;
2348}
2349
2350static void
2351nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
2352{
2353 int i;
2354
2355 	for (i = 1; i < ctrl->ctrl.queue_count; i++)
2356 		nvme_fc_init_queue(ctrl, i);
2357}
2358
2359static void
2360nvme_fc_ctrl_free(struct kref *ref)
2361{
2362 struct nvme_fc_ctrl *ctrl =
2363 container_of(ref, struct nvme_fc_ctrl, ref);
2364 unsigned long flags;
2365
2366 if (ctrl->ctrl.tagset) {
2367 blk_cleanup_queue(ctrl->ctrl.connect_q);
2368 blk_mq_free_tag_set(&ctrl->tag_set);
2369 }
2370
2371 /* remove from rport list */
2372 spin_lock_irqsave(&ctrl->rport->lock, flags);
2373 list_del(&ctrl->ctrl_list);
2374 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
2375
2376 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2377 	blk_cleanup_queue(ctrl->ctrl.admin_q);
2378 	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
2379 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2380
2381 kfree(ctrl->queues);
2382
2383 put_device(ctrl->dev);
2384 nvme_fc_rport_put(ctrl->rport);
2385
2386 	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2387 if (ctrl->ctrl.opts)
2388 nvmf_free_options(ctrl->ctrl.opts);
2389 kfree(ctrl);
2390}
2391
2392static void
2393nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2394{
2395 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2396}
2397
2398static int
2399nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2400{
2401 return kref_get_unless_zero(&ctrl->ref);
2402}
2403
2404/*
2405 * All accesses from nvme core layer done - can now free the
2406 * controller. Called after last nvme_put_ctrl() call
2407 */
2408static void
2409nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2410{
2411 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2412
2413 WARN_ON(nctrl != &ctrl->ctrl);
2414
2415 	nvme_fc_ctrl_put(ctrl);
2416}
2417
2418static void
2419nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2420{
2421 int active;
2422
2423 /*
2424 * if an error (io timeout, etc) while (re)connecting,
2425 * it's an error on creating the new association.
2426 * Start the error recovery thread if it hasn't already
2427 * been started. It is expected there could be multiple
2428 * ios hitting this path before things are cleaned up.
2429 */
2430 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2431 active = atomic_xchg(&ctrl->err_work_active, 1);
2432 		if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
2433 atomic_set(&ctrl->err_work_active, 0);
2434 WARN_ON(1);
2435 }
2436 return;
2437 }
2438
2439 /* Otherwise, only proceed if in LIVE state - e.g. on first error */
2440 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2441 return;
2442
2443 	dev_warn(ctrl->ctrl.device,
2444 		"NVME-FC{%d}: transport association event: %s\n",
2445 		ctrl->cnum, errmsg);
2446 	dev_warn(ctrl->ctrl.device,
2447 		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2448
2449 	nvme_reset_ctrl(&ctrl->ctrl);
2450}
2451
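/*
 * Illustrative sketch (not part of the driver): the atomic_xchg() gate
 * used above. Many failing ios may race into error recovery while
 * CONNECTING, but only the caller that flips err_work_active 0 -> 1
 * schedules err_work; the rest observe 1 and return, so the work runs
 * once per error episode. The helper name is hypothetical.
 */
static bool nvme_fc_claim_err_work(struct nvme_fc_ctrl *ctrl)
{
	return atomic_xchg(&ctrl->err_work_active, 1) == 0;
}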
2452static enum blk_eh_timer_return
2453nvme_fc_timeout(struct request *rq, bool reserved)
2454{
2455 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2456 struct nvme_fc_ctrl *ctrl = op->ctrl;
2457 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2458 struct nvme_command *sqe = &cmdiu->sqe;
2459
2460 /*
2461 * Attempt to abort the offending command. Command completion
2462 * will detect the aborted io and will fail the connection.
2463 	 */
2464 dev_info(ctrl->ctrl.device,
2465 "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
2466 "x%08x/x%08x\n",
2467 ctrl->cnum, op->queue->qnum, sqe->common.opcode,
2468 sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
2469 if (__nvme_fc_abort_op(ctrl, op))
2470 nvme_fc_error_recovery(ctrl, "io timeout abort failed");
2471
2472 /*
2473 * the io abort has been initiated. Have the reset timer
2474 * restarted and the abort completion will complete the io
2475 * shortly. Avoids a synchronous wait while the abort finishes.
2476 */
2477 return BLK_EH_RESET_TIMER;
2478}
2479
2480static int
2481nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2482 struct nvme_fc_fcp_op *op)
2483{
2484 struct nvmefc_fcp_req *freq = &op->fcp_req;
2485 int ret;
2486
2487 freq->sg_cnt = 0;
2488
2489 	if (!blk_rq_nr_phys_segments(rq))
2490 return 0;
2491
2492 freq->sg_table.sgl = freq->first_sgl;
2493 	ret = sg_alloc_table_chained(&freq->sg_table,
2494 			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
2495 			NVME_INLINE_SG_CNT);
2496 if (ret)
2497 return -ENOMEM;
2498
2499 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
2500 	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
2501 	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2502 				op->nents, rq_dma_dir(rq));
2503 	if (unlikely(freq->sg_cnt <= 0)) {
2504 		sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2505 freq->sg_cnt = 0;
2506 return -EFAULT;
2507 }
2508
2509 /*
2510 * TODO: blk_integrity_rq(rq) for DIF
2511 */
2512 return 0;
2513}
2514
2515static void
2516nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2517 struct nvme_fc_fcp_op *op)
2518{
2519 struct nvmefc_fcp_req *freq = &op->fcp_req;
2520
2521 if (!freq->sg_cnt)
2522 return;
2523
2524 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2525 				rq_dma_dir(rq));
2526
2527 	sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2528
2529 freq->sg_cnt = 0;
2530}
2531
2532/*
2533 * In FC, the queue is a logical thing. At transport connect, the target
2534 * creates its "queue" and returns a handle that is to be given to the
2535 * target whenever it posts something to the corresponding SQ. When an
2536 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
2537 * command contained within the SQE, an io, and assigns a FC exchange
2538 * to it. The SQE and the associated SQ handle are sent in the initial
2539 * CMD IU sent on the exchange. All transfers relative to the io occur
2540 * as part of the exchange. The CQE is the last thing for the io,
2541 * which is transferred (explicitly or implicitly) with the RSP IU
2542 * sent on the exchange. After the CQE is received, the FC exchange is
2543 * terminated and the exchange may be used on a different io.
2544 *
2545 * The transport to LLDD api has the transport making a request for a
2546 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
2547 * resource and transfers the command. The LLDD will then process all
2548 * steps to complete the io. Upon completion, the transport done routine
2549 * is called.
2550 *
2551 * So - while the operation is outstanding to the LLDD, there is a link
2552 * level FC exchange resource that is also outstanding. This must be
2553 * considered in all cleanup operations.
2554 */
2555static blk_status_t
2556nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2557 struct nvme_fc_fcp_op *op, u32 data_len,
2558 enum nvmefc_fcp_datadir io_dir)
2559{
2560 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2561 struct nvme_command *sqe = &cmdiu->sqe;
2562 	int ret, opstate;
2563
2564 /*
2565 * before attempting to send the io, check to see if we believe
2566 * the target device is present
2567 */
2568 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2569 		return BLK_STS_RESOURCE;
2570
2571 	if (!nvme_fc_ctrl_get(ctrl))
2572 		return BLK_STS_IOERR;
2573
2574 /* format the FC-NVME CMD IU and fcp_req */
2575 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
2576 cmdiu->data_len = cpu_to_be32(data_len);
2577 switch (io_dir) {
2578 case NVMEFC_FCP_WRITE:
2579 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
2580 break;
2581 case NVMEFC_FCP_READ:
2582 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
2583 break;
2584 case NVMEFC_FCP_NODATA:
2585 cmdiu->flags = 0;
2586 break;
2587 }
2588 op->fcp_req.payload_length = data_len;
2589 op->fcp_req.io_dir = io_dir;
2590 op->fcp_req.transferred_length = 0;
2591 op->fcp_req.rcv_rsplen = 0;
2592 	op->fcp_req.status = NVME_SC_SUCCESS;
2593 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
2594
2595 /*
2596 * validate per fabric rules, set fields mandated by fabric spec
2597 * as well as those by FC-NVME spec.
2598 */
2599 WARN_ON_ONCE(sqe->common.metadata);
2600 sqe->common.flags |= NVME_CMD_SGL_METABUF;
2601
2602 /*
2603 * format SQE DPTR field per FC-NVME rules:
2604 * type=0x5 Transport SGL Data Block Descriptor
2605 * subtype=0xA Transport-specific value
2606 * address=0
2607 * length=length of the data series
2608 	 */
2609 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2610 NVME_SGL_FMT_TRANSPORT_A;
2611 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2612 sqe->rw.dptr.sgl.addr = 0;
2613
2614 	if (!(op->flags & FCOP_FLAGS_AEN)) {
2615 ret = nvme_fc_map_data(ctrl, op->rq, op);
2616 if (ret < 0) {
2617 nvme_cleanup_cmd(op->rq);
2618 nvme_fc_ctrl_put(ctrl);
2619 if (ret == -ENOMEM || ret == -EAGAIN)
2620 return BLK_STS_RESOURCE;
2621 return BLK_STS_IOERR;
2622 }
2623 }
2624
2625 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2626 sizeof(op->cmd_iu), DMA_TO_DEVICE);
2627
2628 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2629
2630 	if (!(op->flags & FCOP_FLAGS_AEN))
2631 blk_mq_start_request(op->rq);
2632
2633 	cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
2634 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2635 &ctrl->rport->remoteport,
2636 queue->lldd_handle, &op->fcp_req);
2637
2638 if (ret) {
2639 /*
2640 * If the lld fails to send the command is there an issue with
2641 * the csn value? If the command that fails is the Connect,
2642 * no - as the connection won't be live. If it is a command
2643 * post-connect, it's possible a gap in csn may be created.
2644 * Does this matter? As Linux initiators don't send fused
2645 * commands, no. The gap would exist, but as there's nothing
2646 * that depends on csn order to be delivered on the target
2647 * side, it shouldn't hurt. It would be difficult for a
2648 * target to even detect the csn gap as it has no idea when the
2649 * cmd with the csn was supposed to arrive.
2650 */
2651 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
2652 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2653
2654 		if (!(op->flags & FCOP_FLAGS_AEN)) {
2655 			nvme_fc_unmap_data(ctrl, op->rq, op);
2656 nvme_cleanup_cmd(op->rq);
2657 }
2658
2659 nvme_fc_ctrl_put(ctrl);
2660
2661 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2662 ret != -EBUSY)
2663 			return BLK_STS_IOERR;
2664
2665 		return BLK_STS_RESOURCE;
2666 }
2667
2668 	return BLK_STS_OK;
2669}
2670
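/*
 * Illustrative sketch (not part of the driver): CSN stamping as done
 * above. The counter is reset to 0 whenever a queue is (re)initialized or
 * freed, so atomic_inc_return() gives the fabric Connect command csn 1 and
 * each later command the next value. The helper name is hypothetical.
 */
static inline __be32 nvme_fc_next_csn(struct nvme_fc_queue *queue)
{
	return cpu_to_be32(atomic_inc_return(&queue->csn));
}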
2671static blk_status_t
2672nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2673 const struct blk_mq_queue_data *bd)
2674{
2675 struct nvme_ns *ns = hctx->queue->queuedata;
2676 struct nvme_fc_queue *queue = hctx->driver_data;
2677 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2678 struct request *rq = bd->rq;
2679 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2680 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2681 struct nvme_command *sqe = &cmdiu->sqe;
2682 enum nvmefc_fcp_datadir io_dir;
2683 	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
2684 	u32 data_len;
2685 	blk_status_t ret;
2686
2687 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2688 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2689 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2690
2691 ret = nvme_setup_cmd(ns, rq, sqe);
2692 if (ret)
2693 return ret;
2694
2695 /*
2696 * nvme core doesn't quite treat the rq opaquely. Commands such
2697 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
2698 * there is no actual payload to be transferred.
2699 * To get it right, key data transmission on there being 1 or
2700 * more physical segments in the sg list. If there is no
2701 * physical segments, there is no payload.
2702 */
2703 if (blk_rq_nr_phys_segments(rq)) {
2704 data_len = blk_rq_payload_bytes(rq);
2705 io_dir = ((rq_data_dir(rq) == WRITE) ?
2706 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2707 } else {
2708 data_len = 0;
2709 		io_dir = NVMEFC_FCP_NODATA;
2710 }
2711
2712
2713 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2714}
2715
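/*
 * Illustrative sketch (not part of the driver): the segment-keyed decision
 * made above, restated as a hypothetical helper. Commands such as WRITE
 * ZEROES report a payload size but carry no physical segments, so direction
 * must key off blk_rq_nr_phys_segments(), not blk_rq_payload_bytes().
 */
static inline enum nvmefc_fcp_datadir nvme_fc_io_dir(struct request *rq)
{
	if (!blk_rq_nr_phys_segments(rq))
		return NVMEFC_FCP_NODATA;
	return rq_data_dir(rq) == WRITE ? NVMEFC_FCP_WRITE : NVMEFC_FCP_READ;
}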
2716static void
2717nvme_fc_submit_async_event(struct nvme_ctrl *arg)
2718{
2719 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2720 struct nvme_fc_fcp_op *aen_op;
2721 	blk_status_t ret;
2722
2723 	if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
2724 return;
2725
2726 	aen_op = &ctrl->aen_ops[0];
2727
2728 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2729 NVMEFC_FCP_NODATA);
2730 if (ret)
2731 dev_err(ctrl->ctrl.device,
2732 			"failed async event work\n");
2733}
2734
2735static void
2736nvme_fc_complete_rq(struct request *rq)
2737{
2738 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2739 struct nvme_fc_ctrl *ctrl = op->ctrl;
2740
2741 	atomic_set(&op->state, FCPOP_STATE_IDLE);
2742 	op->flags &= ~FCOP_FLAGS_TERMIO;
2743
2744 	nvme_fc_unmap_data(ctrl, rq, op);
2745 	nvme_complete_rq(rq);
2746 	nvme_fc_ctrl_put(ctrl);
2747}
2748
2749/*
2750 * This routine is used by the transport when it needs to find active
2751 * io on a queue that is to be terminated. The transport uses
2752 * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
2753 * this routine to kill them on a 1 by 1 basis.
2754 *
2755 * As FC allocates FC exchange for each io, the transport must contact
2756 * the LLDD to terminate the exchange, thus releasing the FC exchange.
2757 * After terminating the exchange the LLDD will call the transport's
2758 * normal io done path for the request, but it will have an aborted
2759 * status. The done path will return the io request back to the block
2760 * layer with an error status.
2761 */
2762static bool
2763nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2764{
2765 struct nvme_ctrl *nctrl = data;
2766 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2767 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2768
2769 	__nvme_fc_abort_op(ctrl, op);
2770 	return true;
2771}
2772
2773
2774static const struct blk_mq_ops nvme_fc_mq_ops = {
2775 .queue_rq = nvme_fc_queue_rq,
2776 .complete = nvme_fc_complete_rq,
2777 .init_request = nvme_fc_init_request,
2778 .exit_request = nvme_fc_exit_request,
2779 	.init_hctx	= nvme_fc_init_hctx,
2780 .timeout = nvme_fc_timeout,
2781};
2782
2783static int
2784nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2785{
2786 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2787 	unsigned int nr_io_queues;
2788 	int ret;
2789
2790 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2791 ctrl->lport->ops->max_hw_queues);
2792 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2793 if (ret) {
2794 dev_info(ctrl->ctrl.device,
2795 "set_queue_count failed: %d\n", ret);
2796 return ret;
2797 }
2798
2799 ctrl->ctrl.queue_count = nr_io_queues + 1;
2800 if (!nr_io_queues)
2801 		return 0;
2802
2803 	nvme_fc_init_io_queues(ctrl);
2804
2805 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2806 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2807 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2808 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2809 	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
2810 	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2811 ctrl->tag_set.cmd_size =
2812 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
2813 ctrl->lport->ops->fcprqst_priv_sz);
2814 	ctrl->tag_set.driver_data = ctrl;
2815 	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
2816 	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2817
2818 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2819 if (ret)
2820 return ret;
2821
2822 	ctrl->ctrl.tagset = &ctrl->tag_set;
2823
2824 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2825 if (IS_ERR(ctrl->ctrl.connect_q)) {
2826 ret = PTR_ERR(ctrl->ctrl.connect_q);
2827 goto out_free_tag_set;
2828 }
2829
2830 	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2831 	if (ret)
2832 		goto out_cleanup_blk_queue;
2833
2834 	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2835 if (ret)
2836 goto out_delete_hw_queues;
2837
2838 ctrl->ioq_live = true;
2839
2840 	return 0;
2841
2842out_delete_hw_queues:
2843 nvme_fc_delete_hw_io_queues(ctrl);
2844out_cleanup_blk_queue:
2845 blk_cleanup_queue(ctrl->ctrl.connect_q);
2846out_free_tag_set:
2847 blk_mq_free_tag_set(&ctrl->tag_set);
2848 nvme_fc_free_io_queues(ctrl);
2849
2850 /* force put free routine to ignore io queues */
2851 ctrl->ctrl.tagset = NULL;
2852
2853 return ret;
2854}
2855
2856static int
2857nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2858{
2859 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2860 	u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
2861 	unsigned int nr_io_queues;
2862 int ret;
2863
2864 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2865 ctrl->lport->ops->max_hw_queues);
2866 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2867 if (ret) {
2868 dev_info(ctrl->ctrl.device,
2869 "set_queue_count failed: %d\n", ret);
2870 return ret;
2871 }
2872
2873 if (!nr_io_queues && prior_ioq_cnt) {
2874 dev_info(ctrl->ctrl.device,
2875 "Fail Reconnect: At least 1 io queue "
2876 "required (was %d)\n", prior_ioq_cnt);
2877 return -ENOSPC;
2878 }
2879
2880 	ctrl->ctrl.queue_count = nr_io_queues + 1;
2881 	/* check for io queues existing */
2882 	if (ctrl->ctrl.queue_count == 1)
2883 return 0;
2884
2885 	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2886 	if (ret)
2887 		goto out_free_io_queues;
2888
2889 	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2890 if (ret)
2891 goto out_delete_hw_queues;
2892
2893 	if (prior_ioq_cnt != nr_io_queues) {
2894 dev_info(ctrl->ctrl.device,
2895 "reconnect: revising io queue count from %d to %d\n",
2896 prior_ioq_cnt, nr_io_queues);
2897 nvme_wait_freeze(&ctrl->ctrl);
2898 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2899 nvme_unfreeze(&ctrl->ctrl);
2900 }
2901
2902 return 0;
2903
2904out_delete_hw_queues:
2905 nvme_fc_delete_hw_io_queues(ctrl);
2906out_free_io_queues:
2907 	nvme_fc_free_io_queues(ctrl);
2908 return ret;
2909}
2910
2911static void
2912nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
2913{
2914 struct nvme_fc_lport *lport = rport->lport;
2915
2916 atomic_inc(&lport->act_rport_cnt);
2917}
2918
2919static void
2920nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
2921{
2922 struct nvme_fc_lport *lport = rport->lport;
2923 u32 cnt;
2924
2925 cnt = atomic_dec_return(&lport->act_rport_cnt);
2926 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
2927 lport->ops->localport_delete(&lport->localport);
2928}
2929
2930static int
2931nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
2932{
2933 struct nvme_fc_rport *rport = ctrl->rport;
2934 u32 cnt;
2935
2936 	if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
2937 return 1;
2938
2939 cnt = atomic_inc_return(&rport->act_ctrl_cnt);
2940 if (cnt == 1)
2941 nvme_fc_rport_active_on_lport(rport);
2942
2943 return 0;
2944}
2945
2946static int
2947nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
2948{
2949 struct nvme_fc_rport *rport = ctrl->rport;
2950 struct nvme_fc_lport *lport = rport->lport;
2951 u32 cnt;
2952
2953 	/* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */
2954
2955 cnt = atomic_dec_return(&rport->act_ctrl_cnt);
2956 if (cnt == 0) {
2957 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
2958 lport->ops->remoteport_delete(&rport->remoteport);
2959 nvme_fc_rport_inactive_on_lport(rport);
2960 }
2961
2962 return 0;
2963}
2964
2965/*
2966 * This routine restarts the controller on the host side, and
2967 * on the link side, recreates the controller association.
2968 */
2969static int
2970nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2971{
2972 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2973 struct nvmefc_ls_rcv_op *disls = NULL;
2974 unsigned long flags;
2975 int ret;
2976 bool changed;
2977
2978 	++ctrl->ctrl.nr_reconnects;
2979
2980 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2981 return -ENODEV;
2982
2983 if (nvme_fc_ctlr_active_on_rport(ctrl))
2984 return -ENOTUNIQ;
2985
2986 dev_info(ctrl->ctrl.device,
2987 "NVME-FC{%d}: create association : host wwpn 0x%016llx "
2988 " rport wwpn 0x%016llx: NQN \"%s\"\n",
2989 ctrl->cnum, ctrl->lport->localport.port_name,
2990 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
2991
2992 clear_bit(ASSOC_FAILED, &ctrl->flags);
2993
2994 /*
2995 * Create the admin queue
2996 */
2997
2998 	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2999 				NVME_AQ_DEPTH);
3000 if (ret)
3001 goto out_free_queue;
3002
3003 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
3004 				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
3005 if (ret)
3006 goto out_delete_hw_queue;
3007
3008 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
3009 if (ret)
3010 goto out_disconnect_admin_queue;
3011
3012 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
3013
3014 /*
3015 * Check controller capabilities
3016 *
3017 * todo:- add code to check if ctrl attributes changed from
3018 * prior connection values
3019 */
3020
3021 	ret = nvme_enable_ctrl(&ctrl->ctrl);
3022 	if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3023 goto out_disconnect_admin_queue;
3024
3025 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
3026 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
3027 (ilog2(SZ_4K) - 9);
3028
3029 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
3030
3031 	ret = nvme_init_identify(&ctrl->ctrl);
3032 	if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3033 goto out_disconnect_admin_queue;
3034
3035 /* sanity checks */
3036
3037 /* FC-NVME does not have other data in the capsule */
3038 if (ctrl->ctrl.icdoff) {
3039 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
3040 ctrl->ctrl.icdoff);
3041 goto out_disconnect_admin_queue;
3042 }
3043
3044 /* FC-NVME supports normal SGL Data Block Descriptors */
3045
3046 if (opts->queue_size > ctrl->ctrl.maxcmd) {
3047 /* warn if maxcmd is lower than queue_size */
3048 dev_warn(ctrl->ctrl.device,
3049 "queue_size %zu > ctrl maxcmd %u, reducing "
3050 			"to maxcmd\n",
3051 opts->queue_size, ctrl->ctrl.maxcmd);
3052 opts->queue_size = ctrl->ctrl.maxcmd;
3053 }
3054
3055 if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
3056 /* warn if sqsize is lower than queue_size */
3057 dev_warn(ctrl->ctrl.device,
3058 "queue_size %zu > ctrl sqsize %u, reducing "
3059 "to sqsize\n",
3060 opts->queue_size, ctrl->ctrl.sqsize + 1);
3061 opts->queue_size = ctrl->ctrl.sqsize + 1;
3062 }
3063
3064 ret = nvme_fc_init_aen_ops(ctrl);
3065 if (ret)
3066 goto out_term_aen_ops;
3067
3068 /*
3069 * Create the io queues
3070 */
3071
3072 	if (ctrl->ctrl.queue_count > 1) {
3073 		if (!ctrl->ioq_live)
3074 ret = nvme_fc_create_io_queues(ctrl);
3075 else
3076 			ret = nvme_fc_recreate_io_queues(ctrl);
3077 	}
3078 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3079 goto out_term_aen_ops;
3080
3081 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
3082
3083 	ctrl->ctrl.nr_reconnects = 0;
3084
3085 if (changed)
3086 nvme_start_ctrl(&ctrl->ctrl);
3087
3088 return 0; /* Success */
3089
3090out_term_aen_ops:
3091 nvme_fc_term_aen_ops(ctrl);
3092out_disconnect_admin_queue:
3093 /* send a Disconnect(association) LS to fc-nvme target */
3094 nvme_fc_xmt_disconnect_assoc(ctrl);
3095 	spin_lock_irqsave(&ctrl->lock, flags);
3096 	ctrl->association_id = 0;
3097 disls = ctrl->rcv_disconn;
3098 ctrl->rcv_disconn = NULL;
3099 spin_unlock_irqrestore(&ctrl->lock, flags);
3100 if (disls)
3101 nvme_fc_xmt_ls_rsp(disls);
61bff8ef
JS
3102out_delete_hw_queue:
3103 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3104out_free_queue:
3105 nvme_fc_free_queue(&ctrl->queues[0]);
3106 	clear_bit(ASSOC_ACTIVE, &ctrl->flags);
3107 	nvme_fc_ctlr_inactive_on_rport(ctrl);
3108
3109 return ret;
3110}
3111
3112
3113/*
3114 * This routine runs through all outstanding commands on the association
3115 * and aborts them. This routine is typically called by the
3116 * delete_association routine. It is also called due to an error during
3117 * reconnect. In that scenario, it is most likely a command that initializes
3118 * the controller, including fabric Connect commands on io queues, that
3119 * may have timed out or failed thus the io must be killed for the connect
3120 * thread to see the error.
3121 */
3122static void
3123__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
3124{
3125 /*
3126 * If io queues are present, stop them and terminate all outstanding
3127 * ios on them. As FC allocates FC exchange for each io, the
3128 * transport must contact the LLDD to terminate the exchange,
3129 	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
3130 	 * to tell us which ios are busy and invoke a transport routine
3131 * to kill them with the LLDD. After terminating the exchange
3132 * the LLDD will call the transport's normal io done path, but it
3133 * will have an aborted status. The done path will return the
3134 * io requests back to the block layer as part of normal completions
3135 * (but with error status).
3136 */
3137 	if (ctrl->ctrl.queue_count > 1) {
3138 nvme_stop_queues(&ctrl->ctrl);
3139 blk_mq_tagset_busy_iter(&ctrl->tag_set,
3140 nvme_fc_terminate_exchange, &ctrl->ctrl);
3141 		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
3142 if (start_queues)
3143 nvme_start_queues(&ctrl->ctrl);
3144 }
3145
3146 /*
3147 * Other transports, which don't have link-level contexts bound
3148 * to sqe's, would try to gracefully shutdown the controller by
3149 * writing the registers for shutdown and polling (call
3150 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
3151 * just aborted and we will wait on those contexts, and given
3152 	 * there was no indication of how live the controller is on the
3153 	 * link, don't send more io to create more contexts for the
3154 	 * shutdown. Let the controller fail via keepalive failure if
3155 	 * it's still present.
3156 */
3157
3158 /*
3159 * clean up the admin queue. Same thing as above.
3160 	 */
3161 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
3162 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
3163 nvme_fc_terminate_exchange, &ctrl->ctrl);
3164 	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
3165}
3166
3167/*
3168 * This routine stops operation of the controller on the host side.
3169 * On the host os stack side: Admin and IO queues are stopped,
3170 * outstanding ios on them terminated via FC ABTS.
3171 * On the link side: the association is terminated.
3172 */
3173static void
3174nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
3175{
3176 struct nvmefc_ls_rcv_op *disls = NULL;
3177 unsigned long flags;
3178
3179 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
3180 return;
3181
3182 spin_lock_irqsave(&ctrl->lock, flags);
3183 set_bit(FCCTRL_TERMIO, &ctrl->flags);
3184 ctrl->iocnt = 0;
3185 spin_unlock_irqrestore(&ctrl->lock, flags);
3186
3187 __nvme_fc_abort_outstanding_ios(ctrl, false);
3188
3189 /* kill the aens as they are a separate path */
3190 nvme_fc_abort_aen_ops(ctrl);
3191
3192 /* wait for all io that had to be aborted */
3193 	spin_lock_irq(&ctrl->lock);
3194 	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
3195 	clear_bit(FCCTRL_TERMIO, &ctrl->flags);
3196 	spin_unlock_irq(&ctrl->lock);
3197
3198 nvme_fc_term_aen_ops(ctrl);
3199
3200 /*
3201 * send a Disconnect(association) LS to fc-nvme target
3202 * Note: could have been sent at top of process, but
3203 * cleaner on link traffic if after the aborts complete.
3204 * Note: if association doesn't exist, association_id will be 0
3205 */
3206 if (ctrl->association_id)
3207 nvme_fc_xmt_disconnect_assoc(ctrl);
3208
3209 	spin_lock_irqsave(&ctrl->lock, flags);
3210 	ctrl->association_id = 0;
3211 disls = ctrl->rcv_disconn;
3212 ctrl->rcv_disconn = NULL;
3213 spin_unlock_irqrestore(&ctrl->lock, flags);
3214 if (disls)
3215 /*
3216 * if a Disconnect Request was waiting for a response, send
3217 * now that all ABTS's have been issued (and are complete).
3218 */
3219 nvme_fc_xmt_ls_rsp(disls);
3220
3221 if (ctrl->ctrl.tagset) {
3222 nvme_fc_delete_hw_io_queues(ctrl);
3223 nvme_fc_free_io_queues(ctrl);
3224 }
3225
3226 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3227 nvme_fc_free_queue(&ctrl->queues[0]);
3228
3229 /* re-enable the admin_q so anything new can fast fail */
3230 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
3231
3232 /* resume the io queues so that things will fast fail */
3233 nvme_start_queues(&ctrl->ctrl);
3234
3235 	nvme_fc_ctlr_inactive_on_rport(ctrl);
3236}
3237
3238static void
3239nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
3240{
3241 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
3242
3243 	cancel_work_sync(&ctrl->err_work);
3244 	cancel_delayed_work_sync(&ctrl->connect_work);
3245 /*
3246 * kill the association on the link side. this will block
3247 * waiting for io to terminate
3248 */
3249 nvme_fc_delete_association(ctrl);
3250}
3251
3252static void
3253nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
3254{
3255 struct nvme_fc_rport *rport = ctrl->rport;
3256 struct nvme_fc_remote_port *portptr = &rport->remoteport;
3257 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
3258 bool recon = true;
3259
3260 	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
3261 		return;
3262
3263 	if (portptr->port_state == FC_OBJSTATE_ONLINE)
3264 		dev_info(ctrl->ctrl.device,
3265 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
3266 ctrl->cnum, status);
3267 else if (time_after_eq(jiffies, rport->dev_loss_end))
3268 recon = false;
3269
3270 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
3271 if (portptr->port_state == FC_OBJSTATE_ONLINE)
3272 dev_info(ctrl->ctrl.device,
3273 "NVME-FC{%d}: Reconnect attempt in %ld "
3274 "seconds\n",
3275 ctrl->cnum, recon_delay / HZ);
3276 else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
3277 recon_delay = rport->dev_loss_end - jiffies;
3278
3279 		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
3280 	} else {
3281 if (portptr->port_state == FC_OBJSTATE_ONLINE)
3282 dev_warn(ctrl->ctrl.device,
3283 				"NVME-FC{%d}: Max reconnect attempts (%d) "
3284 				"reached.\n",
3285 				ctrl->cnum, ctrl->ctrl.nr_reconnects);
3286 else
3287 dev_warn(ctrl->ctrl.device,
3288 "NVME-FC{%d}: dev_loss_tmo (%d) expired "
3289 				"while waiting for remoteport connectivity.\n",
3290 ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
3291 (ctrl->ctrl.opts->max_reconnects *
3292 ctrl->ctrl.opts->reconnect_delay)));
3293 		WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
3294 }
3295}
3296
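/*
 * Illustrative sketch (not part of the driver): the clamp applied above so
 * the final retry still lands inside dev_loss_tmo. For example, with a 2s
 * reconnect_delay but only 1s left before rport->dev_loss_end, the queued
 * delay shrinks to 1s. Hypothetical restatement:
 */
static unsigned long nvme_fc_clamp_recon_delay(struct nvme_fc_rport *rport,
		unsigned long recon_delay)
{
	if (time_after(jiffies + recon_delay, rport->dev_loss_end))
		recon_delay = rport->dev_loss_end - jiffies;
	return recon_delay;
}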
3297static void
3298__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
3299{
3300 	/*
3301 * if state is CONNECTING - the error occurred as part of a
3302 * reconnect attempt. Abort any ios on the association and
3303 * let the create_association error paths resolve things.
3304 	 */
3305 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
3306 __nvme_fc_abort_outstanding_ios(ctrl, true);
3307 		set_bit(ASSOC_FAILED, &ctrl->flags);
3308 		return;
3309 	}
3310
3311 /*
3312 * For any other state, kill the association. As this routine
3313 * is a common io abort routine for resetting and such, after
3314 * the association is terminated, ensure that the state is set
3315 * to CONNECTING.
3316 */
3317
3318 nvme_stop_keep_alive(&ctrl->ctrl);
3319
3320 	/* will block while waiting for io to terminate */
3321 nvme_fc_delete_association(ctrl);
3322
3323 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
3324 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
3325 dev_err(ctrl->ctrl.device,
3326 "NVME-FC{%d}: error_recovery: Couldn't change state "
3327 			"to CONNECTING\n", ctrl->cnum);
3328}
3329
3330static void
3331nvme_fc_reset_ctrl_work(struct work_struct *work)
3332{
3333 struct nvme_fc_ctrl *ctrl =
3334 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
3335 int ret;
3336
3337 __nvme_fc_terminate_io(ctrl);
3338
3339 nvme_stop_ctrl(&ctrl->ctrl);
3340
3341 	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
3342 		ret = nvme_fc_create_association(ctrl);
3343 else
3344 ret = -ENOTCONN;
3345
3346 if (ret)
3347 nvme_fc_reconnect_or_delete(ctrl, ret);
3348 else
3349 		dev_info(ctrl->ctrl.device,
3350 "NVME-FC{%d}: controller reset complete\n",
3351 ctrl->cnum);
3352}
3353
3354static void
3355nvme_fc_connect_err_work(struct work_struct *work)
3356{
3357 struct nvme_fc_ctrl *ctrl =
3358 container_of(work, struct nvme_fc_ctrl, err_work);
3359
3360 __nvme_fc_terminate_io(ctrl);
3361
3362 atomic_set(&ctrl->err_work_active, 0);
3363
3364 /*
3365 * Rescheduling the connection after recovering
3366 * from the io error is left to the reconnect work
3367 * item, which is what should have stalled waiting on
3368 * the io that had the error that scheduled this work.
3369 */
3370}
3371
3372static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
3373 .name = "fc",
3374 .module = THIS_MODULE,
3375 	.flags			= NVME_F_FABRICS,
3376 .reg_read32 = nvmf_reg_read32,
3377 .reg_read64 = nvmf_reg_read64,
3378 .reg_write32 = nvmf_reg_write32,
3379 .free_ctrl = nvme_fc_nvme_ctrl_freed,
3380 .submit_async_event = nvme_fc_submit_async_event,
3381 	.delete_ctrl		= nvme_fc_delete_ctrl,
3382 .get_address = nvmf_get_address,
3383};
3384
3385static void
3386nvme_fc_connect_ctrl_work(struct work_struct *work)
3387{
3388 int ret;
3389
3390 struct nvme_fc_ctrl *ctrl =
3391 container_of(to_delayed_work(work),
3392 struct nvme_fc_ctrl, connect_work);
3393
3394 ret = nvme_fc_create_association(ctrl);
3395 if (ret)
3396 nvme_fc_reconnect_or_delete(ctrl, ret);
3397 else
3398 		dev_info(ctrl->ctrl.device,
3399 			"NVME-FC{%d}: controller connect complete\n",
3400 ctrl->cnum);
3401}
3402
3403
3404static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
3405 .queue_rq = nvme_fc_queue_rq,
3406 .complete = nvme_fc_complete_rq,
3407 	.init_request	= nvme_fc_init_request,
3408 	.exit_request	= nvme_fc_exit_request,
3409 .init_hctx = nvme_fc_init_admin_hctx,
3410 .timeout = nvme_fc_timeout,
3411};
3412
3413
/*
 * Fails a controller request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_fc_existing_controller(struct nvme_fc_rport *rport,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return found;
}

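/*
 * Illustrative note (duplicate-connect semantics inferred from the check
 * in nvme_fc_init_ctrl() below; the nvme-cli flag name is an assumption):
 * a second connect request matching an existing association on the same
 * rport fails with -EALREADY unless a duplicate is explicitly requested,
 * e.g. "nvme connect ... --duplicate-connect", which sets
 * opts->duplicate_connect.
 */
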
static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	int ret, idx, ctrl_loss_tmo;

	if (!(rport->remoteport.port_role &
	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
		ret = -EBADR;
		goto out_fail;
	}

	if (!opts->duplicate_connect &&
	    nvme_fc_existing_controller(rport, opts)) {
		ret = -EALREADY;
		goto out_fail;
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		ret = -ENOMEM;
		goto out_fail;
	}

	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_free_ctrl;
	}

	/*
	 * if ctrl_loss_tmo is being enforced and the default reconnect delay
	 * is being used, change to a shorter reconnect delay for FC.
	 */
	if (opts->max_reconnects != -1 &&
	    opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY &&
	    opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) {
		ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay;
		opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO;
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
	}
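	/*
	 * Worked example (numbers assume the fabrics-core defaults of a
	 * 10s reconnect_delay and a 600s ctrl_loss_tmo, i.e.
	 * max_reconnects = 60): ctrl_loss_tmo = 60 * 10 = 600s; the delay
	 * is then shortened to NVME_FC_DEFAULT_RECONNECT_TMO (2s) and
	 * max_reconnects becomes DIV_ROUND_UP(600, 2) = 300, preserving
	 * the overall window before the controller is given up on.
	 */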

	ctrl->ctrl.opts = opts;
	ctrl->ctrl.nr_reconnects = 0;
	if (lport->dev)
		ctrl->ctrl.numa_node = dev_to_node(lport->dev);
	else
		ctrl->ctrl.numa_node = NUMA_NO_NODE;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
	ctrl->cnum = idx;
	ctrl->ioq_live = false;
	atomic_set(&ctrl->err_work_active, 0);
	init_waitqueue_head(&ctrl->ioabort_wait);

	get_device(ctrl->dev);
	kref_init(&ctrl->ref);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
	spin_lock_init(&ctrl->lock);

	/* io queue count */
	ctrl->ctrl.queue_count = min_t(unsigned int,
				opts->nr_io_queues,
				lport->ops->max_hw_queues);
	ctrl->ctrl.queue_count++;	/* +1 for admin queue */
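	/*
	 * e.g. (illustrative numbers): nr_io_queues = 8 on an LLDD that
	 * reports max_hw_queues = 4 yields queue_count = 4 + 1 = 5.
	 */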

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->ctrl.cntlid = 0xffff;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
				sizeof(struct nvme_fc_queue), GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ida;

	nvme_fc_init_queue(ctrl, 0);

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
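	/*
	 * struct_size() below evaluates to sizeof(struct nvme_fcp_op_w_sgl)
	 * plus fcprqst_priv_sz bytes for the LLDD's per-request private
	 * area (the flexible "priv" member), with overflow checking.
	 */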
	ctrl->admin_tag_set.cmd_size =
		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
			    ctrl->lport->ops->fcprqst_priv_sz);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (ret)
		goto out_free_queues;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
		ret = PTR_ERR(ctrl->ctrl.fabrics_q);
		goto out_free_admin_tag_set;
	}

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		ret = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_cleanup_fabrics_q;
	}

	/*
	 * Would have been nice to init io queues tag set as well.
	 * However, we require interaction from the controller
	 * for max io queue count before we can do so.
	 * Defer this to the connect path.
	 */

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
	if (ret)
		goto out_cleanup_admin_q;

	/* at this point, teardown path changes to ref counting on nvme ctrl */

	spin_lock_irqsave(&rport->lock, flags);
	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
		goto fail_ctrl;
	}

	if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to schedule initial connect\n",
			ctrl->cnum);
		goto fail_ctrl;
	}

	flush_delayed_work(&ctrl->connect_work);

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

	return &ctrl->ctrl;

fail_ctrl:
	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
	cancel_work_sync(&ctrl->ctrl.reset_work);
	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->connect_work);

	ctrl->ctrl.opts = NULL;

	/* initiate nvme ctrl ref counting teardown */
	nvme_uninit_ctrl(&ctrl->ctrl);

	/* Remove core ctrl ref. */
	nvme_put_ctrl(&ctrl->ctrl);

	/* as we're past the point where we transition to the ref
	 * counting teardown path, if we return a bad pointer here,
	 * the calling routine, thinking it's prior to the
	 * transition, will do an rport put. Since the teardown
	 * path also does an rport put, we do an extra get here
	 * so that proper order/teardown happens.
	 */
	nvme_fc_rport_get(rport);

	return ERR_PTR(-EIO);

out_cleanup_admin_q:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_admin_tag_set:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
	kfree(ctrl->queues);
out_free_ida:
	put_device(ctrl->dev);
	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
	kfree(ctrl);
out_fail:
	/* exit via here doesn't follow ctrl ref points */
	return ERR_PTR(ret);
}


struct nvmet_fc_traddr {
	u64 nn;
	u64 pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

/*
 * This routine validates and extracts the WWNs from the TRADDR string.
 * As kernel parsers need the 0x to determine the number base, always
 * build the string to parse with a 0x prefix before parsing the name
 * strings.
 */
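/*
 * For illustration (WWN values are made up), the two accepted formats are:
 *   nn-0x20000090fa942779:pn-0x10000090fa942779
 *   nn-20000090fa942779:pn-10000090fa942779
 * where each name is exactly NVME_FC_TRADDR_HEXNAMELEN hex digits.
 */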
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}

static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	struct nvme_ctrl *ctrl;
	struct nvmet_fc_traddr laddr = { 0L, 0L };
	struct nvmet_fc_traddr raddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
	if (ret || !raddr.nn || !raddr.pn)
		return ERR_PTR(-EINVAL);

	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
	if (ret || !laddr.nn || !laddr.pn)
		return ERR_PTR(-EINVAL);

	/* find the host and remote ports to connect together */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != laddr.nn ||
		    lport->localport.port_name != laddr.pn ||
		    lport->localport.port_state != FC_OBJSTATE_ONLINE)
			continue;

		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (rport->remoteport.node_name != raddr.nn ||
			    rport->remoteport.port_name != raddr.pn ||
			    rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
				continue;

			/* if we fail to get a reference, fall through; lookup will error */
			if (!nvme_fc_rport_get(rport))
				break;

			spin_unlock_irqrestore(&nvme_fc_lock, flags);

			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
			if (IS_ERR(ctrl))
				nvme_fc_rport_put(rport);
			return ctrl;
		}
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	pr_warn("%s: %s - %s combination not found\n",
		__func__, opts->traddr, opts->host_traddr);
	return ERR_PTR(-ENOENT);
}
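
/*
 * Connect path sketch (nvme-cli flag names are assumed; WWN values and
 * NQN are illustrative):
 *
 *   nvme connect -t fc \
 *	-a "nn-0x20000090fa942779:pn-0x10000090fa942779" \
 *	-w "nn-0x20000090fa9427dd:pn-0x10000090fa9427dd" \
 *	-n "nqn.2014-08.org.nvmexpress:uuid:..."
 *
 * The fabrics core parses the options and calls .create_ctrl (above) with
 * traddr/host_traddr in the formats accepted by nvme_fc_parse_traddr().
 */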

static struct nvmf_transport_ops nvme_fc_transport = {
	.name		= "fc",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_fc_create_ctrl,
};

/* Arbitrary max successive failures. With many subsystems, could be high */
#define DISCOVERY_MAX_FAIL	20

static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long flags;
	LIST_HEAD(local_disc_list);
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	int failcnt = 0;

	spin_lock_irqsave(&nvme_fc_lock, flags);
restart:
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (!nvme_fc_lport_get(lport))
				continue;
			if (!nvme_fc_rport_get(rport)) {
				/*
				 * This is a temporary condition. Upon restart
				 * this rport will be gone from the list.
				 *
				 * Revert the lport put and retry. Anything
				 * added to the list already will be skipped (as
				 * they are no longer list_empty). Loops should
				 * resume at rports that were not yet seen.
				 */
				nvme_fc_lport_put(lport);

				if (failcnt++ < DISCOVERY_MAX_FAIL)
					goto restart;

				pr_err("nvme_discovery: too many reference "
				       "failures\n");
				goto process_local_list;
			}
			if (list_empty(&rport->disc_list))
				list_add_tail(&rport->disc_list,
					      &local_disc_list);
		}
	}

process_local_list:
	while (!list_empty(&local_disc_list)) {
		rport = list_first_entry(&local_disc_list,
					 struct nvme_fc_rport, disc_list);
		list_del_init(&rport->disc_list);
		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		lport = rport->lport;
		/* signal discovery. Won't hurt if it repeats */
		nvme_fc_signal_discovery_scan(lport, rport);
		nvme_fc_rport_put(rport);
		nvme_fc_lport_put(lport);

		spin_lock_irqsave(&nvme_fc_lock, flags);
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return count;
}
static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
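
/*
 * Usage sketch (sysfs path follows from the "fc" class and the
 * "fc_udev_device" device registered below): any write triggers a
 * discovery scan across all known local/remote port pairs, e.g.:
 *
 *   echo 1 > /sys/class/fc/fc_udev_device/nvme_discovery
 */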

static struct attribute *nvme_fc_attrs[] = {
	&dev_attr_nvme_discovery.attr,
	NULL
};

static struct attribute_group nvme_fc_attr_group = {
	.attrs = nvme_fc_attrs,
};

static const struct attribute_group *nvme_fc_attr_groups[] = {
	&nvme_fc_attr_group,
	NULL
};

static struct class fc_class = {
	.name = "fc",
	.dev_groups = nvme_fc_attr_groups,
	.owner = THIS_MODULE,
};

static int __init nvme_fc_init_module(void)
{
	int ret;

	nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
	if (!nvme_fc_wq)
		return -ENOMEM;

	/*
	 * NOTE:
	 * It is expected that in the future the kernel will combine
	 * the FC-isms that currently live under scsi, and that NVME is
	 * now adding to, into a new standalone FC class. The SCSI and
	 * NVME protocols and their devices would live under this new
	 * FC class.
	 *
	 * As we need something to post FC-specific udev events to,
	 * specifically for nvme probe events, start by creating the
	 * new device class. When the new standalone FC class is
	 * put in place, this code will move to a more generic
	 * location for the class.
	 */
	ret = class_register(&fc_class);
	if (ret) {
		pr_err("couldn't register class fc\n");
		goto out_destroy_wq;
	}

	/*
	 * Create a device for the FC-centric udev events
	 */
	fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
				"fc_udev_device");
	if (IS_ERR(fc_udev_device)) {
		pr_err("couldn't create fc_udev device!\n");
		ret = PTR_ERR(fc_udev_device);
		goto out_destroy_class;
	}

	ret = nvmf_register_transport(&nvme_fc_transport);
	if (ret)
		goto out_destroy_device;

	return 0;

out_destroy_device:
	device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
	class_unregister(&fc_class);
out_destroy_wq:
	destroy_workqueue(nvme_fc_wq);

	return ret;
}

static void
nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;

	spin_lock(&rport->lock);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		dev_warn(ctrl->ctrl.device,
			"NVME-FC{%d}: transport unloading: deleting ctrl\n",
			ctrl->cnum);
		nvme_delete_ctrl(&ctrl->ctrl);
	}
	spin_unlock(&rport->lock);
}

static void
nvme_fc_cleanup_for_unload(void)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			nvme_fc_delete_controllers(rport);
		}
	}
}
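
/*
 * Unload sketch: nvme_fc_exit_module() below sets nvme_fc_waiting_to_unload
 * and, if any localports remain, deletes their controllers and then blocks
 * on nvme_fc_unload_proceed, which is completed once the last localport is
 * freed (see the lport teardown paths earlier in this file).
 */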

static void __exit nvme_fc_exit_module(void)
{
	unsigned long flags;
	bool need_cleanup = false;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	nvme_fc_waiting_to_unload = true;
	if (!list_empty(&nvme_fc_lport_list)) {
		need_cleanup = true;
		nvme_fc_cleanup_for_unload();
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);
	if (need_cleanup) {
		pr_info("%s: waiting for ctrl deletes\n", __func__);
		wait_for_completion(&nvme_fc_unload_proceed);
		pr_info("%s: ctrl deletes complete\n", __func__);
	}

	nvmf_unregister_transport(&nvme_fc_transport);

	ida_destroy(&nvme_fc_local_port_cnt);
	ida_destroy(&nvme_fc_ctrl_cnt);

	device_destroy(&fc_class, MKDEV(0, 0));
	class_unregister(&fc_class);
	destroy_workqueue(nvme_fc_wq);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");