// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>

/* *************************** Data Structures/Defines ****************** */

enum nvme_fc_queue_flags {
        NVME_FC_Q_CONNECTED = 0,
        NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO    60      /* seconds */
#define NVME_FC_DEFAULT_RECONNECT_TMO   2       /* delay between reconnect
                                                 * attempts when a connection
                                                 * failure occurs.
                                                 */

struct nvme_fc_queue {
        struct nvme_fc_ctrl     *ctrl;
        struct device           *dev;
        struct blk_mq_hw_ctx    *hctx;
        void                    *lldd_handle;
        size_t                  cmnd_capsule_len;
        u32                     qnum;
        u32                     rqcnt;
        u32                     seqno;

        u64                     connection_id;
        atomic_t                csn;

        unsigned long           flags;
} __aligned(sizeof(u64));       /* alignment for other things alloc'd with */

enum nvme_fcop_flags {
        FCOP_FLAGS_TERMIO       = (1 << 0),
        FCOP_FLAGS_AEN          = (1 << 1),
};

struct nvmefc_ls_req_op {
        struct nvmefc_ls_req    ls_req;

        struct nvme_fc_rport    *rport;
        struct nvme_fc_queue    *queue;
        struct request          *rq;
        u32                     flags;

        int                     ls_error;
        struct completion       ls_done;
        struct list_head        lsreq_list;     /* rport->ls_req_list */
        bool                    req_queued;
};

struct nvmefc_ls_rcv_op {
        struct nvme_fc_rport            *rport;
        struct nvmefc_ls_rsp            *lsrsp;
        union nvmefc_ls_requests        *rqstbuf;
        union nvmefc_ls_responses       *rspbuf;
        u16                             rqstdatalen;
        bool                            handled;
        dma_addr_t                      rspdma;
        struct list_head                lsrcv_list;     /* rport->ls_rcv_list */
} __aligned(sizeof(u64));       /* alignment for other things alloc'd with */

enum nvme_fcpop_state {
        FCPOP_STATE_UNINIT      = 0,
        FCPOP_STATE_IDLE        = 1,
        FCPOP_STATE_ACTIVE      = 2,
        FCPOP_STATE_ABORTED     = 3,
        FCPOP_STATE_COMPLETE    = 4,
};

struct nvme_fc_fcp_op {
        struct nvme_request     nreq;   /*
                                         * nvme/host/core.c
                                         * requires this to be
                                         * the 1st element in the
                                         * private structure
                                         * associated with the
                                         * request.
                                         */
        struct nvmefc_fcp_req   fcp_req;

        struct nvme_fc_ctrl     *ctrl;
        struct nvme_fc_queue    *queue;
        struct request          *rq;

        atomic_t                state;
        u32                     flags;
        u32                     rqno;
        u32                     nents;

        struct nvme_fc_cmd_iu   cmd_iu;
        struct nvme_fc_ersp_iu  rsp_iu;
};

struct nvme_fcp_op_w_sgl {
        struct nvme_fc_fcp_op   op;
        struct scatterlist      sgl[NVME_INLINE_SG_CNT];
        uint8_t                 priv[];
};

struct nvme_fc_lport {
        struct nvme_fc_local_port       localport;

        struct ida                      endp_cnt;
        struct list_head                port_list;      /* nvme_fc_lport_list */
        struct list_head                endp_list;
        struct device                   *dev;   /* physical device for dma */
        struct nvme_fc_port_template    *ops;
        struct kref                     ref;
        atomic_t                        act_rport_cnt;
} __aligned(sizeof(u64));       /* alignment for other things alloc'd with */

struct nvme_fc_rport {
        struct nvme_fc_remote_port      remoteport;

        struct list_head                endp_list;      /* for lport->endp_list */
        struct list_head                ctrl_list;
        struct list_head                ls_req_list;
        struct list_head                ls_rcv_list;
        struct list_head                disc_list;
        struct device                   *dev;   /* physical device for dma */
        struct nvme_fc_lport            *lport;
        spinlock_t                      lock;
        struct kref                     ref;
        atomic_t                        act_ctrl_cnt;
        unsigned long                   dev_loss_end;
        struct work_struct              lsrcv_work;
} __aligned(sizeof(u64));       /* alignment for other things alloc'd with */

/* fc_ctrl flags values - specified as bit positions */
#define ASSOC_ACTIVE            0
#define ASSOC_FAILED            1
#define FCCTRL_TERMIO           2

struct nvme_fc_ctrl {
        spinlock_t              lock;
        struct nvme_fc_queue    *queues;
        struct device           *dev;
        struct nvme_fc_lport    *lport;
        struct nvme_fc_rport    *rport;
        u32                     cnum;

        bool                    ioq_live;
        u64                     association_id;
        struct nvmefc_ls_rcv_op *rcv_disconn;

        struct list_head        ctrl_list;      /* rport->ctrl_list */

        struct blk_mq_tag_set   admin_tag_set;
        struct blk_mq_tag_set   tag_set;

        struct work_struct      ioerr_work;
        struct delayed_work     connect_work;

        struct kref             ref;
        unsigned long           flags;
        u32                     iocnt;
        wait_queue_head_t       ioabort_wait;

        struct nvme_fc_fcp_op   aen_ops[NVME_NR_AEN_COMMANDS];

        struct nvme_ctrl        ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
        return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
        return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
        return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
        return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}


/* *************************** Globals **************************** */

static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;

static bool nvme_fc_waiting_to_unload;
static DECLARE_COMPLETION(nvme_fc_unload_proceed);

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;

static void nvme_fc_complete_rq(struct request *rq);

/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
        struct nvme_fc_queue *, unsigned int);

static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);

static void
nvme_fc_free_lport(struct kref *ref)
{
        struct nvme_fc_lport *lport =
                container_of(ref, struct nvme_fc_lport, ref);
        unsigned long flags;

        WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
        WARN_ON(!list_empty(&lport->endp_list));

        /* remove from transport list */
        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_del(&lport->port_list);
        if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
                complete(&nvme_fc_unload_proceed);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
        ida_destroy(&lport->endp_cnt);

        put_device(lport->dev);

        kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
        kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
        return kref_get_unless_zero(&lport->ref);
}

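/*
 * Check the global lport list for a localport with the same node/port
 * names that was deregistered but still has outstanding references.
 * If one is found on the same hw device, take a reference and resume
 * it in place rather than allocating a new structure.
 */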
static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
                        struct nvme_fc_port_template *ops,
                        struct device *dev)
{
        struct nvme_fc_lport *lport;
        unsigned long flags;

        spin_lock_irqsave(&nvme_fc_lock, flags);

        list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
                if (lport->localport.node_name != pinfo->node_name ||
                    lport->localport.port_name != pinfo->port_name)
                        continue;

                if (lport->dev != dev) {
                        lport = ERR_PTR(-EXDEV);
                        goto out_done;
                }

                if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
                        lport = ERR_PTR(-EEXIST);
                        goto out_done;
                }

                if (!nvme_fc_lport_get(lport)) {
                        /*
                         * fails if ref cnt already 0. If so,
                         * act as if lport already deleted
                         */
                        lport = NULL;
                        goto out_done;
                }

                /* resume the lport */

                lport->ops = ops;
                lport->localport.port_role = pinfo->port_role;
                lport->localport.port_id = pinfo->port_id;
                lport->localport.port_state = FC_OBJSTATE_ONLINE;

                spin_unlock_irqrestore(&nvme_fc_lock, flags);

                return lport;
        }

        lport = NULL;

out_done:
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        return lport;
}

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:    pointer to information about the port to be registered
 * @template: LLDD entrypoints and operational parameters for the port
 * @dev:      physical hardware device node port corresponds to. Will be
 *            used for DMA mappings
 * @portptr:  pointer to a local port pointer. Upon success, the routine
 *            will allocate a nvme_fc_local_port structure and place its
 *            address in the local port pointer. Upon failure, local port
 *            pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
                        struct nvme_fc_port_template *template,
                        struct device *dev,
                        struct nvme_fc_local_port **portptr)
{
        struct nvme_fc_lport *newrec;
        unsigned long flags;
        int ret, idx;

        if (!template->localport_delete || !template->remoteport_delete ||
            !template->ls_req || !template->fcp_io ||
            !template->ls_abort || !template->fcp_abort ||
            !template->max_hw_queues || !template->max_sgl_segments ||
            !template->max_dif_sgl_segments || !template->dma_boundary) {
                ret = -EINVAL;
                goto out_reghost_failed;
        }

        /*
         * look to see if there is already a localport that had been
         * deregistered and is in the process of waiting for all the
         * references to fully be removed. If the references haven't
         * expired, we can simply re-enable the localport. Remoteports
         * and controller reconnections should resume naturally.
         */
        newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

        /* found an lport, but something about its state is bad */
        if (IS_ERR(newrec)) {
                ret = PTR_ERR(newrec);
                goto out_reghost_failed;

        /* found existing lport, which was resumed */
        } else if (newrec) {
                *portptr = &newrec->localport;
                return 0;
        }

        /* nothing found - allocate a new localport struct */

        newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
                         GFP_KERNEL);
        if (!newrec) {
                ret = -ENOMEM;
                goto out_reghost_failed;
        }

        idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
        if (idx < 0) {
                ret = -ENOSPC;
                goto out_fail_kfree;
        }

        if (!get_device(dev) && dev) {
                ret = -ENODEV;
                goto out_ida_put;
        }

        INIT_LIST_HEAD(&newrec->port_list);
        INIT_LIST_HEAD(&newrec->endp_list);
        kref_init(&newrec->ref);
        atomic_set(&newrec->act_rport_cnt, 0);
        newrec->ops = template;
        newrec->dev = dev;
        ida_init(&newrec->endp_cnt);
        if (template->local_priv_sz)
                newrec->localport.private = &newrec[1];
        else
                newrec->localport.private = NULL;
        newrec->localport.node_name = pinfo->node_name;
        newrec->localport.port_name = pinfo->port_name;
        newrec->localport.port_role = pinfo->port_role;
        newrec->localport.port_id = pinfo->port_id;
        newrec->localport.port_state = FC_OBJSTATE_ONLINE;
        newrec->localport.port_num = idx;

        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        if (dev)
                dma_set_seg_boundary(dev, template->dma_boundary);

        *portptr = &newrec->localport;
        return 0;

out_ida_put:
        ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
        kfree(newrec);
out_reghost_failed:
        *portptr = NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                                LLDD to deregister/remove a previously
 *                                registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
        struct nvme_fc_lport *lport = localport_to_lport(portptr);
        unsigned long flags;

        if (!portptr)
                return -EINVAL;

        spin_lock_irqsave(&nvme_fc_lock, flags);

        if (portptr->port_state != FC_OBJSTATE_ONLINE) {
                spin_unlock_irqrestore(&nvme_fc_lock, flags);
                return -EINVAL;
        }
        portptr->port_state = FC_OBJSTATE_DELETED;

        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        if (atomic_read(&lport->act_rport_cnt) == 0)
                lport->ops->localport_delete(&lport->localport);

        nvme_fc_lport_put(lport);

        return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

/*
 * TRADDR strings, per FC-NVME, are fixed format:
 *   "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * being specified:
 *   "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 * 19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH            64

static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
                struct nvme_fc_rport *rport)
{
        char hostaddr[FCNVME_TRADDR_LENGTH];    /* NVMEFC_HOST_TRADDR=...*/
        char tgtaddr[FCNVME_TRADDR_LENGTH];     /* NVMEFC_TRADDR=...*/
        char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

        if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
                return;

        snprintf(hostaddr, sizeof(hostaddr),
                "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
                lport->localport.node_name, lport->localport.port_name);
        snprintf(tgtaddr, sizeof(tgtaddr),
                "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
                rport->remoteport.node_name, rport->remoteport.port_name);
        kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}

static void
nvme_fc_free_rport(struct kref *ref)
{
        struct nvme_fc_rport *rport =
                container_of(ref, struct nvme_fc_rport, ref);
        struct nvme_fc_lport *lport =
                localport_to_lport(rport->remoteport.localport);
        unsigned long flags;

        WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
        WARN_ON(!list_empty(&rport->ctrl_list));

        /* remove from lport list */
        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_del(&rport->endp_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        WARN_ON(!list_empty(&rport->disc_list));
        ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

        kfree(rport);

        nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
        kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
        return kref_get_unless_zero(&rport->ref);
}

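/*
 * Connectivity to a remote port has been re-established. For a
 * controller that was waiting on connectivity, schedule a reconnect;
 * controllers already resetting or deleting need no action.
 */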
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
        switch (ctrl->ctrl.state) {
        case NVME_CTRL_NEW:
        case NVME_CTRL_CONNECTING:
                /*
                 * As all reconnects were suppressed, schedule a
                 * connect.
                 */
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: connectivity re-established. "
                        "Attempting reconnect\n", ctrl->cnum);

                queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
                break;

        case NVME_CTRL_RESETTING:
                /*
                 * Controller is already in the process of terminating the
                 * association. No need to do anything further. The reconnect
                 * step will naturally occur after the reset completes.
                 */
                break;

        default:
                /* no action to take - let it delete */
                break;
        }
}

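/*
 * Search the lport's endpoint list for a remoteport with matching
 * WWNN/WWPN that was deregistered and is still awaiting reconnect
 * within dev_loss_tmo. If found, bring it back online and resume
 * reconnects on its controllers instead of allocating a new rport.
 */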
static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
                                struct nvme_fc_port_info *pinfo)
{
        struct nvme_fc_rport *rport;
        struct nvme_fc_ctrl *ctrl;
        unsigned long flags;

        spin_lock_irqsave(&nvme_fc_lock, flags);

        list_for_each_entry(rport, &lport->endp_list, endp_list) {
                if (rport->remoteport.node_name != pinfo->node_name ||
                    rport->remoteport.port_name != pinfo->port_name)
                        continue;

                if (!nvme_fc_rport_get(rport)) {
                        rport = ERR_PTR(-ENOLCK);
                        goto out_done;
                }

                spin_unlock_irqrestore(&nvme_fc_lock, flags);

                spin_lock_irqsave(&rport->lock, flags);

                /* has it been unregistered */
                if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
                        /* means lldd called us twice */
                        spin_unlock_irqrestore(&rport->lock, flags);
                        nvme_fc_rport_put(rport);
                        return ERR_PTR(-ESTALE);
                }

                rport->remoteport.port_role = pinfo->port_role;
                rport->remoteport.port_id = pinfo->port_id;
                rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
                rport->dev_loss_end = 0;

                /*
                 * kick off a reconnect attempt on all associations to the
                 * remote port. A successful reconnect will resume i/o.
                 */
                list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
                        nvme_fc_resume_controller(ctrl);

                spin_unlock_irqrestore(&rport->lock, flags);

                return rport;
        }

        rport = NULL;

out_done:
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        return rport;
}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
                        struct nvme_fc_port_info *pinfo)
{
        if (pinfo->dev_loss_tmo)
                rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
        else
                rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                               LLDD to register the existence of a NVME
 *                               subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
                                struct nvme_fc_port_info *pinfo,
                                struct nvme_fc_remote_port **portptr)
{
        struct nvme_fc_lport *lport = localport_to_lport(localport);
        struct nvme_fc_rport *newrec;
        unsigned long flags;
        int ret, idx;

        if (!nvme_fc_lport_get(lport)) {
                ret = -ESHUTDOWN;
                goto out_reghost_failed;
        }

        /*
         * look to see if there is already a remoteport that is waiting
         * for a reconnect (within dev_loss_tmo) with the same WWN's.
         * If so, transition to it and reconnect.
         */
        newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

        /* found an rport, but something about its state is bad */
        if (IS_ERR(newrec)) {
                ret = PTR_ERR(newrec);
                goto out_lport_put;

        /* found existing rport, which was resumed */
        } else if (newrec) {
                nvme_fc_lport_put(lport);
                __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
                nvme_fc_signal_discovery_scan(lport, newrec);
                *portptr = &newrec->remoteport;
                return 0;
        }

        /* nothing found - allocate a new remoteport struct */

        newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
                         GFP_KERNEL);
        if (!newrec) {
                ret = -ENOMEM;
                goto out_lport_put;
        }

        idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
        if (idx < 0) {
                ret = -ENOSPC;
                goto out_kfree_rport;
        }

        INIT_LIST_HEAD(&newrec->endp_list);
        INIT_LIST_HEAD(&newrec->ctrl_list);
        INIT_LIST_HEAD(&newrec->ls_req_list);
        INIT_LIST_HEAD(&newrec->disc_list);
        kref_init(&newrec->ref);
        atomic_set(&newrec->act_ctrl_cnt, 0);
        spin_lock_init(&newrec->lock);
        newrec->remoteport.localport = &lport->localport;
        INIT_LIST_HEAD(&newrec->ls_rcv_list);
        newrec->dev = lport->dev;
        newrec->lport = lport;
        if (lport->ops->remote_priv_sz)
                newrec->remoteport.private = &newrec[1];
        else
                newrec->remoteport.private = NULL;
        newrec->remoteport.port_role = pinfo->port_role;
        newrec->remoteport.node_name = pinfo->node_name;
        newrec->remoteport.port_name = pinfo->port_name;
        newrec->remoteport.port_id = pinfo->port_id;
        newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
        newrec->remoteport.port_num = idx;
        __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
        INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);

        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_add_tail(&newrec->endp_list, &lport->endp_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        nvme_fc_signal_discovery_scan(lport, newrec);

        *portptr = &newrec->remoteport;
        return 0;

out_kfree_rport:
        kfree(newrec);
out_lport_put:
        nvme_fc_lport_put(lport);
out_reghost_failed:
        *portptr = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

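/*
 * Abort every LS request still outstanding on the rport. The rport lock
 * cannot be held across the LLDD's ls_abort callback, so each op is
 * flagged FCOP_FLAGS_TERMIO, the lock is dropped for the abort, and the
 * list scan restarts from the top until no unflagged ops remain.
 */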
static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
        struct nvmefc_ls_req_op *lsop;
        unsigned long flags;

restart:
        spin_lock_irqsave(&rport->lock, flags);

        list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
                if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
                        lsop->flags |= FCOP_FLAGS_TERMIO;
                        spin_unlock_irqrestore(&rport->lock, flags);
                        rport->lport->ops->ls_abort(&rport->lport->localport,
                                                &rport->remoteport,
                                                &lsop->ls_req);
                        goto restart;
                }
        }
        spin_unlock_irqrestore(&rport->lock, flags);

        return 0;
}

static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
        dev_info(ctrl->ctrl.device,
                "NVME-FC{%d}: controller connectivity lost. Awaiting "
                "Reconnect", ctrl->cnum);

        switch (ctrl->ctrl.state) {
        case NVME_CTRL_NEW:
        case NVME_CTRL_LIVE:
                /*
                 * Schedule a controller reset. The reset will terminate the
                 * association and schedule the reconnect timer. Reconnects
                 * will be attempted until either the ctlr_loss_tmo
                 * (max_retries * connect_delay) expires or the remoteport's
                 * dev_loss_tmo expires.
                 */
                if (nvme_reset_ctrl(&ctrl->ctrl)) {
                        dev_warn(ctrl->ctrl.device,
                                "NVME-FC{%d}: Couldn't schedule reset.\n",
                                ctrl->cnum);
                        nvme_delete_ctrl(&ctrl->ctrl);
                }
                break;

        case NVME_CTRL_CONNECTING:
                /*
                 * The association has already been terminated and the
                 * controller is attempting reconnects. No need to do
                 * anything further. Reconnects will be attempted until
                 * either the ctlr_loss_tmo (max_retries * connect_delay)
                 * expires or the remoteport's dev_loss_tmo expires.
                 */
                break;

        case NVME_CTRL_RESETTING:
                /*
                 * Controller is already in the process of terminating the
                 * association. No need to do anything further. The reconnect
                 * step will kick in naturally after the association is
                 * terminated.
                 */
                break;

        case NVME_CTRL_DELETING:
        case NVME_CTRL_DELETING_NOIO:
        default:
                /* no action to take - let it delete */
                break;
        }
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                                 LLDD to deregister/remove a previously
 *                                 registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
        struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
        struct nvme_fc_ctrl *ctrl;
        unsigned long flags;

        if (!portptr)
                return -EINVAL;

        spin_lock_irqsave(&rport->lock, flags);

        if (portptr->port_state != FC_OBJSTATE_ONLINE) {
                spin_unlock_irqrestore(&rport->lock, flags);
                return -EINVAL;
        }
        portptr->port_state = FC_OBJSTATE_DELETED;

        rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

        list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
                /* if dev_loss_tmo==0, dev loss is immediate */
                if (!portptr->dev_loss_tmo) {
                        dev_warn(ctrl->ctrl.device,
                                "NVME-FC{%d}: controller connectivity lost.\n",
                                ctrl->cnum);
                        nvme_delete_ctrl(&ctrl->ctrl);
                } else
                        nvme_fc_ctrl_connectivity_loss(ctrl);
        }

        spin_unlock_irqrestore(&rport->lock, flags);

        nvme_fc_abort_lsops(rport);

        if (atomic_read(&rport->act_ctrl_cnt) == 0)
                rport->lport->ops->remoteport_delete(portptr);

        /*
         * release the reference. Once all controllers are gone (which
         * should only occur after dev_loss_tmo expires), the rport will
         * be torn down.
         */
        nvme_fc_rport_put(rport);

        return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);

/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
        struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

        nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
                        u32 dev_loss_tmo)
{
        struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
        unsigned long flags;

        spin_lock_irqsave(&rport->lock, flags);

        if (portptr->port_state != FC_OBJSTATE_ONLINE) {
                spin_unlock_irqrestore(&rport->lock, flags);
                return -EINVAL;
        }

        /* a dev_loss_tmo of 0 (immediate) is allowed to be set */
        rport->remoteport.dev_loss_tmo = dev_loss_tmo;

        spin_unlock_irqrestore(&rport->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
{
        return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
        enum dma_data_direction dir)
{
        if (dev)
                dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        if (dev)
                dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        if (dev)
                dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        WARN_ON(nents == 0 || sg[0].length == 0);

        for_each_sg(sg, s, nents, i) {
                s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                s->dma_length = s->length;
#endif
        }
        return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        if (dev)
                dma_unmap_sg(dev, sg, nents, dir);
}

/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

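/*
 * Unlink a completed LS request from the rport's ls_req_list, unmap the
 * single DMA region covering its request and response buffers, and drop
 * the rport reference taken when the LS was sent.
 */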
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
        struct nvme_fc_rport *rport = lsop->rport;
        struct nvmefc_ls_req *lsreq = &lsop->ls_req;
        unsigned long flags;

        spin_lock_irqsave(&rport->lock, flags);

        if (!lsop->req_queued) {
                spin_unlock_irqrestore(&rport->lock, flags);
                return;
        }

        list_del(&lsop->lsreq_list);

        lsop->req_queued = false;

        spin_unlock_irqrestore(&rport->lock, flags);

        fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
                                  (lsreq->rqstlen + lsreq->rsplen),
                                  DMA_BIDIRECTIONAL);

        nvme_fc_rport_put(rport);
}

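/*
 * Common LS transmit path: takes an rport reference, maps the request
 * and response buffers as one bidirectional DMA region, links the op on
 * the rport's ls_req_list, and calls the LLDD's ls_req entry point. If
 * the LLDD rejects the request, the op is unlinked and unmapped before
 * the error is returned.
 */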
static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
        struct nvmefc_ls_req_op *lsop,
        void (*done)(struct nvmefc_ls_req *req, int status))
{
        struct nvmefc_ls_req *lsreq = &lsop->ls_req;
        unsigned long flags;
        int ret = 0;

        if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
                return -ECONNREFUSED;

        if (!nvme_fc_rport_get(rport))
                return -ESHUTDOWN;

        lsreq->done = done;
        lsop->rport = rport;
        lsop->req_queued = false;
        INIT_LIST_HEAD(&lsop->lsreq_list);
        init_completion(&lsop->ls_done);

        lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
                                  lsreq->rqstlen + lsreq->rsplen,
                                  DMA_BIDIRECTIONAL);
        if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
                ret = -EFAULT;
                goto out_putrport;
        }
        lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

        spin_lock_irqsave(&rport->lock, flags);

        list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

        lsop->req_queued = true;

        spin_unlock_irqrestore(&rport->lock, flags);

        ret = rport->lport->ops->ls_req(&rport->lport->localport,
                                        &rport->remoteport, lsreq);
        if (ret)
                goto out_unlink;

        return 0;

out_unlink:
        lsop->ls_error = ret;
        spin_lock_irqsave(&rport->lock, flags);
        lsop->req_queued = false;
        list_del(&lsop->lsreq_list);
        spin_unlock_irqrestore(&rport->lock, flags);
        fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
                                  (lsreq->rqstlen + lsreq->rsplen),
                                  DMA_BIDIRECTIONAL);
out_putrport:
        nvme_fc_rport_put(rport);

        return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
        struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

        lsop->ls_error = status;
        complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
        struct nvmefc_ls_req *lsreq = &lsop->ls_req;
        struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
        int ret;

        ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

        if (!ret) {
                /*
                 * No timeout/not interruptible as we need the struct
                 * to exist until the lldd calls us back. Thus mandate
                 * wait until driver calls back. lldd responsible for
                 * the timeout action.
                 */
                wait_for_completion(&lsop->ls_done);

                __nvme_fc_finish_ls_req(lsop);

                ret = lsop->ls_error;
        }

        if (ret)
                return ret;

        /* ACC or RJT payload ? */
        if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
                return -ENXIO;

        return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
        struct nvmefc_ls_req_op *lsop,
        void (*done)(struct nvmefc_ls_req *req, int status))
{
        /* don't wait for completion */

        return __nvme_fc_send_ls_req(rport, lsop, done);
}

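/*
 * Build and send a FC-NVME Create Association LS for the admin queue.
 * The request, expected ACC response, and the LLDD's private LS space
 * are carved from a single allocation trailing the lsop. On success the
 * association ID and admin queue connection ID from the ACC are
 * recorded under the ctrl lock.
 */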
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
        struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
        struct nvmefc_ls_req_op *lsop;
        struct nvmefc_ls_req *lsreq;
        struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
        struct fcnvme_ls_cr_assoc_acc *assoc_acc;
        unsigned long flags;
        int ret, fcret = 0;

        lsop = kzalloc((sizeof(*lsop) +
                         sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
                         ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
        if (!lsop) {
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: send Create Association failed: ENOMEM\n",
                        ctrl->cnum);
                ret = -ENOMEM;
                goto out_no_memory;
        }

        assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
        assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
        lsreq = &lsop->ls_req;
        if (ctrl->lport->ops->lsrqst_priv_sz)
                lsreq->private = &assoc_acc[1];
        else
                lsreq->private = NULL;

        assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
        assoc_rqst->desc_list_len =
                        cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

        assoc_rqst->assoc_cmd.desc_tag =
                        cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
        assoc_rqst->assoc_cmd.desc_len =
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

        assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
        assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
        /* Linux supports only Dynamic controllers */
        assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
        uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
        strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
                min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
        strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
                min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

        lsop->queue = queue;
        lsreq->rqstaddr = assoc_rqst;
        lsreq->rqstlen = sizeof(*assoc_rqst);
        lsreq->rspaddr = assoc_acc;
        lsreq->rsplen = sizeof(*assoc_acc);
        lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

        ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
        if (ret)
                goto out_free_buffer;

        /* process connect LS completion */

        /* validate the ACC response */
        if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
                fcret = VERR_LSACC;
        else if (assoc_acc->hdr.desc_list_len !=
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_ls_cr_assoc_acc)))
                fcret = VERR_CR_ASSOC_ACC_LEN;
        else if (assoc_acc->hdr.rqst.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_RQST))
                fcret = VERR_LSDESC_RQST;
        else if (assoc_acc->hdr.rqst.desc_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
                fcret = VERR_LSDESC_RQST_LEN;
        else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
                fcret = VERR_CR_ASSOC;
        else if (assoc_acc->associd.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
                fcret = VERR_ASSOC_ID;
        else if (assoc_acc->associd.desc_len !=
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_assoc_id)))
                fcret = VERR_ASSOC_ID_LEN;
        else if (assoc_acc->connectid.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_CONN_ID))
                fcret = VERR_CONN_ID;
        else if (assoc_acc->connectid.desc_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
                fcret = VERR_CONN_ID_LEN;

        if (fcret) {
                ret = -EBADF;
                dev_err(ctrl->dev,
                        "q %d Create Association LS failed: %s\n",
                        queue->qnum, validation_errors[fcret]);
        } else {
                spin_lock_irqsave(&ctrl->lock, flags);
                ctrl->association_id =
                        be64_to_cpu(assoc_acc->associd.association_id);
                queue->connection_id =
                        be64_to_cpu(assoc_acc->connectid.connection_id);
                set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
                spin_unlock_irqrestore(&ctrl->lock, flags);
        }

out_free_buffer:
        kfree(lsop);
out_no_memory:
        if (ret)
                dev_err(ctrl->dev,
                        "queue %d connect admin queue failed (%d).\n",
                        queue->qnum, ret);
        return ret;
}

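/*
 * Build and send a FC-NVME Create Connection LS to attach one I/O queue
 * to the existing association. Mirrors the Create Association path
 * above, but carries the association ID plus the queue's qid and sqsize.
 */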
static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
                        u16 qsize, u16 ersp_ratio)
{
        struct nvmefc_ls_req_op *lsop;
        struct nvmefc_ls_req *lsreq;
        struct fcnvme_ls_cr_conn_rqst *conn_rqst;
        struct fcnvme_ls_cr_conn_acc *conn_acc;
        int ret, fcret = 0;

        lsop = kzalloc((sizeof(*lsop) +
                         sizeof(*conn_rqst) + sizeof(*conn_acc) +
                         ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
        if (!lsop) {
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
                        ctrl->cnum);
                ret = -ENOMEM;
                goto out_no_memory;
        }

        conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
        conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
        lsreq = &lsop->ls_req;
        if (ctrl->lport->ops->lsrqst_priv_sz)
                lsreq->private = (void *)&conn_acc[1];
        else
                lsreq->private = NULL;

        conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
        conn_rqst->desc_list_len = cpu_to_be32(
                                sizeof(struct fcnvme_lsdesc_assoc_id) +
                                sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

        conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
        conn_rqst->associd.desc_len =
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_assoc_id));
        conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
        conn_rqst->connect_cmd.desc_tag =
                        cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
        conn_rqst->connect_cmd.desc_len =
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
        conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
        conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
        conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

        lsop->queue = queue;
        lsreq->rqstaddr = conn_rqst;
        lsreq->rqstlen = sizeof(*conn_rqst);
        lsreq->rspaddr = conn_acc;
        lsreq->rsplen = sizeof(*conn_acc);
        lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

        ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
        if (ret)
                goto out_free_buffer;

        /* process connect LS completion */

        /* validate the ACC response */
        if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
                fcret = VERR_LSACC;
        else if (conn_acc->hdr.desc_list_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
                fcret = VERR_CR_CONN_ACC_LEN;
        else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
                fcret = VERR_LSDESC_RQST;
        else if (conn_acc->hdr.rqst.desc_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
                fcret = VERR_LSDESC_RQST_LEN;
        else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
                fcret = VERR_CR_CONN;
        else if (conn_acc->connectid.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_CONN_ID))
                fcret = VERR_CONN_ID;
        else if (conn_acc->connectid.desc_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
                fcret = VERR_CONN_ID_LEN;

        if (fcret) {
                ret = -EBADF;
                dev_err(ctrl->dev,
                        "q %d Create I/O Connection LS failed: %s\n",
                        queue->qnum, validation_errors[fcret]);
        } else {
                queue->connection_id =
                        be64_to_cpu(conn_acc->connectid.connection_id);
                set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
        }

out_free_buffer:
        kfree(lsop);
out_no_memory:
        if (ret)
                dev_err(ctrl->dev,
                        "queue %d connect I/O queue failed (%d).\n",
                        queue->qnum, ret);
        return ret;
}

static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
        struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

        __nvme_fc_finish_ls_req(lsop);

        /* fc-nvme initiator doesn't care about success or failure of cmd */

        kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association. Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried. As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
        struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
        struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
        struct nvmefc_ls_req_op *lsop;
        struct nvmefc_ls_req *lsreq;
        int ret;

        lsop = kzalloc((sizeof(*lsop) +
                        sizeof(*discon_rqst) + sizeof(*discon_acc) +
                        ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
        if (!lsop) {
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: send Disconnect Association "
                        "failed: ENOMEM\n",
                        ctrl->cnum);
                return;
        }

        discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
        discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
        lsreq = &lsop->ls_req;
        if (ctrl->lport->ops->lsrqst_priv_sz)
                lsreq->private = (void *)&discon_acc[1];
        else
                lsreq->private = NULL;

        nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
                                ctrl->association_id);

        ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
                                nvme_fc_disconnect_assoc_done);
        if (ret)
                kfree(lsop);
}

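/*
 * Completion of a transmitted LS response: unlink the received-LS op
 * from the rport's ls_rcv_list, unmap its response buffer, free the op,
 * and drop the rport reference held while the LS was being serviced.
 */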
static void
nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
        struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
        struct nvme_fc_rport *rport = lsop->rport;
        struct nvme_fc_lport *lport = rport->lport;
        unsigned long flags;

        spin_lock_irqsave(&rport->lock, flags);
        list_del(&lsop->lsrcv_list);
        spin_unlock_irqrestore(&rport->lock, flags);

        fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
                                sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
        fc_dma_unmap_single(lport->dev, lsop->rspdma,
                        sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

        kfree(lsop);

        nvme_fc_rport_put(rport);
}

static void
nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
{
        struct nvme_fc_rport *rport = lsop->rport;
        struct nvme_fc_lport *lport = rport->lport;
        struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
        int ret;

        fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
                        sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

        ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
                                     lsop->lsrsp);
        if (ret) {
                dev_warn(lport->dev,
                        "LLDD rejected LS RSP xmt: LS %d status %d\n",
                        w0->ls_cmd, ret);
                nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
                return;
        }
}

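/*
 * Find the controller whose association ID matches a received
 * Disconnect Association LS and park the lsop on ctrl->rcv_disconn;
 * the response is sent later, once association teardown has ABTS'd the
 * outstanding exchanges. If a disconnect LS was already pending, it is
 * rejected here as a duplicate.
 */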
1504 | static struct nvme_fc_ctrl * | |
1505 | nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport, | |
1506 | struct nvmefc_ls_rcv_op *lsop) | |
1507 | { | |
1508 | struct fcnvme_ls_disconnect_assoc_rqst *rqst = | |
1509 | &lsop->rqstbuf->rq_dis_assoc; | |
1510 | struct nvme_fc_ctrl *ctrl, *ret = NULL; | |
1511 | struct nvmefc_ls_rcv_op *oldls = NULL; | |
1512 | u64 association_id = be64_to_cpu(rqst->associd.association_id); | |
1513 | unsigned long flags; | |
1514 | ||
1515 | spin_lock_irqsave(&rport->lock, flags); | |
1516 | ||
1517 | list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { | |
1518 | if (!nvme_fc_ctrl_get(ctrl)) | |
1519 | continue; | |
1520 | spin_lock(&ctrl->lock); | |
1521 | if (association_id == ctrl->association_id) { | |
1522 | oldls = ctrl->rcv_disconn; | |
1523 | ctrl->rcv_disconn = lsop; | |
1524 | ret = ctrl; | |
1525 | } | |
1526 | spin_unlock(&ctrl->lock); | |
1527 | if (ret) | |
1528 | /* leave the ctrl get reference */ | |
1529 | break; | |
1530 | nvme_fc_ctrl_put(ctrl); | |
1531 | } | |
1532 | ||
1533 | spin_unlock_irqrestore(&rport->lock, flags); | |
1534 | ||
1535 | /* transmit a response for anything that was pending */ | |
1536 | if (oldls) { | |
1537 | dev_info(rport->lport->dev, | |
1538 | "NVME-FC{%d}: Multiple Disconnect Association " | |
1539 | "LS's received\n", ctrl->cnum); | |
1540 | /* overwrite good response with bogus failure */ | |
1541 | oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, | |
1542 | sizeof(*oldls->rspbuf), | |
1543 | rqst->w0.ls_cmd, | |
1544 | FCNVME_RJT_RC_UNAB, | |
1545 | FCNVME_RJT_EXP_NONE, 0); | |
1546 | nvme_fc_xmt_ls_rsp(oldls); | |
1547 | } | |
1548 | ||
1549 | return ret; | |
1550 | } | |
1551 | ||
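/*
 * Illustrative sketch (hypothetical types, assumes kernel headers): the
 * lookup idiom used by nvme_fc_match_disconn_ls() above - walk a
 * lock-protected list and take a reference on the match while still
 * under the lock, so the object cannot be freed behind the caller.
 */
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct obj {
	struct list_head entry;
	struct kref ref;
	u64 id;
};

struct owner {
	spinlock_t lock;
	struct list_head list;
};

static struct obj *find_and_get(struct owner *owner, u64 id)
{
	struct obj *obj, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&owner->lock, flags);
	list_for_each_entry(obj, &owner->list, entry) {
		if (obj->id == id && kref_get_unless_zero(&obj->ref)) {
			found = obj;	/* reference travels with the return */
			break;
		}
	}
	spin_unlock_irqrestore(&owner->lock, flags);

	return found;	/* caller must kref_put() when done */
}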
1552 | /* | |
1553 | * returns true to mean the LS was handled and the ls_rsp can be sent | |
1554 | * returns false to defer ls_rsp xmt (will be done as part of | |
1555 | * association termination) | |
1556 | */ | |
1557 | static bool | |
1558 | nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop) | |
1559 | { | |
1560 | struct nvme_fc_rport *rport = lsop->rport; | |
1561 | struct fcnvme_ls_disconnect_assoc_rqst *rqst = | |
1562 | &lsop->rqstbuf->rq_dis_assoc; | |
1563 | struct fcnvme_ls_disconnect_assoc_acc *acc = | |
1564 | &lsop->rspbuf->rsp_dis_assoc; | |
1565 | struct nvme_fc_ctrl *ctrl = NULL; | |
1566 | int ret = 0; | |
1567 | ||
1568 | memset(acc, 0, sizeof(*acc)); | |
1569 | ||
1570 | ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst); | |
1571 | if (!ret) { | |
1572 | /* match an active association */ | |
1573 | ctrl = nvme_fc_match_disconn_ls(rport, lsop); | |
1574 | if (!ctrl) | |
1575 | ret = VERR_NO_ASSOC; | |
1576 | } | |
1577 | ||
1578 | if (ret) { | |
1579 | dev_info(rport->lport->dev, | |
1580 | "Disconnect LS failed: %s\n", | |
1581 | validation_errors[ret]); | |
1582 | lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc, | |
1583 | sizeof(*acc), rqst->w0.ls_cmd, | |
1584 | (ret == VERR_NO_ASSOC) ? | |
1585 | FCNVME_RJT_RC_INV_ASSOC : | |
1586 | FCNVME_RJT_RC_LOGIC, | |
1587 | FCNVME_RJT_EXP_NONE, 0); | |
1588 | return true; | |
1589 | } | |
1590 | ||
1591 | /* format an ACCept response */ | |
1592 | ||
1593 | lsop->lsrsp->rsplen = sizeof(*acc); | |
1594 | ||
1595 | nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, | |
1596 | fcnvme_lsdesc_len( | |
1597 | sizeof(struct fcnvme_ls_disconnect_assoc_acc)), | |
1598 | FCNVME_LS_DISCONNECT_ASSOC); | |
1599 | ||
1600 | /* | |
1601 | * the transmit of the response will occur after the exchanges | |
1602 | * for the association have been ABTS'd by | |
1603 | * nvme_fc_delete_association(). | |
1604 | */ | |
1605 | ||
1606 | /* fail the association */ | |
1607 | nvme_fc_error_recovery(ctrl, "Disconnect Association LS received"); | |
1608 | ||
1609 | /* release the reference taken by nvme_fc_match_disconn_ls() */ | |
1610 | nvme_fc_ctrl_put(ctrl); | |
1611 | ||
1612 | return false; | |
1613 | } | |
1614 | ||
1615 | /* | |
1616 | * Actual Processing routine for received FC-NVME LS Requests from the LLD | |
1617 | * returns true if a response should be sent afterward, false if rsp will | |
1618 | * be sent asynchronously. | |
1619 | */ | |
1620 | static bool | |
1621 | nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop) | |
1622 | { | |
1623 | struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; | |
1624 | bool ret = true; | |
1625 | ||
1626 | lsop->lsrsp->nvme_fc_private = lsop; | |
1627 | lsop->lsrsp->rspbuf = lsop->rspbuf; | |
1628 | lsop->lsrsp->rspdma = lsop->rspdma; | |
1629 | lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done; | |
1630 | /* Be preventative: handlers will later set it to a valid length */ | |
1631 | lsop->lsrsp->rsplen = 0; | |
1632 | ||
1633 | /* | |
1634 | * handlers: | |
1635 | * parse request input, execute the request, and format the | |
1636 | * LS response | |
1637 | */ | |
1638 | switch (w0->ls_cmd) { | |
1639 | case FCNVME_LS_DISCONNECT_ASSOC: | |
1640 | ret = nvme_fc_ls_disconnect_assoc(lsop); | |
1641 | break; | |
1642 | case FCNVME_LS_DISCONNECT_CONN: | |
1643 | lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, | |
1644 | sizeof(*lsop->rspbuf), w0->ls_cmd, | |
1645 | FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0); | |
1646 | break; | |
1647 | case FCNVME_LS_CREATE_ASSOCIATION: | |
1648 | case FCNVME_LS_CREATE_CONNECTION: | |
1649 | lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, | |
1650 | sizeof(*lsop->rspbuf), w0->ls_cmd, | |
1651 | FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); | |
1652 | break; | |
1653 | default: | |
1654 | lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, | |
1655 | sizeof(*lsop->rspbuf), w0->ls_cmd, | |
1656 | FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); | |
1657 | break; | |
1658 | } | |
1659 | ||
1660 | return(ret); | |
1661 | } | |
1662 | ||
1663 | static void | |
1664 | nvme_fc_handle_ls_rqst_work(struct work_struct *work) | |
1665 | { | |
1666 | struct nvme_fc_rport *rport = | |
1667 | container_of(work, struct nvme_fc_rport, lsrcv_work); | |
1668 | struct fcnvme_ls_rqst_w0 *w0; | |
1669 | struct nvmefc_ls_rcv_op *lsop; | |
1670 | unsigned long flags; | |
1671 | bool sendrsp; | |
1672 | ||
1673 | restart: | |
1674 | sendrsp = true; | |
1675 | spin_lock_irqsave(&rport->lock, flags); | |
1676 | list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) { | |
1677 | if (lsop->handled) | |
1678 | continue; | |
1679 | ||
1680 | lsop->handled = true; | |
1681 | if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { | |
1682 | spin_unlock_irqrestore(&rport->lock, flags); | |
1683 | sendrsp = nvme_fc_handle_ls_rqst(lsop); | |
1684 | } else { | |
1685 | spin_unlock_irqrestore(&rport->lock, flags); | |
1686 | w0 = &lsop->rqstbuf->w0; | |
1687 | lsop->lsrsp->rsplen = nvme_fc_format_rjt( | |
1688 | lsop->rspbuf, | |
1689 | sizeof(*lsop->rspbuf), | |
1690 | w0->ls_cmd, | |
1691 | FCNVME_RJT_RC_UNAB, | |
1692 | FCNVME_RJT_EXP_NONE, 0); | |
1693 | } | |
1694 | if (sendrsp) | |
1695 | nvme_fc_xmt_ls_rsp(lsop); | |
1696 | goto restart; | |
1697 | } | |
1698 | spin_unlock_irqrestore(&rport->lock, flags); | |
1699 | } | |
1700 | ||
72e6329f JS |
1701 | /** |
1702 | * nvme_fc_rcv_ls_req - transport entry point called by an LLDD | |
1703 | * upon the reception of an NVME LS request. | |
1704 | * | |
1705 | * The nvme-fc layer will copy payload to an internal structure for | |
1706 | * processing. As such, upon completion of the routine, the LLDD may | |
1707 | * immediately free/reuse the LS request buffer passed in the call. | |
1708 | * | |
1709 | * If this routine returns error, the LLDD should abort the exchange. | |
1710 | * | |
2afc4866 | 1711 | * @portptr: pointer to the (registered) remote port that the LS |
72e6329f JS |
1712 | * was received from. The remoteport is associated with |
1713 | * a specific localport. | |
1714 | * @lsrsp: pointer to a nvmefc_ls_rsp response structure to be | |
1715 | * used to reference the exchange corresponding to the LS | |
1716 | * when issuing an ls response. | |
1717 | * @lsreqbuf: pointer to the buffer containing the LS Request | |
1718 | * @lsreqbuf_len: length, in bytes, of the received LS request | |
1719 | */ | |
1720 | int | |
1721 | nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr, | |
1722 | struct nvmefc_ls_rsp *lsrsp, | |
1723 | void *lsreqbuf, u32 lsreqbuf_len) | |
1724 | { | |
1725 | struct nvme_fc_rport *rport = remoteport_to_rport(portptr); | |
1726 | struct nvme_fc_lport *lport = rport->lport; | |
14fd1e98 JS |
1727 | struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; |
1728 | struct nvmefc_ls_rcv_op *lsop; | |
1729 | unsigned long flags; | |
1730 | int ret; | |
1731 | ||
1732 | nvme_fc_rport_get(rport); | |
72e6329f JS |
1733 | |
1734 | /* validate there's a routine to transmit a response */ | |
14fd1e98 JS |
1735 | if (!lport->ops->xmt_ls_rsp) { |
1736 | dev_info(lport->dev, | |
1737 | "RCV %s LS failed: no LLDD xmt_ls_rsp\n", | |
1738 | (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? | |
1739 | nvmefc_ls_names[w0->ls_cmd] : ""); | |
1740 | ret = -EINVAL; | |
1741 | goto out_put; | |
1742 | } | |
1743 | ||
1744 | if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { | |
1745 | dev_info(lport->dev, | |
1746 | "RCV %s LS failed: payload too large\n", | |
1747 | (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? | |
1748 | nvmefc_ls_names[w0->ls_cmd] : ""); | |
1749 | ret = -E2BIG; | |
1750 | goto out_put; | |
1751 | } | |
1752 | ||
1753 | lsop = kzalloc(sizeof(*lsop) + | |
1754 | sizeof(union nvmefc_ls_requests) + | |
1755 | sizeof(union nvmefc_ls_responses), | |
1756 | GFP_KERNEL); | |
1757 | if (!lsop) { | |
1758 | dev_info(lport->dev, | |
1759 | "RCV %s LS failed: No memory\n", | |
1760 | (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? | |
1761 | nvmefc_ls_names[w0->ls_cmd] : ""); | |
1762 | ret = -ENOMEM; | |
1763 | goto out_put; | |
1764 | } | |
1765 | lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1]; | |
1766 | lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1]; | |
1767 | ||
1768 | lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf, | |
1769 | sizeof(*lsop->rspbuf), | |
1770 | DMA_TO_DEVICE); | |
1771 | if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) { | |
1772 | dev_info(lport->dev, | |
1773 | "RCV %s LS failed: DMA mapping failure\n", | |
1774 | (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? | |
1775 | nvmefc_ls_names[w0->ls_cmd] : ""); | |
1776 | ret = -EFAULT; | |
1777 | goto out_free; | |
1778 | } | |
1779 | ||
1780 | lsop->rport = rport; | |
1781 | lsop->lsrsp = lsrsp; | |
1782 | ||
1783 | memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len); | |
1784 | lsop->rqstdatalen = lsreqbuf_len; | |
1785 | ||
1786 | spin_lock_irqsave(&rport->lock, flags); | |
1787 | if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) { | |
1788 | spin_unlock_irqrestore(&rport->lock, flags); | |
1789 | ret = -ENOTCONN; | |
1790 | goto out_unmap; | |
1791 | } | |
1792 | list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list); | |
1793 | spin_unlock_irqrestore(&rport->lock, flags); | |
1794 | ||
1795 | schedule_work(&rport->lsrcv_work); | |
72e6329f JS |
1796 | |
1797 | return 0; | |
14fd1e98 JS |
1798 | |
1799 | out_unmap: | |
1800 | fc_dma_unmap_single(lport->dev, lsop->rspdma, | |
1801 | sizeof(*lsop->rspbuf), DMA_TO_DEVICE); | |
1802 | out_free: | |
1803 | kfree(lsop); | |
1804 | out_put: | |
1805 | nvme_fc_rport_put(rport); | |
1806 | return ret; | |
72e6329f JS |
1807 | } |
1808 | EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req); | |
1809 | ||
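/*
 * Illustrative sketch of the LLDD side (struct lldd_exchange and the
 * lldd_* helper are hypothetical): per the kernel-doc above, the
 * payload is copied during the call, so the LLDD may immediately reuse
 * its receive buffer; if the call fails, the LLDD should abort the
 * exchange.
 */
#include <linux/nvme-fc-driver.h>

static void lldd_handle_nvme_ls(struct lldd_exchange *xchg)
{
	int ret;

	ret = nvme_fc_rcv_ls_req(xchg->remoteport, &xchg->ls_rsp,
				 xchg->rcv_buf, xchg->rcv_len);
	if (ret) {
		/* transport could not take the LS - drop the exchange */
		lldd_abort_exchange(xchg);	/* hypothetical helper */
	}
}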
e399441d JS |
1810 | |
1811 | /* *********************** NVME Ctrl Routines **************************** */ | |
1812 | ||
e399441d JS |
1813 | static void |
1814 | __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl, | |
1815 | struct nvme_fc_fcp_op *op) | |
1816 | { | |
1817 | fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma, | |
1818 | sizeof(op->rsp_iu), DMA_FROM_DEVICE); | |
1819 | fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma, | |
1820 | sizeof(op->cmd_iu), DMA_TO_DEVICE); | |
1821 | ||
1822 | atomic_set(&op->state, FCPOP_STATE_UNINIT); | |
1823 | } | |
1824 | ||
1825 | static void | |
d6296d39 CH |
1826 | nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq, |
1827 | unsigned int hctx_idx) | |
e399441d JS |
1828 | { |
1829 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | |
1830 | ||
d6296d39 | 1831 | return __nvme_fc_exit_request(set->driver_data, op); |
e399441d JS |
1832 | } |
1833 | ||
78a7ac26 JS |
1834 | static int |
1835 | __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) | |
1836 | { | |
3efd6e8e JS |
1837 | unsigned long flags; |
1838 | int opstate; | |
1839 | ||
1840 | spin_lock_irqsave(&ctrl->lock, flags); | |
1841 | opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); | |
1842 | if (opstate != FCPOP_STATE_ACTIVE) | |
1843 | atomic_set(&op->state, opstate); | |
52793d62 JS |
1844 | else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) { |
1845 | op->flags |= FCOP_FLAGS_TERMIO; | |
3efd6e8e | 1846 | ctrl->iocnt++; |
52793d62 | 1847 | } |
3efd6e8e | 1848 | spin_unlock_irqrestore(&ctrl->lock, flags); |
78a7ac26 | 1849 | |
3efd6e8e | 1850 | if (opstate != FCPOP_STATE_ACTIVE) |
78a7ac26 | 1851 | return -ECANCELED; |
78a7ac26 JS |
1852 | |
1853 | ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, | |
1854 | &ctrl->rport->remoteport, | |
1855 | op->queue->lldd_handle, | |
1856 | &op->fcp_req); | |
1857 | ||
1858 | return 0; | |
1859 | } | |
1860 | ||
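/*
 * Illustrative sketch (userspace, C11 atomics): the state handshake
 * used by __nvme_fc_abort_op() above - unconditionally exchange the
 * state to ABORTED, and if the op was not ACTIVE, put the previous
 * state back. Only the caller that observed ACTIVE owns the abort.
 */
#include <stdatomic.h>
#include <stdio.h>

enum { OP_IDLE, OP_ACTIVE, OP_ABORTED };

static _Atomic int op_state = OP_ACTIVE;

static int try_abort(void)
{
	int prev = atomic_exchange(&op_state, OP_ABORTED);

	if (prev != OP_ACTIVE) {
		atomic_store(&op_state, prev);	/* nothing to abort: undo */
		return -1;			/* like -ECANCELED above */
	}
	return 0;	/* this caller gets to issue the LLDD abort */
}

int main(void)
{
	int first = try_abort();
	int second = try_abort();

	printf("first=%d second=%d\n", first, second);	/* first=0 second=-1 */
	return 0;
}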
e399441d | 1861 | static void |
78a7ac26 | 1862 | nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) |
e399441d JS |
1863 | { |
1864 | struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; | |
3efd6e8e | 1865 | int i; |
78a7ac26 | 1866 | |
4cff280a JS |
1867 | /* ensure we've initialized the ops once */ |
1868 | if (!(aen_op->flags & FCOP_FLAGS_AEN)) | |
1869 | return; | |
1870 | ||
3efd6e8e JS |
1871 | for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) |
1872 | __nvme_fc_abort_op(ctrl, aen_op); | |
e399441d JS |
1873 | } |
1874 | ||
c3aedd22 | 1875 | static inline void |
78a7ac26 | 1876 | __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, |
3efd6e8e | 1877 | struct nvme_fc_fcp_op *op, int opstate) |
78a7ac26 JS |
1878 | { |
1879 | unsigned long flags; | |
78a7ac26 | 1880 | |
c3aedd22 JS |
1881 | if (opstate == FCPOP_STATE_ABORTED) { |
1882 | spin_lock_irqsave(&ctrl->lock, flags); | |
52793d62 JS |
1883 | if (test_bit(FCCTRL_TERMIO, &ctrl->flags) && |
1884 | op->flags & FCOP_FLAGS_TERMIO) { | |
c3aedd22 JS |
1885 | if (!--ctrl->iocnt) |
1886 | wake_up(&ctrl->ioabort_wait); | |
1887 | } | |
1888 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
61bff8ef | 1889 | } |
78a7ac26 JS |
1890 | } |
1891 | ||
19fce047 JS |
1892 | static void |
1893 | nvme_fc_ctrl_ioerr_work(struct work_struct *work) | |
1894 | { | |
1895 | struct nvme_fc_ctrl *ctrl = | |
1896 | container_of(work, struct nvme_fc_ctrl, ioerr_work); | |
1897 | ||
1898 | nvme_fc_error_recovery(ctrl, "transport detected io error"); | |
1899 | } | |
1900 | ||
baee29ac | 1901 | static void |
e399441d JS |
1902 | nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) |
1903 | { | |
1904 | struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); | |
1905 | struct request *rq = op->rq; | |
1906 | struct nvmefc_fcp_req *freq = &op->fcp_req; | |
1907 | struct nvme_fc_ctrl *ctrl = op->ctrl; | |
1908 | struct nvme_fc_queue *queue = op->queue; | |
1909 | struct nvme_completion *cqe = &op->rsp_iu.cqe; | |
458f280d | 1910 | struct nvme_command *sqe = &op->cmd_iu.sqe; |
d663b69f | 1911 | __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); |
27fa9bc5 | 1912 | union nvme_result result; |
0a02e39f | 1913 | bool terminate_assoc = true; |
3efd6e8e | 1914 | int opstate; |
e399441d JS |
1915 | |
1916 | /* | |
1917 | * WARNING: | |
1918 | * The current linux implementation of a nvme controller | |
1919 | * allocates a single tag set for all io queues and sizes | |
1920 | * the io queues to fully hold all possible tags. Thus, the | |
1921 | * implementation does not reference or care about the sqhd | |
1922 | * value as it never needs to use the sqhd/sqtail pointers | |
1923 | * for submission pacing. | |
1924 | * | |
1925 | * This affects the FC-NVME implementation in two ways: | |
1926 | * 1) As the value doesn't matter, we don't need to waste | |
1927 | * cycles extracting it from ERSPs and stamping it in the | |
1928 | * cases where the transport fabricates CQEs on successful | |
1929 | * completions. | |
1930 | * 2) The FC-NVME implementation requires that ERSP | |
1931 | * completions be delivered back to the nvme layer in order | |
1932 | * relative to the rsn, such that the sqhd value will always | |
1933 | * be "in order" for the nvme layer. As the nvme layer in | |
1934 | * linux doesn't care about sqhd, there's no need to return | |
1935 | * them in order. | |
1936 | * | |
1937 | * Additionally: | |
1938 | * As the core nvme layer in linux currently does not look at | |
1939 | * every field in the cqe - in cases where the FC transport must | |
1940 | * fabricate a CQE, the following fields will not be set as they | |
1941 | * are not referenced: | |
1942 | * cqe.sqid, cqe.sqhd, cqe.command_id | |
f874d5d0 JS |
1943 | * |
1944 | * Failure or error of an individual i/o, detected by the | |
1945 | * transport in a fashion unrelated to the nvme completion | |
1946 | * status, can cause the initiator and target sides to get out | |
1947 | * of sync on SQ head/tail (aka the allowed outstanding io count). | |
1948 | * Per FC-NVME spec, failure of an individual command requires | |
1949 | * the connection to be terminated, which in turn requires the | |
1950 | * association to be terminated. | |
e399441d JS |
1951 | */ |
1952 | ||
3efd6e8e JS |
1953 | opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); |
1954 | ||
e399441d JS |
1955 | fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, |
1956 | sizeof(op->rsp_iu), DMA_FROM_DEVICE); | |
1957 | ||
3efd6e8e | 1958 | if (opstate == FCPOP_STATE_ABORTED) |
ae3afe63 | 1959 | status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1); |
74bd8cbe JS |
1960 | else if (freq->status) { |
1961 | status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); | |
1962 | dev_info(ctrl->ctrl.device, | |
1963 | "NVME-FC{%d}: io failed due to lldd error %d\n", | |
1964 | ctrl->cnum, freq->status); | |
1965 | } | |
e399441d JS |
1966 | |
1967 | /* | |
1968 | * For the linux implementation, if we have an unsuccessful | |
1969 | * status, the blk-mq layer can typically be called with the | |
1970 | * non-zero status and the content of the cqe isn't important. | |
1971 | */ | |
1972 | if (status) | |
1973 | goto done; | |
1974 | ||
1975 | /* | |
1976 | * command completed successfully relative to the wire | |
1977 | * protocol. However, validate anything received and | |
1978 | * extract the status and result from the cqe (create it | |
1979 | * where necessary). | |
1980 | */ | |
1981 | ||
1982 | switch (freq->rcv_rsplen) { | |
1983 | ||
1984 | case 0: | |
1985 | case NVME_FC_SIZEOF_ZEROS_RSP: | |
1986 | /* | |
1987 | * No response payload, or 12 bytes of payload (which | |
1988 | * should all be zeros), is considered successful; the | |
1989 | * transport fabricates a CQE with no payload. | |
1990 | */ | |
1991 | if (freq->transferred_length != | |
74bd8cbe JS |
1992 | be32_to_cpu(op->cmd_iu.data_len)) { |
1993 | status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); | |
1994 | dev_info(ctrl->ctrl.device, | |
1995 | "NVME-FC{%d}: io failed due to bad transfer " | |
1996 | "length: %d vs expected %d\n", | |
1997 | ctrl->cnum, freq->transferred_length, | |
1998 | be32_to_cpu(op->cmd_iu.data_len)); | |
e399441d JS |
1999 | goto done; |
2000 | } | |
27fa9bc5 | 2001 | result.u64 = 0; |
e399441d JS |
2002 | break; |
2003 | ||
2004 | case sizeof(struct nvme_fc_ersp_iu): | |
2005 | /* | |
2006 | * The ERSP IU contains a full completion with CQE. | |
2007 | * Validate ERSP IU and look at cqe. | |
2008 | */ | |
2009 | if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) != | |
2010 | (freq->rcv_rsplen / 4) || | |
2011 | be32_to_cpu(op->rsp_iu.xfrd_len) != | |
2012 | freq->transferred_length || | |
53b2b2f5 | 2013 | op->rsp_iu.ersp_result || |
458f280d | 2014 | sqe->common.command_id != cqe->command_id)) { |
74bd8cbe JS |
2015 | status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); |
2016 | dev_info(ctrl->ctrl.device, | |
2017 | "NVME-FC{%d}: io failed due to bad NVMe_ERSP: " | |
2018 | "iu len %d, xfr len %d vs %d, status code " | |
2019 | "%d, cmdid %d vs %d\n", | |
2020 | ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len), | |
2021 | be32_to_cpu(op->rsp_iu.xfrd_len), | |
2022 | freq->transferred_length, | |
53b2b2f5 | 2023 | op->rsp_iu.ersp_result, |
74bd8cbe JS |
2024 | sqe->common.command_id, |
2025 | cqe->command_id); | |
e399441d JS |
2026 | goto done; |
2027 | } | |
27fa9bc5 | 2028 | result = cqe->result; |
d663b69f | 2029 | status = cqe->status; |
e399441d JS |
2030 | break; |
2031 | ||
2032 | default: | |
74bd8cbe JS |
2033 | status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); |
2034 | dev_info(ctrl->ctrl.device, | |
2035 | "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu " | |
2036 | "len %d\n", | |
2037 | ctrl->cnum, freq->rcv_rsplen); | |
e399441d JS |
2038 | goto done; |
2039 | } | |
2040 | ||
f874d5d0 JS |
2041 | terminate_assoc = false; |
2042 | ||
e399441d | 2043 | done: |
78a7ac26 | 2044 | if (op->flags & FCOP_FLAGS_AEN) { |
27fa9bc5 | 2045 | nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); |
3efd6e8e | 2046 | __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); |
78a7ac26 JS |
2047 | atomic_set(&op->state, FCPOP_STATE_IDLE); |
2048 | op->flags = FCOP_FLAGS_AEN; /* clear other flags */ | |
e399441d | 2049 | nvme_fc_ctrl_put(ctrl); |
f874d5d0 | 2050 | goto check_error; |
e399441d JS |
2051 | } |
2052 | ||
c3aedd22 | 2053 | __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); |
2eb81a33 | 2054 | if (!nvme_try_complete_req(rq, status, result)) |
ff029451 | 2055 | nvme_fc_complete_rq(rq); |
f874d5d0 JS |
2056 | |
2057 | check_error: | |
f20ef34d | 2058 | if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING) |
19fce047 | 2059 | queue_work(nvme_reset_wq, &ctrl->ioerr_work); |
e399441d JS |
2060 | } |
2061 | ||
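/*
 * Illustrative sketch: the ERSP sanity check in nvme_fc_fcpio_done()
 * above compares iu_len, which FC-NVME carries in 32-bit words,
 * against the received byte count divided by 4. Worked numbers for a
 * full ERSP (assumed 32 bytes here for illustration):
 */
#include <stdio.h>

int main(void)
{
	unsigned rcv_rsplen = 32;	/* bytes received from the wire   */
	unsigned iu_len = 8;		/* IU header value, 32-bit words  */

	if (iu_len != rcv_rsplen / 4)
		printf("bad ERSP: %u words vs %u bytes\n", iu_len, rcv_rsplen);
	else
		printf("ERSP length consistent (%u bytes)\n", rcv_rsplen);
	return 0;
}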
2062 | static int | |
2063 | __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, | |
2064 | struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op, | |
2065 | struct request *rq, u32 rqno) | |
2066 | { | |
d3d0bc78 BVA |
2067 | struct nvme_fcp_op_w_sgl *op_w_sgl = |
2068 | container_of(op, typeof(*op_w_sgl), op); | |
e399441d JS |
2069 | struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; |
2070 | int ret = 0; | |
2071 | ||
2072 | memset(op, 0, sizeof(*op)); | |
2073 | op->fcp_req.cmdaddr = &op->cmd_iu; | |
2074 | op->fcp_req.cmdlen = sizeof(op->cmd_iu); | |
2075 | op->fcp_req.rspaddr = &op->rsp_iu; | |
2076 | op->fcp_req.rsplen = sizeof(op->rsp_iu); | |
2077 | op->fcp_req.done = nvme_fc_fcpio_done; | |
e399441d JS |
2078 | op->ctrl = ctrl; |
2079 | op->queue = queue; | |
2080 | op->rq = rq; | |
2081 | op->rqno = rqno; | |
2082 | ||
53b2b2f5 | 2083 | cmdiu->format_id = NVME_CMD_FORMAT_ID; |
e399441d JS |
2084 | cmdiu->fc_id = NVME_CMD_FC_ID; |
2085 | cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); | |
44fbf3bb JS |
2086 | if (queue->qnum) |
2087 | cmdiu->rsv_cat = fccmnd_set_cat_css(0, | |
2088 | (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT)); | |
2089 | else | |
2090 | cmdiu->rsv_cat = fccmnd_set_cat_admin(0); | |
e399441d JS |
2091 | |
2092 | op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, | |
2093 | &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE); | |
2094 | if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { | |
2095 | dev_err(ctrl->dev, | |
2096 | "FCP Op failed - cmdiu dma mapping failed.\n"); | |
f34448cd | 2097 | ret = -EFAULT; |
e399441d JS |
2098 | goto out_on_error; |
2099 | } | |
2100 | ||
2101 | op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, | |
2102 | &op->rsp_iu, sizeof(op->rsp_iu), | |
2103 | DMA_FROM_DEVICE); | |
2104 | if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { | |
2105 | dev_err(ctrl->dev, | |
2106 | "FCP Op failed - rspiu dma mapping failed.\n"); | |
f34448cd | 2107 | ret = -EFAULT; |
e399441d JS |
2108 | } |
2109 | ||
2110 | atomic_set(&op->state, FCPOP_STATE_IDLE); | |
2111 | out_on_error: | |
2112 | return ret; | |
2113 | } | |
2114 | ||
2115 | static int | |
d6296d39 CH |
2116 | nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq, |
2117 | unsigned int hctx_idx, unsigned int numa_node) | |
e399441d | 2118 | { |
d6296d39 | 2119 | struct nvme_fc_ctrl *ctrl = set->driver_data; |
d3d0bc78 | 2120 | struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq); |
76f983cb CH |
2121 | int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; |
2122 | struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; | |
0d2bdf9f | 2123 | int res; |
e399441d | 2124 | |
0d2bdf9f BVA |
2125 | res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); |
2126 | if (res) | |
2127 | return res; | |
3add1d93 | 2128 | op->op.fcp_req.first_sgl = op->sgl; |
d19b8bc8 | 2129 | op->op.fcp_req.private = &op->priv[0]; |
dfa74422 | 2130 | nvme_req(rq)->ctrl = &ctrl->ctrl; |
f4b9e6c9 | 2131 | nvme_req(rq)->cmd = &op->op.cmd_iu.sqe; |
0d2bdf9f | 2132 | return res; |
e399441d JS |
2133 | } |
2134 | ||
2135 | static int | |
2136 | nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl) | |
2137 | { | |
2138 | struct nvme_fc_fcp_op *aen_op; | |
2139 | struct nvme_fc_cmd_iu *cmdiu; | |
2140 | struct nvme_command *sqe; | |
f56bf76f | 2141 | void *private = NULL; |
e399441d JS |
2142 | int i, ret; |
2143 | ||
2144 | aen_op = ctrl->aen_ops; | |
38dabe21 | 2145 | for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { |
f56bf76f JS |
2146 | if (ctrl->lport->ops->fcprqst_priv_sz) { |
2147 | private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, | |
61bff8ef | 2148 | GFP_KERNEL); |
f56bf76f JS |
2149 | if (!private) |
2150 | return -ENOMEM; | |
2151 | } | |
61bff8ef | 2152 | |
e399441d JS |
2153 | cmdiu = &aen_op->cmd_iu; |
2154 | sqe = &cmdiu->sqe; | |
2155 | ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], | |
2156 | aen_op, (struct request *)NULL, | |
38dabe21 | 2157 | (NVME_AQ_BLK_MQ_DEPTH + i)); |
61bff8ef JS |
2158 | if (ret) { |
2159 | kfree(private); | |
e399441d | 2160 | return ret; |
61bff8ef | 2161 | } |
e399441d | 2162 | |
78a7ac26 | 2163 | aen_op->flags = FCOP_FLAGS_AEN; |
61bff8ef | 2164 | aen_op->fcp_req.private = private; |
78a7ac26 | 2165 | |
e399441d JS |
2166 | memset(sqe, 0, sizeof(*sqe)); |
2167 | sqe->common.opcode = nvme_admin_async_event; | |
78a7ac26 | 2168 | /* Note: core layer may overwrite the sqe.command_id value */ |
38dabe21 | 2169 | sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i; |
e399441d JS |
2170 | } |
2171 | return 0; | |
2172 | } | |
2173 | ||
61bff8ef JS |
2174 | static void |
2175 | nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) | |
2176 | { | |
2177 | struct nvme_fc_fcp_op *aen_op; | |
2178 | int i; | |
2179 | ||
e126e821 | 2180 | cancel_work_sync(&ctrl->ctrl.async_event_work); |
61bff8ef | 2181 | aen_op = ctrl->aen_ops; |
38dabe21 | 2182 | for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { |
61bff8ef JS |
2183 | __nvme_fc_exit_request(ctrl, aen_op); |
2184 | ||
2185 | kfree(aen_op->fcp_req.private); | |
2186 | aen_op->fcp_req.private = NULL; | |
2187 | } | |
2188 | } | |
e399441d JS |
2189 | |
2190 | static inline void | |
2191 | __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl, | |
2192 | unsigned int qidx) | |
2193 | { | |
2194 | struct nvme_fc_queue *queue = &ctrl->queues[qidx]; | |
2195 | ||
2196 | hctx->driver_data = queue; | |
2197 | queue->hctx = hctx; | |
2198 | } | |
2199 | ||
2200 | static int | |
2201 | nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, | |
2202 | unsigned int hctx_idx) | |
2203 | { | |
2204 | struct nvme_fc_ctrl *ctrl = data; | |
2205 | ||
2206 | __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1); | |
2207 | ||
2208 | return 0; | |
2209 | } | |
2210 | ||
2211 | static int | |
2212 | nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, | |
2213 | unsigned int hctx_idx) | |
2214 | { | |
2215 | struct nvme_fc_ctrl *ctrl = data; | |
2216 | ||
2217 | __nvme_fc_init_hctx(hctx, ctrl, hctx_idx); | |
2218 | ||
2219 | return 0; | |
2220 | } | |
2221 | ||
2222 | static void | |
08e15075 | 2223 | nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) |
e399441d JS |
2224 | { |
2225 | struct nvme_fc_queue *queue; | |
2226 | ||
2227 | queue = &ctrl->queues[idx]; | |
2228 | memset(queue, 0, sizeof(*queue)); | |
2229 | queue->ctrl = ctrl; | |
2230 | queue->qnum = idx; | |
67f471b6 | 2231 | atomic_set(&queue->csn, 0); |
e399441d JS |
2232 | queue->dev = ctrl->dev; |
2233 | ||
2234 | if (idx > 0) | |
2235 | queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; | |
2236 | else | |
2237 | queue->cmnd_capsule_len = sizeof(struct nvme_command); | |
2238 | ||
e399441d JS |
2239 | /* |
2240 | * We considered whether to allocate buffers for all SQEs | |
2241 | * and CQEs and dma map them - mapping their respective entries | |
2242 | * into the request structures (kernel vm addr and dma address) | |
2243 | * so that the driver could use the buffers/mappings directly. | |
2244 | * That only makes sense if the LLDD would use them for its | |
2245 | * messaging api; it's very unlikely most adapter APIs would use | |
2246 | * a native NVME sqe/cqe. It would be more reasonable if FC-NVME | |
2247 | * IU payload structures were used instead. | |
2248 | */ | |
2249 | } | |
2250 | ||
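/*
 * Illustrative sketch: ioccsz is reported by the controller in 16-byte
 * units, so the io-queue command capsule length set in
 * nvme_fc_init_queue() above is ioccsz * 16; the admin queue always
 * uses the bare 64-byte SQE. Worked example (ioccsz value hypothetical):
 */
#include <stdio.h>

int main(void)
{
	unsigned ioccsz = 4;			/* from Identify Controller   */
	unsigned io_capsule = ioccsz * 16;	/* 64 bytes: SQE, no in-capsule data */
	unsigned admin_capsule = 64;		/* sizeof(struct nvme_command) */

	printf("io capsule %u bytes, admin capsule %u bytes\n",
	       io_capsule, admin_capsule);
	return 0;
}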
2251 | /* | |
2252 | * This routine terminates a queue at the transport level. | |
2253 | * The transport has already ensured that all outstanding ios on | |
2254 | * the queue have been terminated. | |
2255 | * The transport will send a Disconnect LS request to terminate | |
2256 | * the queue's connection. Termination of the admin queue will also | |
2257 | * terminate the association at the target. | |
2258 | */ | |
2259 | static void | |
2260 | nvme_fc_free_queue(struct nvme_fc_queue *queue) | |
2261 | { | |
2262 | if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) | |
2263 | return; | |
2264 | ||
9e0ed16a | 2265 | clear_bit(NVME_FC_Q_LIVE, &queue->flags); |
e399441d JS |
2266 | /* |
2267 | * Current implementation never disconnects a single queue. | |
2268 | * It always terminates a whole association. So there is never | |
2269 | * a disconnect(queue) LS sent to the target. | |
2270 | */ | |
2271 | ||
2272 | queue->connection_id = 0; | |
67f471b6 | 2273 | atomic_set(&queue->csn, 0); |
e399441d JS |
2274 | } |
2275 | ||
2276 | static void | |
2277 | __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl, | |
2278 | struct nvme_fc_queue *queue, unsigned int qidx) | |
2279 | { | |
2280 | if (ctrl->lport->ops->delete_queue) | |
2281 | ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, | |
2282 | queue->lldd_handle); | |
2283 | queue->lldd_handle = NULL; | |
2284 | } | |
2285 | ||
e399441d JS |
2286 | static void |
2287 | nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) | |
2288 | { | |
2289 | int i; | |
2290 | ||
d858e5f0 | 2291 | for (i = 1; i < ctrl->ctrl.queue_count; i++) |
e399441d JS |
2292 | nvme_fc_free_queue(&ctrl->queues[i]); |
2293 | } | |
2294 | ||
2295 | static int | |
2296 | __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, | |
2297 | struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize) | |
2298 | { | |
2299 | int ret = 0; | |
2300 | ||
2301 | queue->lldd_handle = NULL; | |
2302 | if (ctrl->lport->ops->create_queue) | |
2303 | ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, | |
2304 | qidx, qsize, &queue->lldd_handle); | |
2305 | ||
2306 | return ret; | |
2307 | } | |
2308 | ||
2309 | static void | |
2310 | nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) | |
2311 | { | |
d858e5f0 | 2312 | struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; |
e399441d JS |
2313 | int i; |
2314 | ||
d858e5f0 | 2315 | for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) |
e399441d JS |
2316 | __nvme_fc_delete_hw_queue(ctrl, queue, i); |
2317 | } | |
2318 | ||
2319 | static int | |
2320 | nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) | |
2321 | { | |
2322 | struct nvme_fc_queue *queue = &ctrl->queues[1]; | |
17a1ec08 | 2323 | int i, ret; |
e399441d | 2324 | |
d858e5f0 | 2325 | for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) { |
e399441d | 2326 | ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); |
17a1ec08 JT |
2327 | if (ret) |
2328 | goto delete_queues; | |
e399441d JS |
2329 | } |
2330 | ||
2331 | return 0; | |
17a1ec08 JT |
2332 | |
2333 | delete_queues: | |
514a6dc9 | 2334 | for (; i > 0; i--) |
17a1ec08 JT |
2335 | __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); |
2336 | return ret; | |
e399441d JS |
2337 | } |
2338 | ||
2339 | static int | |
2340 | nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) | |
2341 | { | |
2342 | int i, ret = 0; | |
2343 | ||
d858e5f0 | 2344 | for (i = 1; i < ctrl->ctrl.queue_count; i++) { |
e399441d JS |
2345 | ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, |
2346 | (qsize / 5)); | |
2347 | if (ret) | |
2348 | break; | |
26c68227 | 2349 | ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false); |
e399441d JS |
2350 | if (ret) |
2351 | break; | |
9e0ed16a SG |
2352 | |
2353 | set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); | |
e399441d JS |
2354 | } |
2355 | ||
2356 | return ret; | |
2357 | } | |
2358 | ||
2359 | static void | |
2360 | nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) | |
2361 | { | |
2362 | int i; | |
2363 | ||
d858e5f0 | 2364 | for (i = 1; i < ctrl->ctrl.queue_count; i++) |
08e15075 | 2365 | nvme_fc_init_queue(ctrl, i); |
e399441d JS |
2366 | } |
2367 | ||
2368 | static void | |
2369 | nvme_fc_ctrl_free(struct kref *ref) | |
2370 | { | |
2371 | struct nvme_fc_ctrl *ctrl = | |
2372 | container_of(ref, struct nvme_fc_ctrl, ref); | |
2373 | unsigned long flags; | |
2374 | ||
61bff8ef JS |
2375 | if (ctrl->ctrl.tagset) { |
2376 | blk_cleanup_queue(ctrl->ctrl.connect_q); | |
2377 | blk_mq_free_tag_set(&ctrl->tag_set); | |
e399441d JS |
2378 | } |
2379 | ||
61bff8ef JS |
2380 | /* remove from rport list */ |
2381 | spin_lock_irqsave(&ctrl->rport->lock, flags); | |
2382 | list_del(&ctrl->ctrl_list); | |
2383 | spin_unlock_irqrestore(&ctrl->rport->lock, flags); | |
2384 | ||
f9c5af5f | 2385 | blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); |
61bff8ef | 2386 | blk_cleanup_queue(ctrl->ctrl.admin_q); |
e7832cb4 | 2387 | blk_cleanup_queue(ctrl->ctrl.fabrics_q); |
61bff8ef JS |
2388 | blk_mq_free_tag_set(&ctrl->admin_tag_set); |
2389 | ||
2390 | kfree(ctrl->queues); | |
2391 | ||
e399441d JS |
2392 | put_device(ctrl->dev); |
2393 | nvme_fc_rport_put(ctrl->rport); | |
2394 | ||
e399441d | 2395 | ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); |
de41447a EM |
2396 | if (ctrl->ctrl.opts) |
2397 | nvmf_free_options(ctrl->ctrl.opts); | |
e399441d JS |
2398 | kfree(ctrl); |
2399 | } | |
2400 | ||
2401 | static void | |
2402 | nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl) | |
2403 | { | |
2404 | kref_put(&ctrl->ref, nvme_fc_ctrl_free); | |
2405 | } | |
2406 | ||
2407 | static int | |
2408 | nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl) | |
2409 | { | |
2410 | return kref_get_unless_zero(&ctrl->ref); | |
2411 | } | |
2412 | ||
2413 | /* | |
2414 | * All accesses from nvme core layer done - can now free the | |
2415 | * controller. Called after last nvme_put_ctrl() call | |
2416 | */ | |
2417 | static void | |
61bff8ef | 2418 | nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl) |
e399441d JS |
2419 | { |
2420 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); | |
2421 | ||
2422 | WARN_ON(nctrl != &ctrl->ctrl); | |
2423 | ||
61bff8ef JS |
2424 | nvme_fc_ctrl_put(ctrl); |
2425 | } | |
e399441d | 2426 | |
95ced8a2 JS |
2427 | /* |
2428 | * This routine is used by the transport when it needs to find active | |
2429 | * io on a queue that is to be terminated. The transport uses | |
2430 | * blk_mq_tagset_busy_iter() to find the busy requests, and then | |
2431 | * invokes this routine on each of them to kill them one by one. | |
2432 | * | |
2433 | * As FC allocates an FC exchange for each io, the transport must contact | |
2434 | * the LLDD to terminate the exchange, thus releasing the FC exchange. | |
2435 | * After terminating the exchange the LLDD will call the transport's | |
2436 | * normal io done path for the request, but it will have an aborted | |
2437 | * status. The done path will return the io request back to the block | |
2438 | * layer with an error status. | |
2439 | */ | |
2440 | static bool | |
2441 | nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved) | |
2442 | { | |
2443 | struct nvme_ctrl *nctrl = data; | |
2444 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); | |
2445 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); | |
2446 | ||
3c7aafbc | 2447 | op->nreq.flags |= NVME_REQ_CANCELLED; |
95ced8a2 JS |
2448 | __nvme_fc_abort_op(ctrl, op); |
2449 | return true; | |
2450 | } | |
2451 | ||
2452 | /* | |
2453 | * This routine runs through all outstanding commands on the association | |
2454 | * and aborts them. This routine is typically called by the | |
2455 | * delete_association routine. It is also called due to an error during | |
2456 | * reconnect. In that scenario, it is most likely a command that initializes | |
2457 | * the controller, including fabric Connect commands on io queues, that | |
2458 | * may have timed out or failed thus the io must be killed for the connect | |
2459 | * thread to see the error. | |
2460 | */ | |
2461 | static void | |
2462 | __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues) | |
2463 | { | |
2464 | /* | |
2465 | * If io queues are present, stop them and terminate all outstanding | |
2466 | * ios on them. As FC allocates an FC exchange for each io, the | |
2467 | * transport must contact the LLDD to terminate the exchange, | |
2468 | * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter() | |
2469 | * to tell us which ios are busy and invoke a transport routine | |
2470 | * to kill them with the LLDD. After terminating the exchange | |
2471 | * the LLDD will call the transport's normal io done path, but it | |
2472 | * will have an aborted status. The done path will return the | |
2473 | * io requests back to the block layer as part of normal completions | |
2474 | * (but with error status). | |
2475 | */ | |
2476 | if (ctrl->ctrl.queue_count > 1) { | |
2477 | nvme_stop_queues(&ctrl->ctrl); | |
2478 | blk_mq_tagset_busy_iter(&ctrl->tag_set, | |
2479 | nvme_fc_terminate_exchange, &ctrl->ctrl); | |
2480 | blk_mq_tagset_wait_completed_request(&ctrl->tag_set); | |
2481 | if (start_queues) | |
2482 | nvme_start_queues(&ctrl->ctrl); | |
2483 | } | |
2484 | ||
2485 | /* | |
2486 | * Other transports, which don't have link-level contexts bound | |
2487 | * to sqe's, would try to gracefully shutdown the controller by | |
2488 | * writing the registers for shutdown and polling (call | |
2489 | * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially | |
2490 | * just aborted and we will wait on those contexts, and given | |
2491 | * there was no indication of how live the controlelr is on the | |
2492 | * link, don't send more io to create more contexts for the | |
2493 | * shutdown. Let the controller fail via keepalive failure if | |
2494 | * its still present. | |
2495 | */ | |
2496 | ||
2497 | /* | |
2498 | * clean up the admin queue. Same thing as above. | |
2499 | */ | |
2500 | blk_mq_quiesce_queue(ctrl->ctrl.admin_q); | |
2501 | blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, | |
2502 | nvme_fc_terminate_exchange, &ctrl->ctrl); | |
2503 | blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); | |
2504 | } | |
9c2bb257 | 2505 | |
61bff8ef JS |
2506 | static void |
2507 | nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) | |
2508 | { | |
4cff280a | 2509 | /* |
95ced8a2 JS |
2510 | * If an error (io timeout, etc) occurred while (re)connecting, | |
2511 | * if the remote port requested termination of the association | |
2512 | * (disconnect_ls), or if an error (timeout or abort) occurred | |
2513 | * on an io while creating the controller, abort any ios on the | |
2514 | * association and let the create_association error path resolve things. | |
4cff280a JS |
2515 | */ |
2516 | if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { | |
95ced8a2 JS |
2517 | __nvme_fc_abort_outstanding_ios(ctrl, true); |
2518 | set_bit(ASSOC_FAILED, &ctrl->flags); | |
4cff280a JS |
2519 | return; |
2520 | } | |
2521 | ||
2522 | /* Otherwise, only proceed if in LIVE state - e.g. on first error */ | |
69fa9646 JS |
2523 | if (ctrl->ctrl.state != NVME_CTRL_LIVE) |
2524 | return; | |
2525 | ||
61bff8ef | 2526 | dev_warn(ctrl->ctrl.device, |
514a6dc9 | 2527 | "NVME-FC{%d}: transport association event: %s\n", |
61bff8ef | 2528 | ctrl->cnum, errmsg); |
589ff775 | 2529 | dev_warn(ctrl->ctrl.device, |
61bff8ef | 2530 | "NVME-FC{%d}: resetting controller\n", ctrl->cnum); |
e399441d | 2531 | |
d86c4d8e | 2532 | nvme_reset_ctrl(&ctrl->ctrl); |
e399441d JS |
2533 | } |
2534 | ||
baee29ac | 2535 | static enum blk_eh_timer_return |
e399441d JS |
2536 | nvme_fc_timeout(struct request *rq, bool reserved) |
2537 | { | |
2538 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | |
2539 | struct nvme_fc_ctrl *ctrl = op->ctrl; | |
52793d62 JS |
2540 | struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; |
2541 | struct nvme_command *sqe = &cmdiu->sqe; | |
e399441d JS |
2542 | |
2543 | /* | |
52793d62 JS |
2544 | * Attempt to abort the offending command. Command completion |
2545 | * will detect the aborted io and will fail the connection. | |
e399441d | 2546 | */ |
52793d62 JS |
2547 | dev_info(ctrl->ctrl.device, |
2548 | "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: " | |
2549 | "x%08x/x%08x\n", | |
2550 | ctrl->cnum, op->queue->qnum, sqe->common.opcode, | |
2551 | sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11); | |
2552 | if (__nvme_fc_abort_op(ctrl, op)) | |
2553 | nvme_fc_error_recovery(ctrl, "io timeout abort failed"); | |
e399441d | 2554 | |
134aedc9 JS |
2555 | /* |
2556 | * the io abort has been initiated. Restart the request timer; | |
2557 | * the abort completion will complete the io shortly. This | |
2558 | * avoids a synchronous wait while the abort finishes. | |
2559 | */ | |
2560 | return BLK_EH_RESET_TIMER; | |
e399441d JS |
2561 | } |
2562 | ||
2563 | static int | |
2564 | nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, | |
2565 | struct nvme_fc_fcp_op *op) | |
2566 | { | |
2567 | struct nvmefc_fcp_req *freq = &op->fcp_req; | |
e399441d JS |
2568 | int ret; |
2569 | ||
2570 | freq->sg_cnt = 0; | |
2571 | ||
9f7d8ae2 | 2572 | if (!blk_rq_nr_phys_segments(rq)) |
e399441d JS |
2573 | return 0; |
2574 | ||
2575 | freq->sg_table.sgl = freq->first_sgl; | |
19e420bb | 2576 | ret = sg_alloc_table_chained(&freq->sg_table, |
4635873c | 2577 | blk_rq_nr_phys_segments(rq), freq->sg_table.sgl, |
b1ae1a23 | 2578 | NVME_INLINE_SG_CNT); |
e399441d JS |
2579 | if (ret) |
2580 | return -ENOMEM; | |
2581 | ||
2582 | op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); | |
19e420bb | 2583 | WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); |
e399441d | 2584 | freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, |
f15872c5 | 2585 | op->nents, rq_dma_dir(rq)); |
e399441d | 2586 | if (unlikely(freq->sg_cnt <= 0)) { |
b1ae1a23 | 2587 | sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); |
e399441d JS |
2588 | freq->sg_cnt = 0; |
2589 | return -EFAULT; | |
2590 | } | |
2591 | ||
2592 | /* | |
2593 | * TODO: blk_integrity_rq(rq) for DIF | |
2594 | */ | |
2595 | return 0; | |
2596 | } | |
2597 | ||
2598 | static void | |
2599 | nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, | |
2600 | struct nvme_fc_fcp_op *op) | |
2601 | { | |
2602 | struct nvmefc_fcp_req *freq = &op->fcp_req; | |
2603 | ||
2604 | if (!freq->sg_cnt) | |
2605 | return; | |
2606 | ||
2607 | fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, | |
f15872c5 | 2608 | rq_dma_dir(rq)); |
e399441d | 2609 | |
b1ae1a23 | 2610 | sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); |
e399441d JS |
2611 | |
2612 | freq->sg_cnt = 0; | |
2613 | } | |
2614 | ||
2615 | /* | |
2616 | * In FC, the queue is a logical thing. At transport connect, the target | |
2617 | * creates its "queue" and returns a handle that is to be given to the | |
2618 | * target whenever it posts something to the corresponding SQ. When an | |
2619 | * SQE is sent on a SQ, FC effectively considers the SQE, or rather the | |
2620 | * command contained within the SQE, an io, and assigns a FC exchange | |
2621 | * to it. The SQE and the associated SQ handle are sent in the initial | |
2622 | * CMD IU sent on the exchange. All transfers relative to the io occur | |
2623 | * as part of the exchange. The CQE is the last thing for the io, | |
2624 | * which is transferred (explicitly or implicitly) with the RSP IU | |
2625 | * sent on the exchange. After the CQE is received, the FC exchange is | |
2626 | * terminated and the exchange may be used on a different io. | |
2627 | * | |
2628 | * The transport to LLDD api has the transport making a request for a | |
2629 | * new fcp io to the LLDD. The LLDD then allocates an FC exchange | |
2630 | * resource and transfers the command. The LLDD will then process all | |
2631 | * steps to complete the io. Upon completion, the transport done routine | |
2632 | * is called. | |
2633 | * | |
2634 | * So - while the operation is outstanding to the LLDD, there is a link | |
2635 | * level FC exchange resource that is also outstanding. This must be | |
2636 | * considered in all cleanup operations. | |
2637 | */ | |
fc17b653 | 2638 | static blk_status_t |
e399441d JS |
2639 | nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, |
2640 | struct nvme_fc_fcp_op *op, u32 data_len, | |
2641 | enum nvmefc_fcp_datadir io_dir) | |
2642 | { | |
2643 | struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; | |
2644 | struct nvme_command *sqe = &cmdiu->sqe; | |
b12740d3 | 2645 | int ret, opstate; |
e399441d | 2646 | |
61bff8ef JS |
2647 | /* |
2648 | * before attempting to send the io, check to see if we believe | |
2649 | * the target device is present | |
2650 | */ | |
2651 | if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) | |
86ff7c2a | 2652 | return BLK_STS_RESOURCE; |
61bff8ef | 2653 | |
e399441d | 2654 | if (!nvme_fc_ctrl_get(ctrl)) |
fc17b653 | 2655 | return BLK_STS_IOERR; |
e399441d JS |
2656 | |
2657 | /* format the FC-NVME CMD IU and fcp_req */ | |
2658 | cmdiu->connection_id = cpu_to_be64(queue->connection_id); | |
e399441d JS |
2659 | cmdiu->data_len = cpu_to_be32(data_len); |
2660 | switch (io_dir) { | |
2661 | case NVMEFC_FCP_WRITE: | |
2662 | cmdiu->flags = FCNVME_CMD_FLAGS_WRITE; | |
2663 | break; | |
2664 | case NVMEFC_FCP_READ: | |
2665 | cmdiu->flags = FCNVME_CMD_FLAGS_READ; | |
2666 | break; | |
2667 | case NVMEFC_FCP_NODATA: | |
2668 | cmdiu->flags = 0; | |
2669 | break; | |
2670 | } | |
2671 | op->fcp_req.payload_length = data_len; | |
2672 | op->fcp_req.io_dir = io_dir; | |
2673 | op->fcp_req.transferred_length = 0; | |
2674 | op->fcp_req.rcv_rsplen = 0; | |
62eeacb0 | 2675 | op->fcp_req.status = NVME_SC_SUCCESS; |
e399441d JS |
2676 | op->fcp_req.sqid = cpu_to_le16(queue->qnum); |
2677 | ||
2678 | /* | |
2679 | * validate per fabric rules, set fields mandated by fabric spec | |
2680 | * as well as those by FC-NVME spec. | |
2681 | */ | |
2682 | WARN_ON_ONCE(sqe->common.metadata); | |
e399441d JS |
2683 | sqe->common.flags |= NVME_CMD_SGL_METABUF; |
2684 | ||
2685 | /* | |
d9d34c0b JS |
2686 | * format SQE DPTR field per FC-NVME rules: |
2687 | * type=0x5 Transport SGL Data Block Descriptor | |
2688 | * subtype=0xA Transport-specific value | |
2689 | * address=0 | |
2690 | * length=length of the data series | |
e399441d | 2691 | */ |
d9d34c0b JS |
2692 | sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | |
2693 | NVME_SGL_FMT_TRANSPORT_A; | |
e399441d JS |
2694 | sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); |
2695 | sqe->rw.dptr.sgl.addr = 0; | |
2696 | ||
78a7ac26 | 2697 | if (!(op->flags & FCOP_FLAGS_AEN)) { |
e399441d JS |
2698 | ret = nvme_fc_map_data(ctrl, op->rq, op); |
2699 | if (ret < 0) { | |
e399441d JS |
2700 | nvme_cleanup_cmd(op->rq); |
2701 | nvme_fc_ctrl_put(ctrl); | |
fc17b653 CH |
2702 | if (ret == -ENOMEM || ret == -EAGAIN) |
2703 | return BLK_STS_RESOURCE; | |
2704 | return BLK_STS_IOERR; | |
e399441d JS |
2705 | } |
2706 | } | |
2707 | ||
2708 | fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, | |
2709 | sizeof(op->cmd_iu), DMA_TO_DEVICE); | |
2710 | ||
2711 | atomic_set(&op->state, FCPOP_STATE_ACTIVE); | |
2712 | ||
78a7ac26 | 2713 | if (!(op->flags & FCOP_FLAGS_AEN)) |
e399441d JS |
2714 | blk_mq_start_request(op->rq); |
2715 | ||
67f471b6 | 2716 | cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); |
e399441d JS |
2717 | ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, |
2718 | &ctrl->rport->remoteport, | |
2719 | queue->lldd_handle, &op->fcp_req); | |
2720 | ||
2721 | if (ret) { | |
67f471b6 JS |
2722 | /* |
2723 | * If the lld fails to send the command, is there an issue with | |
2724 | * the csn value? If the command that fails is the Connect, | |
2725 | * no - as the connection won't be live. If it is a command | |
2726 | * post-connect, it's possible a gap in csn may be created. | |
2727 | * Does this matter? As Linux initiators don't send fused | |
2728 | * commands, no. The gap would exist, but as there's nothing | |
2729 | * that depends on csn order to be delivered on the target | |
2730 | * side, it shouldn't hurt. It would be difficult for a | |
2731 | * target to even detect the csn gap as it has no idea when the | |
2732 | * cmd with the csn was supposed to arrive. | |
2733 | */ | |
b12740d3 JS |
2734 | opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); |
2735 | __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); | |
2736 | ||
c9c12e51 | 2737 | if (!(op->flags & FCOP_FLAGS_AEN)) { |
e399441d | 2738 | nvme_fc_unmap_data(ctrl, op->rq, op); |
c9c12e51 DW |
2739 | nvme_cleanup_cmd(op->rq); |
2740 | } | |
e399441d JS |
2741 | |
2742 | nvme_fc_ctrl_put(ctrl); | |
2743 | ||
8b25f351 JS |
2744 | if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && |
2745 | ret != -EBUSY) | |
fc17b653 | 2746 | return BLK_STS_IOERR; |
e399441d | 2747 | |
86ff7c2a | 2748 | return BLK_STS_RESOURCE; |
e399441d JS |
2749 | } |
2750 | ||
fc17b653 | 2751 | return BLK_STS_OK; |
e399441d JS |
2752 | } |
2753 | ||
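/*
 * Illustrative sketch: the DPTR formatting in nvme_fc_start_fcp_op()
 * above packs the descriptor type into the high nibble and the subtype
 * into the low nibble of the SGL identifier byte, giving 0x5A for a
 * Transport SGL Data Block (type 0x5) with transport-specific subtype
 * 0xA:
 */
#include <stdio.h>

int main(void)
{
	unsigned char type = 0x5;	/* Transport SGL Data Block  */
	unsigned char subtype = 0xA;	/* transport-specific value  */
	unsigned char id = (type << 4) | subtype;

	printf("SGL identifier byte: 0x%02X\n", id);	/* prints 0x5A */
	return 0;
}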
fc17b653 | 2754 | static blk_status_t |
e399441d JS |
2755 | nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, |
2756 | const struct blk_mq_queue_data *bd) | |
2757 | { | |
2758 | struct nvme_ns *ns = hctx->queue->queuedata; | |
2759 | struct nvme_fc_queue *queue = hctx->driver_data; | |
2760 | struct nvme_fc_ctrl *ctrl = queue->ctrl; | |
2761 | struct request *rq = bd->rq; | |
2762 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | |
e399441d | 2763 | enum nvmefc_fcp_datadir io_dir; |
3bc32bb1 | 2764 | bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags); |
e399441d | 2765 | u32 data_len; |
fc17b653 | 2766 | blk_status_t ret; |
e399441d | 2767 | |
3bc32bb1 CH |
2768 | if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || |
2769 | !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) | |
6cdefc6e | 2770 | return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); |
9e0ed16a | 2771 | |
f4b9e6c9 | 2772 | ret = nvme_setup_cmd(ns, rq); |
e399441d JS |
2773 | if (ret) |
2774 | return ret; | |
2775 | ||
9f7d8ae2 JS |
2776 | /* |
2777 | * nvme core doesn't quite treat the rq opaquely. Commands such | |
2778 | * as WRITE ZEROES will return a non-zero rq payload_bytes yet | |
2779 | * there is no actual payload to be transferred. | |
2780 | * To get it right, key data transmission on there being 1 or | |
2781 | * more physical segments in the sg list. If there are no | |
2782 | * physical segments, there is no payload. | |
2783 | */ | |
2784 | if (blk_rq_nr_phys_segments(rq)) { | |
2785 | data_len = blk_rq_payload_bytes(rq); | |
e399441d JS |
2786 | io_dir = ((rq_data_dir(rq) == WRITE) ? |
2787 | NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); | |
9f7d8ae2 JS |
2788 | } else { |
2789 | data_len = 0; | |
e399441d | 2790 | io_dir = NVMEFC_FCP_NODATA; |
9f7d8ae2 JS |
2791 | } |
2792 | ||
e399441d JS |
2793 | |
2794 | return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); | |
2795 | } | |
2796 | ||
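/*
 * Illustrative sketch (hypothetical values): the queue_rq decision
 * above keys the transfer on physical segments rather than on
 * payload_bytes, since commands like Write Zeroes report a payload
 * length but carry no data on the wire:
 */
#include <stdio.h>

int main(void)
{
	unsigned nr_phys_segments = 0;	/* e.g. a Write Zeroes command  */
	unsigned payload_bytes = 4096;	/* non-zero despite no transfer */

	if (nr_phys_segments)
		printf("transfer %u bytes\n", payload_bytes);
	else
		printf("NODATA: nothing to transfer\n");
	return 0;
}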
e399441d | 2797 | static void |
ad22c355 | 2798 | nvme_fc_submit_async_event(struct nvme_ctrl *arg) |
e399441d JS |
2799 | { |
2800 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg); | |
2801 | struct nvme_fc_fcp_op *aen_op; | |
fc17b653 | 2802 | blk_status_t ret; |
e399441d | 2803 | |
eb4ee8f1 | 2804 | if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) |
61bff8ef JS |
2805 | return; |
2806 | ||
ad22c355 | 2807 | aen_op = &ctrl->aen_ops[0]; |
e399441d JS |
2808 | |
2809 | ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, | |
2810 | NVMEFC_FCP_NODATA); | |
2811 | if (ret) | |
2812 | dev_err(ctrl->ctrl.device, | |
ad22c355 | 2813 | "failed async event work\n"); |
e399441d JS |
2814 | } |
2815 | ||
2816 | static void | |
c3aedd22 | 2817 | nvme_fc_complete_rq(struct request *rq) |
e399441d JS |
2818 | { |
2819 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | |
2820 | struct nvme_fc_ctrl *ctrl = op->ctrl; | |
e399441d | 2821 | |
78a7ac26 | 2822 | atomic_set(&op->state, FCPOP_STATE_IDLE); |
52793d62 | 2823 | op->flags &= ~FCOP_FLAGS_TERMIO; |
e399441d | 2824 | |
e399441d | 2825 | nvme_fc_unmap_data(ctrl, rq, op); |
77f02a7a | 2826 | nvme_complete_rq(rq); |
e399441d | 2827 | nvme_fc_ctrl_put(ctrl); |
78a7ac26 JS |
2828 | } |
2829 | ||
78a7ac26 | 2830 | |
61bff8ef JS |
2831 | static const struct blk_mq_ops nvme_fc_mq_ops = { |
2832 | .queue_rq = nvme_fc_queue_rq, | |
2833 | .complete = nvme_fc_complete_rq, | |
2834 | .init_request = nvme_fc_init_request, | |
2835 | .exit_request = nvme_fc_exit_request, | |
61bff8ef | 2836 | .init_hctx = nvme_fc_init_hctx, |
61bff8ef JS |
2837 | .timeout = nvme_fc_timeout, |
2838 | }; | |
e399441d | 2839 | |
61bff8ef JS |
2840 | static int |
2841 | nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) | |
e399441d | 2842 | { |
61bff8ef | 2843 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; |
7314183d | 2844 | unsigned int nr_io_queues; |
61bff8ef | 2845 | int ret; |
e399441d | 2846 | |
7314183d SG |
2847 | nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), |
2848 | ctrl->lport->ops->max_hw_queues); | |
2849 | ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); | |
61bff8ef JS |
2850 | if (ret) { |
2851 | dev_info(ctrl->ctrl.device, | |
2852 | "set_queue_count failed: %d\n", ret); | |
2853 | return ret; | |
2854 | } | |
e399441d | 2855 | |
7314183d SG |
2856 | ctrl->ctrl.queue_count = nr_io_queues + 1; |
2857 | if (!nr_io_queues) | |
61bff8ef | 2858 | return 0; |
e399441d | 2859 | |
61bff8ef | 2860 | nvme_fc_init_io_queues(ctrl); |
e399441d | 2861 | |
61bff8ef JS |
2862 | memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); |
2863 | ctrl->tag_set.ops = &nvme_fc_mq_ops; | |
2864 | ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; | |
2865 | ctrl->tag_set.reserved_tags = 1; /* fabric connect */ | |
103e515e | 2866 | ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; |
61bff8ef | 2867 | ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; |
d3d0bc78 BVA |
2868 | ctrl->tag_set.cmd_size = |
2869 | struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, | |
2870 | ctrl->lport->ops->fcprqst_priv_sz); | |
61bff8ef | 2871 | ctrl->tag_set.driver_data = ctrl; |
d858e5f0 | 2872 | ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; |
61bff8ef | 2873 | ctrl->tag_set.timeout = NVME_IO_TIMEOUT; |
e399441d | 2874 | |
61bff8ef JS |
2875 | ret = blk_mq_alloc_tag_set(&ctrl->tag_set); |
2876 | if (ret) | |
2877 | return ret; | |
e399441d | 2878 | |
61bff8ef | 2879 | ctrl->ctrl.tagset = &ctrl->tag_set; |
e399441d | 2880 | |
61bff8ef JS |
2881 | ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); |
2882 | if (IS_ERR(ctrl->ctrl.connect_q)) { | |
2883 | ret = PTR_ERR(ctrl->ctrl.connect_q); | |
2884 | goto out_free_tag_set; | |
2885 | } | |
e399441d | 2886 | |
d157e534 | 2887 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
e399441d | 2888 | if (ret) |
61bff8ef | 2889 | goto out_cleanup_blk_queue; |
e399441d | 2890 | |
d157e534 | 2891 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
61bff8ef JS |
2892 | if (ret) |
2893 | goto out_delete_hw_queues; | |
e399441d | 2894 | |
4c984154 JS |
2895 | ctrl->ioq_live = true; |
2896 | ||
e399441d | 2897 | return 0; |
e399441d | 2898 | |
61bff8ef JS |
2899 | out_delete_hw_queues: |
2900 | nvme_fc_delete_hw_io_queues(ctrl); | |
2901 | out_cleanup_blk_queue: | |
61bff8ef JS |
2902 | blk_cleanup_queue(ctrl->ctrl.connect_q); |
2903 | out_free_tag_set: | |
2904 | blk_mq_free_tag_set(&ctrl->tag_set); | |
2905 | nvme_fc_free_io_queues(ctrl); | |
e399441d | 2906 | |
61bff8ef JS |
2907 | /* force put free routine to ignore io queues */ |
2908 | ctrl->ctrl.tagset = NULL; | |
2909 | ||
2910 | return ret; | |
2911 | } | |
e399441d JS |
2912 | |
2913 | static int | |
3e493c00 | 2914 | nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) |
e399441d JS |
2915 | { |
2916 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; | |
834d3710 | 2917 | u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1; |
7314183d | 2918 | unsigned int nr_io_queues; |
e399441d JS |
2919 | int ret; |
2920 | ||
7314183d SG |
2921 | nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), |
2922 | ctrl->lport->ops->max_hw_queues); | |
2923 | ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); | |
e399441d JS |
2924 | if (ret) { |
2925 | dev_info(ctrl->ctrl.device, | |
2926 | "set_queue_count failed: %d\n", ret); | |
2927 | return ret; | |
2928 | } | |
2929 | ||
834d3710 JS |
2930 | if (!nr_io_queues && prior_ioq_cnt) { |
2931 | dev_info(ctrl->ctrl.device, | |
2932 | "Fail Reconnect: At least 1 io queue " | |
2933 | "required (was %d)\n", prior_ioq_cnt); | |
2934 | return -ENOSPC; | |
2935 | } | |
2936 | ||
7314183d | 2937 | ctrl->ctrl.queue_count = nr_io_queues + 1; |
61bff8ef | 2938 | /* check whether any io queues exist */ |
d858e5f0 | 2939 | if (ctrl->ctrl.queue_count == 1) |
e399441d JS |
2940 | return 0; |
2941 | ||
d157e534 | 2942 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
e399441d | 2943 | if (ret) |
61bff8ef | 2944 | goto out_free_io_queues; |
e399441d | 2945 | |
d157e534 | 2946 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
e399441d JS |
2947 | if (ret) |
2948 | goto out_delete_hw_queues; | |
2949 | ||
88e837ed | 2950 | if (prior_ioq_cnt != nr_io_queues) { |
834d3710 JS |
2951 | dev_info(ctrl->ctrl.device, |
2952 | "reconnect: revising io queue count from %d to %d\n", | |
2953 | prior_ioq_cnt, nr_io_queues); | |
88e837ed JS |
2954 | nvme_wait_freeze(&ctrl->ctrl); |
2955 | blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); | |
2956 | nvme_unfreeze(&ctrl->ctrl); | |
2957 | } | |
cda5fd1a | 2958 | |
e399441d JS |
2959 | return 0; |
2960 | ||
2961 | out_delete_hw_queues: | |
2962 | nvme_fc_delete_hw_io_queues(ctrl); | |
61bff8ef | 2963 | out_free_io_queues: |
e399441d | 2964 | nvme_fc_free_io_queues(ctrl); |
61bff8ef JS |
2965 | return ret; |
2966 | } | |
e399441d | 2967 | |
158bfb88 JS |
2968 | static void |
2969 | nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport) | |
2970 | { | |
2971 | struct nvme_fc_lport *lport = rport->lport; | |
2972 | ||
2973 | atomic_inc(&lport->act_rport_cnt); | |
2974 | } | |
2975 | ||
2976 | static void | |
2977 | nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport) | |
2978 | { | |
2979 | struct nvme_fc_lport *lport = rport->lport; | |
2980 | u32 cnt; | |
2981 | ||
2982 | cnt = atomic_dec_return(&lport->act_rport_cnt); | |
2983 | if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED) | |
2984 | lport->ops->localport_delete(&lport->localport); | |
2985 | } | |
2986 | ||
2987 | static int | |
2988 | nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl) | |
2989 | { | |
2990 | struct nvme_fc_rport *rport = ctrl->rport; | |
2991 | u32 cnt; | |
2992 | ||
eb4ee8f1 | 2993 | if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags)) |
158bfb88 JS |
2994 | return 1; |
2995 | ||
158bfb88 JS |
2996 | cnt = atomic_inc_return(&rport->act_ctrl_cnt); |
2997 | if (cnt == 1) | |
2998 | nvme_fc_rport_active_on_lport(rport); | |
2999 | ||
3000 | return 0; | |
3001 | } | |
3002 | ||
3003 | static int | |
3004 | nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl) | |
3005 | { | |
3006 | struct nvme_fc_rport *rport = ctrl->rport; | |
3007 | struct nvme_fc_lport *lport = rport->lport; | |
3008 | u32 cnt; | |
3009 | ||
eb4ee8f1 | 3010 | /* the ctrl->flags ASSOC_ACTIVE bit is cleared in association delete */ |
158bfb88 JS |
3011 | |
3012 | cnt = atomic_dec_return(&rport->act_ctrl_cnt); | |
3013 | if (cnt == 0) { | |
3014 | if (rport->remoteport.port_state == FC_OBJSTATE_DELETED) | |
3015 | lport->ops->remoteport_delete(&rport->remoteport); | |
3016 | nvme_fc_rport_inactive_on_lport(rport); | |
3017 | } | |
3018 | ||
3019 | return 0; | |
3020 | } | |
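/*
 * Summary of the cascade implemented above (descriptive only): the first
 * controller to go active on an rport bumps the lport's act_rport_cnt;
 * the matching _inactive_ calls invoke the LLDD's remoteport_delete()/
 * localport_delete() callbacks only once the respective count reaches
 * zero *and* the port has already been marked FC_OBJSTATE_DELETED.
 */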
3021 | ||
61bff8ef JS |
3022 | /* |
3023 | * This routine restarts the controller on the host side, and | |
3024 | * on the link side, recreates the controller association. | |
3025 | */ | |
3026 | static int | |
3027 | nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) | |
3028 | { | |
3029 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; | |
14fd1e98 JS |
3030 | struct nvmefc_ls_rcv_op *disls = NULL; |
3031 | unsigned long flags; | |
61bff8ef JS |
3032 | int ret; |
3033 | bool changed; | |
3034 | ||
fdf9dfa8 | 3035 | ++ctrl->ctrl.nr_reconnects; |
61bff8ef | 3036 | |
96e24801 JS |
3037 | if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) |
3038 | return -ENODEV; | |
3039 | ||
158bfb88 JS |
3040 | if (nvme_fc_ctlr_active_on_rport(ctrl)) |
3041 | return -ENOTUNIQ; | |
3042 | ||
4bea364f JS |
3043 | dev_info(ctrl->ctrl.device, |
3044 | "NVME-FC{%d}: create association : host wwpn 0x%016llx " | |
3045 | " rport wwpn 0x%016llx: NQN \"%s\"\n", | |
3046 | ctrl->cnum, ctrl->lport->localport.port_name, | |
3047 | ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn); | |
3048 | ||
caf1cbe3 JS |
3049 | clear_bit(ASSOC_FAILED, &ctrl->flags); |
3050 | ||
61bff8ef JS |
3051 | /* |
3052 | * Create the admin queue | |
3053 | */ | |
3054 | ||
61bff8ef | 3055 | ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, |
d157e534 | 3056 | NVME_AQ_DEPTH); |
61bff8ef JS |
3057 | if (ret) |
3058 | goto out_free_queue; | |
3059 | ||
3060 | ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], | |
d157e534 | 3061 | NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4)); |
61bff8ef JS |
3062 | if (ret) |
3063 | goto out_delete_hw_queue; | |
3064 | ||
61bff8ef JS |
3065 | ret = nvmf_connect_admin_queue(&ctrl->ctrl); |
3066 | if (ret) | |
3067 | goto out_disconnect_admin_queue; | |
3068 | ||
9e0ed16a SG |
3069 | set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); |
3070 | ||
61bff8ef JS |
3071 | /* |
3072 | * Check controller capabilities | |
3073 | * | |
3074 | * TODO: add code to check if ctrl attributes changed from |
3075 | * prior connection values | |
3076 | */ | |
3077 | ||
c0f2f45b | 3078 | ret = nvme_enable_ctrl(&ctrl->ctrl); |
caf1cbe3 | 3079 | if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) |
61bff8ef JS |
3080 | goto out_disconnect_admin_queue; |
3081 | ||
23748076 JS |
3082 | ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments; |
3083 | ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments << | |
3084 | (ilog2(SZ_4K) - 9); | |
61bff8ef | 3085 | |
e7832cb4 SG |
3086 | blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); |
3087 | ||
f21c4769 | 3088 | ret = nvme_init_ctrl_finish(&ctrl->ctrl); |
caf1cbe3 | 3089 | if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) |
61bff8ef JS |
3090 | goto out_disconnect_admin_queue; |
3091 | ||
3092 | /* sanity checks */ | |
3093 | ||
3094 | /* FC-NVME does not have other data in the capsule */ | |
3095 | if (ctrl->ctrl.icdoff) { | |
3096 | dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", | |
3097 | ctrl->ctrl.icdoff); | |
3098 | goto out_disconnect_admin_queue; | |
3099 | } | |
3100 | ||
61bff8ef JS |
3101 | /* FC-NVME supports normal SGL Data Block Descriptors */ |
3102 | ||
3103 | if (opts->queue_size > ctrl->ctrl.maxcmd) { | |
3104 | /* warn if maxcmd is lower than queue_size */ | |
3105 | dev_warn(ctrl->ctrl.device, | |
3106 | "queue_size %zu > ctrl maxcmd %u, reducing " | |
7db39484 | 3107 | "to maxcmd\n", |
61bff8ef JS |
3108 | opts->queue_size, ctrl->ctrl.maxcmd); |
3109 | opts->queue_size = ctrl->ctrl.maxcmd; | |
3110 | } | |
3111 | ||
d157e534 JS |
3112 | if (opts->queue_size > ctrl->ctrl.sqsize + 1) { |
3113 | /* warn if sqsize is lower than queue_size */ | |
3114 | dev_warn(ctrl->ctrl.device, | |
7db39484 JS |
3115 | "queue_size %zu > ctrl sqsize %u, reducing " |
3116 | "to sqsize\n", | |
d157e534 JS |
3117 | opts->queue_size, ctrl->ctrl.sqsize + 1); |
3118 | opts->queue_size = ctrl->ctrl.sqsize + 1; | |
3119 | } | |
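	/*
	 * Worked example (values hypothetical): if the connect request
	 * asked for queue_size = 128 but the controller reports
	 * maxcmd = 64 and sqsize + 1 = 64, the two checks above reduce
	 * opts->queue_size to 64 so no more commands are ever issued
	 * than the controller can accept.
	 */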
3120 | ||
61bff8ef JS |
3121 | ret = nvme_fc_init_aen_ops(ctrl); |
3122 | if (ret) | |
3123 | goto out_term_aen_ops; | |
3124 | ||
3125 | /* | |
3126 | * Create the io queues | |
3127 | */ | |
3128 | ||
d858e5f0 | 3129 | if (ctrl->ctrl.queue_count > 1) { |
4c984154 | 3130 | if (!ctrl->ioq_live) |
61bff8ef JS |
3131 | ret = nvme_fc_create_io_queues(ctrl); |
3132 | else | |
3e493c00 | 3133 | ret = nvme_fc_recreate_io_queues(ctrl); |
61bff8ef | 3134 | } |
caf1cbe3 JS |
3135 | if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) |
3136 | goto out_term_aen_ops; | |
61bff8ef JS |
3137 | |
3138 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); | |
61bff8ef | 3139 | |
fdf9dfa8 | 3140 | ctrl->ctrl.nr_reconnects = 0; |
61bff8ef | 3141 | |
44c6ec77 JS |
3142 | if (changed) |
3143 | nvme_start_ctrl(&ctrl->ctrl); | |
61bff8ef JS |
3144 | |
3145 | return 0; /* Success */ | |
3146 | ||
3147 | out_term_aen_ops: | |
3148 | nvme_fc_term_aen_ops(ctrl); | |
61bff8ef JS |
3149 | out_disconnect_admin_queue: |
3150 | /* send a Disconnect(association) LS to fc-nvme target */ | |
3151 | nvme_fc_xmt_disconnect_assoc(ctrl); | |
14fd1e98 | 3152 | spin_lock_irqsave(&ctrl->lock, flags); |
bcde5f0f | 3153 | ctrl->association_id = 0; |
14fd1e98 JS |
3154 | disls = ctrl->rcv_disconn; |
3155 | ctrl->rcv_disconn = NULL; | |
3156 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
3157 | if (disls) | |
3158 | nvme_fc_xmt_ls_rsp(disls); | |
61bff8ef JS |
3159 | out_delete_hw_queue: |
3160 | __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); | |
3161 | out_free_queue: | |
3162 | nvme_fc_free_queue(&ctrl->queues[0]); | |
eb4ee8f1 | 3163 | clear_bit(ASSOC_ACTIVE, &ctrl->flags); |
158bfb88 | 3164 | nvme_fc_ctlr_inactive_on_rport(ctrl); |
e399441d JS |
3165 | |
3166 | return ret; | |
3167 | } | |
3168 | ||
52793d62 | 3169 | |
52793d62 JS |
3170 | /* |
3171 | * This routine stops operation of the controller on the host side. | |
3172 | * On the host OS stack side: admin and IO queues are stopped, and |
3173 | * outstanding ios on them are terminated via FC ABTS. |
3174 | * On the link side: the association is terminated. | |
3175 | */ | |
3176 | static void | |
3177 | nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) | |
3178 | { | |
3179 | struct nvmefc_ls_rcv_op *disls = NULL; | |
3180 | unsigned long flags; | |
3181 | ||
3182 | if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags)) | |
3183 | return; | |
3184 | ||
3185 | spin_lock_irqsave(&ctrl->lock, flags); | |
3186 | set_bit(FCCTRL_TERMIO, &ctrl->flags); | |
3187 | ctrl->iocnt = 0; | |
3188 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
3189 | ||
3190 | __nvme_fc_abort_outstanding_ios(ctrl, false); | |
61bff8ef JS |
3191 | |
3192 | /* kill the aens as they are a separate path */ | |
3193 | nvme_fc_abort_aen_ops(ctrl); | |
3194 | ||
3195 | /* wait for all io that had to be aborted */ | |
8a82dbf1 | 3196 | spin_lock_irq(&ctrl->lock); |
36715cf4 | 3197 | wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); |
eb4ee8f1 | 3198 | clear_bit(FCCTRL_TERMIO, &ctrl->flags); |
8a82dbf1 | 3199 | spin_unlock_irq(&ctrl->lock); |
61bff8ef JS |
3200 | |
3201 | nvme_fc_term_aen_ops(ctrl); | |
3202 | ||
3203 | /* | |
3204 | * send a Disconnect(association) LS to fc-nvme target | |
3205 | * Note: this could have been sent at the top of the process, but it |
3206 | * is cleaner on link traffic to send it after the aborts complete. |
3207 | * Note: if association doesn't exist, association_id will be 0 | |
3208 | */ | |
3209 | if (ctrl->association_id) | |
3210 | nvme_fc_xmt_disconnect_assoc(ctrl); | |
3211 | ||
14fd1e98 | 3212 | spin_lock_irqsave(&ctrl->lock, flags); |
bcde5f0f | 3213 | ctrl->association_id = 0; |
14fd1e98 JS |
3214 | disls = ctrl->rcv_disconn; |
3215 | ctrl->rcv_disconn = NULL; | |
3216 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
3217 | if (disls) | |
3218 | /* | |
3219 | * if a Disconnect Request was waiting for a response, send | |
3220 | * now that all ABTS's have been issued (and are complete). | |
3221 | */ | |
3222 | nvme_fc_xmt_ls_rsp(disls); | |
bcde5f0f | 3223 | |
61bff8ef JS |
3224 | if (ctrl->ctrl.tagset) { |
3225 | nvme_fc_delete_hw_io_queues(ctrl); | |
3226 | nvme_fc_free_io_queues(ctrl); | |
3227 | } | |
3228 | ||
3229 | __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); | |
3230 | nvme_fc_free_queue(&ctrl->queues[0]); | |
158bfb88 | 3231 | |
d625d05e JS |
3232 | /* re-enable the admin_q so anything new can fast fail */ |
3233 | blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); | |
3234 | ||
02d62a8b JS |
3235 | /* resume the io queues so that things will fast fail */ |
3236 | nvme_start_queues(&ctrl->ctrl); | |
3237 | ||
158bfb88 | 3238 | nvme_fc_ctlr_inactive_on_rport(ctrl); |
61bff8ef JS |
3239 | } |
3240 | ||
3241 | static void | |
c5017e85 | 3242 | nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) |
61bff8ef | 3243 | { |
c5017e85 | 3244 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); |
61bff8ef | 3245 | |
19fce047 | 3246 | cancel_work_sync(&ctrl->ioerr_work); |
61bff8ef | 3247 | cancel_delayed_work_sync(&ctrl->connect_work); |
61bff8ef JS |
3248 | /* |
3249 | * kill the association on the link side. this will block | |
3250 | * waiting for io to terminate | |
3251 | */ | |
3252 | nvme_fc_delete_association(ctrl); | |
61bff8ef JS |
3253 | } |
3254 | ||
5bbecdbc JS |
3255 | static void |
3256 | nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) | |
3257 | { | |
2b632970 JS |
3258 | struct nvme_fc_rport *rport = ctrl->rport; |
3259 | struct nvme_fc_remote_port *portptr = &rport->remoteport; | |
3260 | unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; | |
3261 | bool recon = true; | |
5bbecdbc | 3262 | |
ad6a0a52 | 3263 | if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) |
5bbecdbc | 3264 | return; |
5bbecdbc | 3265 | |
2b632970 | 3266 | if (portptr->port_state == FC_OBJSTATE_ONLINE) |
5bbecdbc | 3267 | dev_info(ctrl->ctrl.device, |
2b632970 JS |
3268 | "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", |
3269 | ctrl->cnum, status); | |
3270 | else if (time_after_eq(jiffies, rport->dev_loss_end)) | |
3271 | recon = false; | |
5bbecdbc | 3272 | |
2b632970 JS |
3273 | if (recon && nvmf_should_reconnect(&ctrl->ctrl)) { |
3274 | if (portptr->port_state == FC_OBJSTATE_ONLINE) | |
3275 | dev_info(ctrl->ctrl.device, | |
3276 | "NVME-FC{%d}: Reconnect attempt in %ld " | |
3277 | "seconds\n", | |
3278 | ctrl->cnum, recon_delay / HZ); | |
3279 | else if (time_after(jiffies + recon_delay, rport->dev_loss_end)) | |
3280 | recon_delay = rport->dev_loss_end - jiffies; | |
96e24801 | 3281 | |
2b632970 | 3282 | queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); |
5bbecdbc | 3283 | } else { |
2b632970 JS |
3284 | if (portptr->port_state == FC_OBJSTATE_ONLINE) |
3285 | dev_warn(ctrl->ctrl.device, | |
5bbecdbc | 3286 | "NVME-FC{%d}: Max reconnect attempts (%d) " |
77d0612d | 3287 | "reached.\n", |
fdf9dfa8 | 3288 | ctrl->cnum, ctrl->ctrl.nr_reconnects); |
2b632970 JS |
3289 | else |
3290 | dev_warn(ctrl->ctrl.device, | |
3291 | "NVME-FC{%d}: dev_loss_tmo (%d) expired " | |
77d0612d | 3292 | "while waiting for remoteport connectivity.\n", |
614fc1c0 MG |
3293 | ctrl->cnum, min_t(int, portptr->dev_loss_tmo, |
3294 | (ctrl->ctrl.opts->max_reconnects * | |
3295 | ctrl->ctrl.opts->reconnect_delay))); | |
c5017e85 | 3296 | WARN_ON(nvme_delete_ctrl(&ctrl->ctrl)); |
5bbecdbc JS |
3297 | } |
3298 | } | |
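/*
 * Timing sketch (values hypothetical): with reconnect_delay = 2s and
 * dev_loss_end 3s away, one more full-delay attempt still fits; once
 * jiffies + recon_delay would overshoot dev_loss_end, the delay is
 * shortened so the final attempt lands at dev_loss_end, after which
 * the !recon path above deletes the controller.
 */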
3299 | ||
61bff8ef | 3300 | static void |
ac9b820e | 3301 | nvme_fc_reset_ctrl_work(struct work_struct *work) |
61bff8ef | 3302 | { |
ac9b820e JS |
3303 | struct nvme_fc_ctrl *ctrl = |
3304 | container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); | |
52793d62 | 3305 | |
ac9b820e | 3306 | nvme_stop_ctrl(&ctrl->ctrl); |
52793d62 JS |
3307 | |
3308 | /* will block while waiting for io to terminate */ |
3309 | nvme_fc_delete_association(ctrl); | |
3310 | ||
ac9b820e | 3311 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) |
44c6ec77 JS |
3312 | dev_err(ctrl->ctrl.device, |
3313 | "NVME-FC{%d}: error_recovery: Couldn't change state " | |
ad6a0a52 | 3314 | "to CONNECTING\n", ctrl->cnum); |
4cff280a | 3315 | |
ac9b820e JS |
3316 | if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { |
3317 | if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { | |
3318 | dev_err(ctrl->ctrl.device, | |
3319 | "NVME-FC{%d}: failed to schedule connect " | |
3320 | "after reset\n", ctrl->cnum); | |
3321 | } else { | |
3322 | flush_delayed_work(&ctrl->connect_work); | |
3323 | } | |
3324 | } else { | |
3325 | nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN); | |
3326 | } | |
61bff8ef JS |
3327 | } |
3328 | ||
4cff280a | 3329 | |
61bff8ef JS |
3330 | static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { |
3331 | .name = "fc", | |
3332 | .module = THIS_MODULE, | |
d3d5b87d | 3333 | .flags = NVME_F_FABRICS, |
61bff8ef JS |
3334 | .reg_read32 = nvmf_reg_read32, |
3335 | .reg_read64 = nvmf_reg_read64, | |
3336 | .reg_write32 = nvmf_reg_write32, | |
61bff8ef JS |
3337 | .free_ctrl = nvme_fc_nvme_ctrl_freed, |
3338 | .submit_async_event = nvme_fc_submit_async_event, | |
c5017e85 | 3339 | .delete_ctrl = nvme_fc_delete_ctrl, |
61bff8ef JS |
3340 | .get_address = nvmf_get_address, |
3341 | }; | |
3342 | ||
3343 | static void | |
3344 | nvme_fc_connect_ctrl_work(struct work_struct *work) | |
3345 | { | |
3346 | int ret; | |
3347 | ||
3348 | struct nvme_fc_ctrl *ctrl = | |
3349 | container_of(to_delayed_work(work), | |
3350 | struct nvme_fc_ctrl, connect_work); | |
3351 | ||
3352 | ret = nvme_fc_create_association(ctrl); | |
5bbecdbc JS |
3353 | if (ret) |
3354 | nvme_fc_reconnect_or_delete(ctrl, ret); | |
3355 | else | |
61bff8ef | 3356 | dev_info(ctrl->ctrl.device, |
4c984154 | 3357 | "NVME-FC{%d}: controller connect complete\n", |
61bff8ef JS |
3358 | ctrl->cnum); |
3359 | } | |
3360 | ||
3361 | ||
3362 | static const struct blk_mq_ops nvme_fc_admin_mq_ops = { | |
3363 | .queue_rq = nvme_fc_queue_rq, | |
3364 | .complete = nvme_fc_complete_rq, | |
76f983cb | 3365 | .init_request = nvme_fc_init_request, |
61bff8ef | 3366 | .exit_request = nvme_fc_exit_request, |
61bff8ef JS |
3367 | .init_hctx = nvme_fc_init_admin_hctx, |
3368 | .timeout = nvme_fc_timeout, | |
3369 | }; | |
3370 | ||
e399441d | 3371 | |
56d5f4f1 JS |
3372 | /* |
3373 | * Fails a controller request if it matches an existing controller | |
3374 | * (association) with the same tuple: | |
3375 | * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN> | |
3376 | * | |
3377 | * The ports don't need to be compared as they are intrinsically | |
3378 | * already matched by the port pointers supplied. | |
3379 | */ | |
3380 | static bool | |
3381 | nvme_fc_existing_controller(struct nvme_fc_rport *rport, | |
3382 | struct nvmf_ctrl_options *opts) | |
3383 | { | |
3384 | struct nvme_fc_ctrl *ctrl; | |
3385 | unsigned long flags; | |
3386 | bool found = false; | |
3387 | ||
3388 | spin_lock_irqsave(&rport->lock, flags); | |
3389 | list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { | |
3390 | found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts); | |
3391 | if (found) | |
3392 | break; | |
3393 | } | |
3394 | spin_unlock_irqrestore(&rport->lock, flags); | |
3395 | ||
3396 | return found; | |
3397 | } | |
3398 | ||
e399441d | 3399 | static struct nvme_ctrl * |
61bff8ef | 3400 | nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, |
e399441d JS |
3401 | struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) |
3402 | { | |
3403 | struct nvme_fc_ctrl *ctrl; | |
3404 | unsigned long flags; | |
f673714a | 3405 | int ret, idx, ctrl_loss_tmo; |
e399441d | 3406 | |
85e6a6ad JS |
3407 | if (!(rport->remoteport.port_role & |
3408 | (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { | |
3409 | ret = -EBADR; | |
3410 | goto out_fail; | |
3411 | } | |
3412 | ||
56d5f4f1 JS |
3413 | if (!opts->duplicate_connect && |
3414 | nvme_fc_existing_controller(rport, opts)) { | |
3415 | ret = -EALREADY; | |
3416 | goto out_fail; | |
3417 | } | |
3418 | ||
e399441d JS |
3419 | ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); |
3420 | if (!ctrl) { | |
3421 | ret = -ENOMEM; | |
3422 | goto out_fail; | |
3423 | } | |
3424 | ||
3425 | idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); | |
3426 | if (idx < 0) { | |
3427 | ret = -ENOSPC; | |
8c5c6605 | 3428 | goto out_free_ctrl; |
e399441d JS |
3429 | } |
3430 | ||
f673714a JS |
3431 | /* |
3432 | * if ctrl_loss_tmo is being enforced and the default reconnect delay | |
3433 | * is being used, change to a shorter reconnect delay for FC. | |
3434 | */ | |
3435 | if (opts->max_reconnects != -1 && | |
3436 | opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY && | |
3437 | opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) { | |
3438 | ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay; | |
3439 | opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO; | |
3440 | opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, | |
3441 | opts->reconnect_delay); | |
3442 | } | |
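	/*
	 * Worked example, assuming the usual fabrics defaults of a 10s
	 * reconnect_delay and 600s ctrl_loss_tmo: max_reconnects arrives
	 * as 60, so ctrl_loss_tmo is recovered as 60 * 10 = 600s;
	 * reconnect_delay then drops to NVME_FC_DEFAULT_RECONNECT_TMO
	 * (2s) and max_reconnects becomes DIV_ROUND_UP(600, 2) = 300,
	 * preserving the overall loss window with faster retries.
	 */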
3443 | ||
e399441d | 3444 | ctrl->ctrl.opts = opts; |
4c984154 | 3445 | ctrl->ctrl.nr_reconnects = 0; |
06f3d71e JS |
3446 | if (lport->dev) |
3447 | ctrl->ctrl.numa_node = dev_to_node(lport->dev); | |
3448 | else | |
3449 | ctrl->ctrl.numa_node = NUMA_NO_NODE; | |
e399441d | 3450 | INIT_LIST_HEAD(&ctrl->ctrl_list); |
e399441d JS |
3451 | ctrl->lport = lport; |
3452 | ctrl->rport = rport; | |
3453 | ctrl->dev = lport->dev; | |
e399441d | 3454 | ctrl->cnum = idx; |
4c984154 | 3455 | ctrl->ioq_live = false; |
8a82dbf1 | 3456 | init_waitqueue_head(&ctrl->ioabort_wait); |
e399441d | 3457 | |
e399441d JS |
3458 | get_device(ctrl->dev); |
3459 | kref_init(&ctrl->ref); | |
3460 | ||
d86c4d8e | 3461 | INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); |
61bff8ef | 3462 | INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); |
19fce047 | 3463 | INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work); |
e399441d JS |
3464 | spin_lock_init(&ctrl->lock); |
3465 | ||
3466 | /* io queue count */ | |
d858e5f0 | 3467 | ctrl->ctrl.queue_count = min_t(unsigned int, |
e399441d JS |
3468 | opts->nr_io_queues, |
3469 | lport->ops->max_hw_queues); | |
d858e5f0 | 3470 | ctrl->ctrl.queue_count++; /* +1 for admin queue */ |
e399441d JS |
3471 | |
3472 | ctrl->ctrl.sqsize = opts->queue_size - 1; | |
3473 | ctrl->ctrl.kato = opts->kato; | |
4c984154 | 3474 | ctrl->ctrl.cntlid = 0xffff; |
e399441d JS |
3475 | |
3476 | ret = -ENOMEM; | |
d858e5f0 SG |
3477 | ctrl->queues = kcalloc(ctrl->ctrl.queue_count, |
3478 | sizeof(struct nvme_fc_queue), GFP_KERNEL); | |
e399441d | 3479 | if (!ctrl->queues) |
61bff8ef | 3480 | goto out_free_ida; |
e399441d | 3481 | |
3e493c00 JS |
3482 | nvme_fc_init_queue(ctrl, 0); |
3483 | ||
61bff8ef JS |
3484 | memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); |
3485 | ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops; | |
38dabe21 | 3486 | ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; |
61bff8ef | 3487 | ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */ |
103e515e | 3488 | ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; |
d3d0bc78 BVA |
3489 | ctrl->admin_tag_set.cmd_size = |
3490 | struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, | |
3491 | ctrl->lport->ops->fcprqst_priv_sz); | |
61bff8ef JS |
3492 | ctrl->admin_tag_set.driver_data = ctrl; |
3493 | ctrl->admin_tag_set.nr_hw_queues = 1; | |
dc96f938 | 3494 | ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT; |
5a22e2bf | 3495 | ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED; |
e399441d | 3496 | |
61bff8ef | 3497 | ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); |
e399441d | 3498 | if (ret) |
61bff8ef | 3499 | goto out_free_queues; |
34b6c231 | 3500 | ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set; |
e399441d | 3501 | |
e7832cb4 SG |
3502 | ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set); |
3503 | if (IS_ERR(ctrl->ctrl.fabrics_q)) { | |
3504 | ret = PTR_ERR(ctrl->ctrl.fabrics_q); | |
3505 | goto out_free_admin_tag_set; | |
3506 | } | |
3507 | ||
61bff8ef JS |
3508 | ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); |
3509 | if (IS_ERR(ctrl->ctrl.admin_q)) { | |
3510 | ret = PTR_ERR(ctrl->ctrl.admin_q); | |
e7832cb4 | 3511 | goto out_cleanup_fabrics_q; |
e399441d JS |
3512 | } |
3513 | ||
61bff8ef JS |
3514 | /* |
3515 | * Would have been nice to init io queues tag set as well. | |
3516 | * However, we require interaction from the controller | |
3517 | * for max io queue count before we can do so. | |
3518 | * Defer this to the connect path. | |
3519 | */ | |
e399441d | 3520 | |
61bff8ef JS |
3521 | ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); |
3522 | if (ret) | |
3523 | goto out_cleanup_admin_q; | |
e399441d | 3524 | |
61bff8ef | 3525 | /* at this point, teardown path changes to ref counting on nvme ctrl */ |
e399441d JS |
3526 | |
3527 | spin_lock_irqsave(&rport->lock, flags); | |
3528 | list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); | |
3529 | spin_unlock_irqrestore(&rport->lock, flags); | |
3530 | ||
4c984154 JS |
3531 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) || |
3532 | !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { | |
17c4dc6e | 3533 | dev_err(ctrl->ctrl.device, |
4c984154 JS |
3534 | "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); |
3535 | goto fail_ctrl; | |
3536 | } | |
17c4dc6e | 3537 | |
4c984154 | 3538 | if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { |
4c984154 JS |
3539 | dev_err(ctrl->ctrl.device, |
3540 | "NVME-FC{%d}: failed to schedule initial connect\n", | |
3541 | ctrl->cnum); | |
3542 | goto fail_ctrl; | |
e399441d JS |
3543 | } |
3544 | ||
4c984154 | 3545 | flush_delayed_work(&ctrl->connect_work); |
2cb657bc | 3546 | |
61bff8ef JS |
3547 | dev_info(ctrl->ctrl.device, |
3548 | "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", | |
3549 | ctrl->cnum, ctrl->ctrl.opts->subsysnqn); | |
e399441d | 3550 | |
61bff8ef | 3551 | return &ctrl->ctrl; |
e399441d | 3552 | |
4c984154 JS |
3553 | fail_ctrl: |
3554 | nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); | |
19fce047 | 3555 | cancel_work_sync(&ctrl->ioerr_work); |
4c984154 JS |
3556 | cancel_work_sync(&ctrl->ctrl.reset_work); |
3557 | cancel_delayed_work_sync(&ctrl->connect_work); | |
3558 | ||
3559 | ctrl->ctrl.opts = NULL; | |
3560 | ||
3561 | /* initiate nvme ctrl ref counting teardown */ | |
3562 | nvme_uninit_ctrl(&ctrl->ctrl); | |
3563 | ||
3564 | /* Remove core ctrl ref. */ | |
3565 | nvme_put_ctrl(&ctrl->ctrl); | |
3566 | ||
3567 | /* as we're past the point where we transition to the ref | |
3568 | * counting teardown path, if we return a bad pointer here, | |
3569 | * the calling routine, thinking it's prior to the | |
3570 | * transition, will do an rport put. Since the teardown | |
3571 | * path also does a rport put, we do an extra get here so |
3572 | * that proper order/teardown happens. |
3573 | */ | |
3574 | nvme_fc_rport_get(rport); | |
3575 | ||
3576 | return ERR_PTR(-EIO); | |
3577 | ||
61bff8ef JS |
3578 | out_cleanup_admin_q: |
3579 | blk_cleanup_queue(ctrl->ctrl.admin_q); | |
e7832cb4 SG |
3580 | out_cleanup_fabrics_q: |
3581 | blk_cleanup_queue(ctrl->ctrl.fabrics_q); | |
61bff8ef JS |
3582 | out_free_admin_tag_set: |
3583 | blk_mq_free_tag_set(&ctrl->admin_tag_set); | |
3584 | out_free_queues: | |
3585 | kfree(ctrl->queues); | |
e399441d | 3586 | out_free_ida: |
61bff8ef | 3587 | put_device(ctrl->dev); |
e399441d JS |
3588 | ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); |
3589 | out_free_ctrl: | |
3590 | kfree(ctrl); | |
3591 | out_fail: | |
e399441d JS |
3592 | /* an exit via this path does not go through the ctrl refcount teardown */ |
3593 | return ERR_PTR(ret); | |
3594 | } | |
3595 | ||
e399441d JS |
3596 | |
3597 | struct nvmet_fc_traddr { | |
3598 | u64 nn; | |
3599 | u64 pn; | |
3600 | }; | |
3601 | ||
e399441d | 3602 | static int |
9c5358e1 | 3603 | __nvme_fc_parse_u64(substring_t *sstr, u64 *val) |
e399441d | 3604 | { |
e399441d JS |
3605 | u64 token64; |
3606 | ||
9c5358e1 JS |
3607 | if (match_u64(sstr, &token64)) |
3608 | return -EINVAL; | |
3609 | *val = token64; | |
e399441d | 3610 | |
9c5358e1 JS |
3611 | return 0; |
3612 | } | |
e399441d | 3613 | |
9c5358e1 JS |
3614 | /* |
3615 | * This routine validates and extracts the WWNs from the TRADDR string. |
3616 | * As the kernel parsers need a "0x" prefix to determine the number base, |
3617 | * always build the string to be parsed with a "0x" prefix before parsing. |
3618 | */ | |
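/*
 * For illustration, the two accepted traddr forms (WWN values
 * hypothetical):
 *
 *   "nn-0x20000090fa942779:pn-0x10000090fa942779"    0x-prefixed
 *   "nn-20000090fa942779:pn-10000090fa942779"        bare hex
 */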
3619 | static int | |
3620 | nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) | |
3621 | { | |
3622 | char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; | |
3623 | substring_t wwn = { name, &name[sizeof(name)-1] }; | |
3624 | int nnoffset, pnoffset; | |
3625 | ||
d4e4230c | 3626 | /* validate that the string is one of the 2 allowed formats */ |
9c5358e1 JS |
3627 | if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && |
3628 | !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && | |
3629 | !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], | |
3630 | "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { | |
3631 | nnoffset = NVME_FC_TRADDR_OXNNLEN; | |
3632 | pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + | |
3633 | NVME_FC_TRADDR_OXNNLEN; | |
3634 | } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && | |
3635 | !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && | |
3636 | !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], | |
3637 | "pn-", NVME_FC_TRADDR_NNLEN))) { | |
3638 | nnoffset = NVME_FC_TRADDR_NNLEN; | |
3639 | pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; | |
3640 | } else | |
3641 | goto out_einval; | |
e399441d | 3642 | |
9c5358e1 JS |
3643 | name[0] = '0'; |
3644 | name[1] = 'x'; | |
3645 | name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; | |
3646 | ||
3647 | memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); | |
3648 | if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) | |
3649 | goto out_einval; | |
3650 | ||
3651 | memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); | |
3652 | if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) | |
3653 | goto out_einval; | |
3654 | ||
3655 | return 0; | |
3656 | ||
3657 | out_einval: | |
3658 | pr_warn("%s: bad traddr string\n", __func__); | |
3659 | return -EINVAL; | |
e399441d JS |
3660 | } |
3661 | ||
3662 | static struct nvme_ctrl * | |
3663 | nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) | |
3664 | { | |
3665 | struct nvme_fc_lport *lport; | |
3666 | struct nvme_fc_rport *rport; | |
61bff8ef | 3667 | struct nvme_ctrl *ctrl; |
e399441d JS |
3668 | struct nvmet_fc_traddr laddr = { 0L, 0L }; |
3669 | struct nvmet_fc_traddr raddr = { 0L, 0L }; | |
3670 | unsigned long flags; | |
3671 | int ret; | |
3672 | ||
9c5358e1 | 3673 | ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE); |
e399441d JS |
3674 | if (ret || !raddr.nn || !raddr.pn) |
3675 | return ERR_PTR(-EINVAL); | |
3676 | ||
9c5358e1 | 3677 | ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE); |
e399441d JS |
3678 | if (ret || !laddr.nn || !laddr.pn) |
3679 | return ERR_PTR(-EINVAL); | |
3680 | ||
3681 | /* find the host and remote ports to connect together */ | |
3682 | spin_lock_irqsave(&nvme_fc_lock, flags); | |
3683 | list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { | |
3684 | if (lport->localport.node_name != laddr.nn || | |
9e0e8dac JS |
3685 | lport->localport.port_name != laddr.pn || |
3686 | lport->localport.port_state != FC_OBJSTATE_ONLINE) | |
e399441d JS |
3687 | continue; |
3688 | ||
3689 | list_for_each_entry(rport, &lport->endp_list, endp_list) { | |
3690 | if (rport->remoteport.node_name != raddr.nn || | |
9e0e8dac JS |
3691 | rport->remoteport.port_name != raddr.pn || |
3692 | rport->remoteport.port_state != FC_OBJSTATE_ONLINE) | |
e399441d JS |
3693 | continue; |
3694 | ||
3695 | /* if we fail to get a reference, fall through. Will error. */ |
3696 | if (!nvme_fc_rport_get(rport)) | |
3697 | break; | |
3698 | ||
3699 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
3700 | ||
61bff8ef JS |
3701 | ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport); |
3702 | if (IS_ERR(ctrl)) | |
3703 | nvme_fc_rport_put(rport); | |
3704 | return ctrl; | |
e399441d JS |
3705 | } |
3706 | } | |
3707 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
3708 | ||
4fb135ad JT |
3709 | pr_warn("%s: %s - %s combination not found\n", |
3710 | __func__, opts->traddr, opts->host_traddr); | |
e399441d JS |
3711 | return ERR_PTR(-ENOENT); |
3712 | } | |
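/*
 * For context, a sketch of how these options typically arrive from
 * userspace via nvme-cli (addresses and NQN hypothetical):
 *
 *   nvme connect -t fc \
 *       -a nn-0x20000090fa942779:pn-0x10000090fa942779 \
 *       -w nn-0x20000090fa9427aa:pn-0x10000090fa9427aa \
 *       -n nqn.2016-06.io.example:subsys1
 */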
3713 | ||
3714 | ||
3715 | static struct nvmf_transport_ops nvme_fc_transport = { | |
3716 | .name = "fc", | |
0de5cd36 | 3717 | .module = THIS_MODULE, |
e399441d | 3718 | .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, |
5bbecdbc | 3719 | .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO, |
e399441d JS |
3720 | .create_ctrl = nvme_fc_create_ctrl, |
3721 | }; | |
3722 | ||
97faec53 JS |
3723 | /* Arbitrary cap on successive failures. With many subsystems it could be high */ |
3724 | #define DISCOVERY_MAX_FAIL 20 | |
3725 | ||
3726 | static ssize_t nvme_fc_nvme_discovery_store(struct device *dev, | |
3727 | struct device_attribute *attr, const char *buf, size_t count) | |
3728 | { | |
3729 | unsigned long flags; | |
3730 | LIST_HEAD(local_disc_list); | |
3731 | struct nvme_fc_lport *lport; | |
3732 | struct nvme_fc_rport *rport; | |
3733 | int failcnt = 0; | |
3734 | ||
3735 | spin_lock_irqsave(&nvme_fc_lock, flags); | |
3736 | restart: | |
3737 | list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { | |
3738 | list_for_each_entry(rport, &lport->endp_list, endp_list) { | |
3739 | if (!nvme_fc_lport_get(lport)) | |
3740 | continue; | |
3741 | if (!nvme_fc_rport_get(rport)) { | |
3742 | /* | |
3743 | * This is a temporary condition. Upon restart | |
3744 | * this rport will be gone from the list. | |
3745 | * | |
3746 | * Revert the lport get and retry. Anything |
3747 | * added to the list already will be skipped (as | |
3748 | * they are no longer list_empty). Loops should | |
3749 | * resume at rports that were not yet seen. | |
3750 | */ | |
3751 | nvme_fc_lport_put(lport); | |
3752 | ||
3753 | if (failcnt++ < DISCOVERY_MAX_FAIL) | |
3754 | goto restart; | |
3755 | ||
3756 | pr_err("nvme_discovery: too many reference " | |
3757 | "failures\n"); | |
3758 | goto process_local_list; | |
3759 | } | |
3760 | if (list_empty(&rport->disc_list)) | |
3761 | list_add_tail(&rport->disc_list, | |
3762 | &local_disc_list); | |
3763 | } | |
3764 | } | |
3765 | ||
3766 | process_local_list: | |
3767 | while (!list_empty(&local_disc_list)) { | |
3768 | rport = list_first_entry(&local_disc_list, | |
3769 | struct nvme_fc_rport, disc_list); | |
3770 | list_del_init(&rport->disc_list); | |
3771 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
3772 | ||
3773 | lport = rport->lport; | |
3774 | /* signal discovery. Won't hurt if it repeats */ | |
3775 | nvme_fc_signal_discovery_scan(lport, rport); | |
3776 | nvme_fc_rport_put(rport); | |
3777 | nvme_fc_lport_put(lport); | |
3778 | ||
3779 | spin_lock_irqsave(&nvme_fc_lock, flags); | |
3780 | } | |
3781 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
3782 | ||
3783 | return count; | |
3784 | } | |
3785 | static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store); | |
3786 | ||
3787 | static struct attribute *nvme_fc_attrs[] = { | |
3788 | &dev_attr_nvme_discovery.attr, | |
3789 | NULL | |
3790 | }; | |
3791 | ||
60b152a5 | 3792 | static const struct attribute_group nvme_fc_attr_group = { |
97faec53 JS |
3793 | .attrs = nvme_fc_attrs, |
3794 | }; | |
3795 | ||
3796 | static const struct attribute_group *nvme_fc_attr_groups[] = { | |
3797 | &nvme_fc_attr_group, | |
3798 | NULL | |
3799 | }; | |
3800 | ||
3801 | static struct class fc_class = { | |
3802 | .name = "fc", | |
3803 | .dev_groups = nvme_fc_attr_groups, | |
3804 | .owner = THIS_MODULE, | |
3805 | }; | |
3806 | ||
e399441d JS |
3807 | static int __init nvme_fc_init_module(void) |
3808 | { | |
5f568556 JS |
3809 | int ret; |
3810 | ||
8730c1dd HR |
3811 | nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0); |
3812 | if (!nvme_fc_wq) | |
3813 | return -ENOMEM; | |
3814 | ||
5f568556 JS |
3815 | /* |
3816 | * NOTE: | |
3817 | * It is expected that in the future the kernel will combine |
3818 | * the FC-isms that currently live under scsi, and that NVME |
3819 | * is now adding to, into a new standalone FC class. The SCSI |
3820 | * and NVME protocols and their devices would be under this | |
3821 | * new FC class. | |
3822 | * | |
3823 | * As we need something to post FC-specific udev events to, | |
3824 | * specifically for nvme probe events, start by creating the | |
3825 | * new device class. When the new standalone FC class is | |
3826 | * put in place, this code will move to a more generic | |
3827 | * location for the class. | |
3828 | */ | |
97faec53 JS |
3829 | ret = class_register(&fc_class); |
3830 | if (ret) { | |
5f568556 | 3831 | pr_err("couldn't register class fc\n"); |
8730c1dd | 3832 | goto out_destroy_wq; |
5f568556 JS |
3833 | } |
3834 | ||
3835 | /* | |
3836 | * Create a device for the FC-centric udev events | |
3837 | */ | |
97faec53 | 3838 | fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL, |
5f568556 JS |
3839 | "fc_udev_device"); |
3840 | if (IS_ERR(fc_udev_device)) { | |
3841 | pr_err("couldn't create fc_udev device!\n"); | |
3842 | ret = PTR_ERR(fc_udev_device); | |
3843 | goto out_destroy_class; | |
3844 | } | |
3845 | ||
3846 | ret = nvmf_register_transport(&nvme_fc_transport); | |
3847 | if (ret) | |
3848 | goto out_destroy_device; | |
3849 | ||
3850 | return 0; | |
3851 | ||
3852 | out_destroy_device: | |
97faec53 | 3853 | device_destroy(&fc_class, MKDEV(0, 0)); |
5f568556 | 3854 | out_destroy_class: |
97faec53 | 3855 | class_unregister(&fc_class); |
8730c1dd HR |
3856 | out_destroy_wq: |
3857 | destroy_workqueue(nvme_fc_wq); | |
3858 | ||
5f568556 | 3859 | return ret; |
e399441d JS |
3860 | } |
3861 | ||
4c73cbdf JS |
3862 | static void |
3863 | nvme_fc_delete_controllers(struct nvme_fc_rport *rport) | |
3864 | { | |
3865 | struct nvme_fc_ctrl *ctrl; | |
3866 | ||
3867 | spin_lock(&rport->lock); | |
3868 | list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { | |
3869 | dev_warn(ctrl->ctrl.device, | |
3870 | "NVME-FC{%d}: transport unloading: deleting ctrl\n", | |
3871 | ctrl->cnum); | |
3872 | nvme_delete_ctrl(&ctrl->ctrl); | |
3873 | } | |
3874 | spin_unlock(&rport->lock); | |
3875 | } | |
3876 | ||
3877 | static void | |
3878 | nvme_fc_cleanup_for_unload(void) | |
3879 | { | |
3880 | struct nvme_fc_lport *lport; | |
3881 | struct nvme_fc_rport *rport; | |
3882 | ||
3883 | list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { | |
3884 | list_for_each_entry(rport, &lport->endp_list, endp_list) { | |
3885 | nvme_fc_delete_controllers(rport); | |
3886 | } | |
3887 | } | |
3888 | } | |
3889 | ||
e399441d JS |
3890 | static void __exit nvme_fc_exit_module(void) |
3891 | { | |
4c73cbdf JS |
3892 | unsigned long flags; |
3893 | bool need_cleanup = false; | |
3894 | ||
3895 | spin_lock_irqsave(&nvme_fc_lock, flags); | |
3896 | nvme_fc_waiting_to_unload = true; | |
3897 | if (!list_empty(&nvme_fc_lport_list)) { | |
3898 | need_cleanup = true; | |
3899 | nvme_fc_cleanup_for_unload(); | |
3900 | } | |
3901 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
3902 | if (need_cleanup) { | |
3903 | pr_info("%s: waiting for ctlr deletes\n", __func__); | |
3904 | wait_for_completion(&nvme_fc_unload_proceed); | |
3905 | pr_info("%s: ctrl deletes complete\n", __func__); | |
3906 | } | |
e399441d JS |
3907 | |
3908 | nvmf_unregister_transport(&nvme_fc_transport); | |
3909 | ||
e399441d JS |
3910 | ida_destroy(&nvme_fc_local_port_cnt); |
3911 | ida_destroy(&nvme_fc_ctrl_cnt); | |
5f568556 | 3912 | |
97faec53 JS |
3913 | device_destroy(&fc_class, MKDEV(0, 0)); |
3914 | class_unregister(&fc_class); | |
8730c1dd | 3915 | destroy_workqueue(nvme_fc_wq); |
e399441d JS |
3916 | } |
3917 | ||
3918 | module_init(nvme_fc_init_module); | |
3919 | module_exit(nvme_fc_exit_module); | |
3920 | ||
3921 | MODULE_LICENSE("GPL v2"); |