/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/* *************************** Data Structures/Defines ****************** */


/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_FC_NR_AEN_COMMANDS	1
#define NVME_FC_AQ_BLKMQ_DEPTH	\
	(NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
#define AEN_CMDID_BASE		(NVME_FC_AQ_BLKMQ_DEPTH + 1)
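
/*
 * With NVMF_AQ_DEPTH at its usual value of 32, this yields a blk-mq
 * admin queue depth of 31, with command id 32 (AEN_CMDID_BASE)
 * reserved for the single AEN command.
 */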

enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = (1 << 0),
};

#define NVMEFC_QUEUE_DELAY	3	/* ms units */

struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	int			queue_size;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u32			seqno;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_RELEASED	= (1 << 1),
	FCOP_FLAGS_COMPLETE	= (1 << 2),
	FCOP_FLAGS_AEN		= (1 << 3),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};

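/*
 * Ops start UNINIT, move to IDLE once __nvme_fc_init_request() has
 * dma mapped them, and are expected to be ACTIVE while outstanding
 * with the LLDD; __nvme_fc_abort_op() flips an ACTIVE op to ABORTED.
 * The COMPLETE/RELEASED teardown handshake is carried in the op flags
 * above (see __nvme_fc_fcpop_chk_teardowns()).
 */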
struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};

struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_port_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list;	/* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcctrl_flags {
	FCCTRL_TERMIO		= (1 << 0),
};

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			queue_count;
	u32			cnum;

	u64			association_id;

	u64			cap;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct work_struct	delete_work;
	struct work_struct	reset_work;
	struct delayed_work	connect_work;

	struct kref		ref;
	u32			flags;
	u32			iocnt;

	struct nvme_fc_fcp_op	aen_ops[NVME_FC_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}



/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;



/* *********************** FC-NVME Port Management ************************ */

static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);


/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	newrec->localport.private = &newrec[1];
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
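
/*
 * A minimal registration sketch, as seen from an LLDD's probe path
 * (the lldd_* names are illustrative, not part of this API):
 *
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name = lldd_wwnn,
 *		.port_name = lldd_wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
 *	};
 *
 *	ret = nvme_fc_register_localport(&pinfo, &lldd_fc_template,
 *					 &pdev->dev, &lldd->localport);
 */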

static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	/* let the LLDD know we've finished tearing it down */
	lport->ops->localport_delete(&lport->localport);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME host FC port.
 * @localport: pointer to the (registered) local port that is to be
 *             deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_kfree_rport;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_lport_put;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	kref_init(&newrec->ref);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	newrec->dev = lport->dev;
	newrec->lport = lport;
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	*portptr = &newrec->remoteport;
	return 0;

out_lport_put:
	nvme_fc_lport_put(lport);
out_kfree_rport:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	/* let the LLDD know we've finished tearing it down */
	lport->ops->remoteport_delete(&rport->remoteport);

	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}

static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME subsystem FC port.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	/* tear down all associations to the remote port */
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
		__nvme_fc_del_ctrl(ctrl);

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	nvme_fc_rport_put(rport);
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * On simple mappings (returning just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}

/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);


static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
		 * a wait until the lldd calls back; the lldd is
		 * responsible for the timeout action.
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}
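
/*
 * nvme_fc_send_ls_req() is the synchronous path used by the Create
 * Association and Create Connection LS's below. The Disconnect LS,
 * which must not block on a response that may never arrive, uses
 * the async variant that follows.
 */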

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_LSACC		= 1,
	VERR_LSDESC_RQST	= 2,
	VERR_LSDESC_RQST_LEN	= 3,
	VERR_ASSOC_ID		= 4,
	VERR_ASSOC_ID_LEN	= 5,
	VERR_CONN_ID		= 6,
	VERR_CONN_ID_LEN	= 7,
	VERR_CR_ASSOC		= 8,
	VERR_CR_ASSOC_ACC_LEN	= 9,
	VERR_CR_CONN		= 10,
	VERR_CR_CONN_ACC_LEN	= 11,
	VERR_DISCONN		= 12,
	VERR_DISCONN_ACC_LEN	= 13,
};

static char *validation_errors[] = {
	"OK",
	"Not LS_ACC",
	"Not LSDESC_RQST",
	"Bad LSDESC_RQST Length",
	"Not Association ID",
	"Bad Association ID Length",
	"Not Connection ID",
	"Bad Connection ID Length",
	"Not CR_ASSOC Rqst",
	"Bad CR_ASSOC ACC Length",
	"Not CR_CONN Rqst",
	"Bad CR_CONN ACC Length",
	"Not Disconnect Rqst",
	"Bad Disconnect ACC Length",
};

static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
		min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect command failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association. Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried. As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
			GFP_KERNEL);
	if (!lsop)
		/* couldn't send it... too bad */
		return;

	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];

	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
	discon_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_disconn_cmd));

	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	discon_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));

	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);

	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
						FCNVME_LSDESC_DISCONN_CMD);
	discon_rqst->discon_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd));
	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

	lsreq->rqstaddr = discon_rqst;
	lsreq->rqstlen = sizeof(*discon_rqst);
	lsreq->rspaddr = discon_acc;
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);

	/* only meaningful part to terminating the association */
	ctrl->association_id = 0;
}


/* *********************** NVME Ctrl Routines **************************** */

static void __nvme_fc_final_op_cleanup(struct request *rq);
static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

static int
nvme_fc_reinit_request(void *data, struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;

	memset(cmdiu, 0, sizeof(*cmdiu));
	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
	memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));

	return 0;
}

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(set->driver_data, op);
}

static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	int state;

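	/*
	 * Atomically mark the op ABORTED. If it wasn't ACTIVE, restore
	 * the prior state and report there was nothing to abort, as the
	 * op is completing in parallel with this teardown path.
	 */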
	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (state != FCPOP_STATE_ACTIVE) {
		atomic_set(&op->state, state);
		return -ECANCELED;
	}

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}

static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	unsigned long flags;
	int i, ret;

	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
		if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
			continue;

		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO) {
			ctrl->iocnt++;
			aen_op->flags |= FCOP_FLAGS_TERMIO;
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);

		ret = __nvme_fc_abort_op(ctrl, aen_op);
		if (ret) {
			/*
			 * if __nvme_fc_abort_op failed the io wasn't
			 * active. Thus this call path is running in
			 * parallel to the io complete. Treat as non-error.
			 */

			/* back out the flags/counters */
			spin_lock_irqsave(&ctrl->lock, flags);
			if (ctrl->flags & FCCTRL_TERMIO)
				ctrl->iocnt--;
			aen_op->flags &= ~FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&ctrl->lock, flags);
			return;
		}
	}
}

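/*
 * Resolve, under ctrl->lock, which of two racing paths performs the
 * final cleanup: io completion (this path) or request release via
 * __nvme_fc_final_op_cleanup(). Returns true if the request was
 * already RELEASED and the completion path must do the cleanup
 * itself; otherwise marks the op COMPLETE and leaves cleanup to the
 * release path.
 */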
static inline int
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	bool complete_rq = false;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
		if (ctrl->flags & FCCTRL_TERMIO)
			ctrl->iocnt--;
	}
	if (op->flags & FCOP_FLAGS_RELEASED)
		complete_rq = true;
	else
		op->flags |= FCOP_FLAGS_COMPLETE;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return complete_rq;
}

static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool complete_rq, terminate_assoc = true;

	/*
	 * WARNING:
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * Additionally:
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *	cqe.sqid, cqe.sqhd, cqe.command_id
	 *
	 * Failure or error of an individual i/o, in a transport
	 * detected fashion unrelated to the nvme completion status,
	 * can potentially cause the initiator and target sides to get out
	 * of sync on SQ head/tail (aka outstanding io count allowed).
	 * Per FC-NVME spec, failure of an individual command requires
	 * the connection to be terminated, which in turn requires the
	 * association to be terminated.
	 */

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
		sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
		status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
	else if (freq->status)
		status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary).
	 */

	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload or 12 bytes of payload (which
		 * should all be zeros) are considered successful and
		 * the transport places no payload in the CQE.
		 */
		if (freq->transferred_length !=
			be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.status_code ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
		goto done;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
	if (!complete_rq) {
		if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
			status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
			if (blk_queue_dying(rq->q))
				status |= cpu_to_le16(NVME_SC_DNR << 1);
		}
		nvme_end_request(rq, status, result);
	} else
		__nvme_fc_final_op_cleanup(rq);

check_error:
	if (terminate_assoc)
		nvme_fc_error_recovery(ctrl, "transport detected io error");
}

static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
	op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}

static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];

	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}

static int
nvme_fc_init_admin_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_queue *queue = &ctrl->queues[0];

	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}

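/*
 * AEN ops are built without a struct request (rq == NULL) and get
 * command ids starting at AEN_CMDID_BASE, just above the tag range
 * blk-mq owns on the admin queue.
 */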
1469 | static int | |
1470 | nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl) | |
1471 | { | |
1472 | struct nvme_fc_fcp_op *aen_op; | |
1473 | struct nvme_fc_cmd_iu *cmdiu; | |
1474 | struct nvme_command *sqe; | |
61bff8ef | 1475 | void *private; |
e399441d JS |
1476 | int i, ret; |
1477 | ||
1478 | aen_op = ctrl->aen_ops; | |
1479 | for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) { | |
61bff8ef JS |
1480 | private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, |
1481 | GFP_KERNEL); | |
1482 | if (!private) | |
1483 | return -ENOMEM; | |
1484 | ||
e399441d JS |
1485 | cmdiu = &aen_op->cmd_iu; |
1486 | sqe = &cmdiu->sqe; | |
1487 | ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], | |
1488 | aen_op, (struct request *)NULL, | |
1489 | (AEN_CMDID_BASE + i)); | |
61bff8ef JS |
1490 | if (ret) { |
1491 | kfree(private); | |
e399441d | 1492 | return ret; |
61bff8ef | 1493 | } |
e399441d | 1494 | |
78a7ac26 | 1495 | aen_op->flags = FCOP_FLAGS_AEN; |
61bff8ef JS |
1496 | aen_op->fcp_req.first_sgl = NULL; /* no sg list */ |
1497 | aen_op->fcp_req.private = private; | |
78a7ac26 | 1498 | |
e399441d JS |
1499 | memset(sqe, 0, sizeof(*sqe)); |
1500 | sqe->common.opcode = nvme_admin_async_event; | |
78a7ac26 | 1501 | /* Note: core layer may overwrite the sqe.command_id value */ |
e399441d JS |
1502 | sqe->common.command_id = AEN_CMDID_BASE + i; |
1503 | } | |
1504 | return 0; | |
1505 | } | |
1506 | ||
61bff8ef JS |
1507 | static void |
1508 | nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) | |
1509 | { | |
1510 | struct nvme_fc_fcp_op *aen_op; | |
1511 | int i; | |
1512 | ||
1513 | aen_op = ctrl->aen_ops; | |
1514 | for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) { | |
1515 | if (!aen_op->fcp_req.private) | |
1516 | continue; | |
1517 | ||
1518 | __nvme_fc_exit_request(ctrl, aen_op); | |
1519 | ||
1520 | kfree(aen_op->fcp_req.private); | |
1521 | aen_op->fcp_req.private = NULL; | |
1522 | } | |
1523 | } | |
e399441d JS |
1524 | |
1525 | static inline void | |
1526 | __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl, | |
1527 | unsigned int qidx) | |
1528 | { | |
1529 | struct nvme_fc_queue *queue = &ctrl->queues[qidx]; | |
1530 | ||
1531 | hctx->driver_data = queue; | |
1532 | queue->hctx = hctx; | |
1533 | } | |
1534 | ||
1535 | static int | |
1536 | nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, | |
1537 | unsigned int hctx_idx) | |
1538 | { | |
1539 | struct nvme_fc_ctrl *ctrl = data; | |
1540 | ||
1541 | __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1); | |
1542 | ||
1543 | return 0; | |
1544 | } | |
1545 | ||
1546 | static int | |
1547 | nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, | |
1548 | unsigned int hctx_idx) | |
1549 | { | |
1550 | struct nvme_fc_ctrl *ctrl = data; | |
1551 | ||
1552 | __nvme_fc_init_hctx(hctx, ctrl, hctx_idx); | |
1553 | ||
1554 | return 0; | |
1555 | } | |
1556 | ||
1557 | static void | |
1558 | nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size) | |
1559 | { | |
1560 | struct nvme_fc_queue *queue; | |
1561 | ||
1562 | queue = &ctrl->queues[idx]; | |
1563 | memset(queue, 0, sizeof(*queue)); | |
1564 | queue->ctrl = ctrl; | |
1565 | queue->qnum = idx; | |
1566 | atomic_set(&queue->csn, 1); | |
1567 | queue->dev = ctrl->dev; | |
1568 | ||
1569 | if (idx > 0) | |
1570 | queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; | |
1571 | else | |
1572 | queue->cmnd_capsule_len = sizeof(struct nvme_command); | |
1573 | ||
1574 | queue->queue_size = queue_size; | |
1575 | ||
1576 | /* | |
1577 | * Considered whether we should allocate buffers for all SQEs | |
1578 | * and CQEs and dma map them - mapping their respective entries | |
1579 | * into the request structures (kernel vm addr and dma address) | |
1580 | * thus the driver could use the buffers/mappings directly. | |
1581 | * It only makes sense if the LLDD would use them for its | |
1582 | * messaging api. It's very unlikely most adapter APIs would use | |
1583 | * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload | |
1584 | * structures were used instead. | |
1585 | */ | |
1586 | } | |
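/*
 * Worked example of the capsule sizing above (illustration only, not
 * from the original source): ioccsz is reported by the controller in
 * 16-byte units, so an io queue on a controller with ioccsz = 4 gets
 * a command capsule of 4 * 16 = 64 bytes, i.e. exactly
 * sizeof(struct nvme_command). A larger ioccsz would leave room for
 * in-capsule data after the SQE; the admin queue is always sized to
 * the bare SQE.
 */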
1587 | ||
1588 | /* | |
1589 | * This routine terminates a queue at the transport level. | |
1590 | * The transport has already ensured that all outstanding ios on | |
1591 | * the queue have been terminated. | |
1592 | * The transport will send a Disconnect LS request to terminate | |
1593 | * the queue's connection. Termination of the admin queue will also | |
1594 | * terminate the association at the target. | |
1595 | */ | |
1596 | static void | |
1597 | nvme_fc_free_queue(struct nvme_fc_queue *queue) | |
1598 | { | |
1599 | if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) | |
1600 | return; | |
1601 | ||
1602 | /* | |
1603 | * Current implementation never disconnects a single queue. | |
1604 | * It always terminates a whole association. So there is never | |
1605 | * a disconnect(queue) LS sent to the target. | |
1606 | */ | |
1607 | ||
1608 | queue->connection_id = 0; | |
1609 | clear_bit(NVME_FC_Q_CONNECTED, &queue->flags); | |
1610 | } | |
1611 | ||
1612 | static void | |
1613 | __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl, | |
1614 | struct nvme_fc_queue *queue, unsigned int qidx) | |
1615 | { | |
1616 | if (ctrl->lport->ops->delete_queue) | |
1617 | ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, | |
1618 | queue->lldd_handle); | |
1619 | queue->lldd_handle = NULL; | |
1620 | } | |
1621 | ||
e399441d JS |
1622 | static void |
1623 | nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) | |
1624 | { | |
1625 | int i; | |
1626 | ||
1627 | for (i = 1; i < ctrl->queue_count; i++) | |
1628 | nvme_fc_free_queue(&ctrl->queues[i]); | |
1629 | } | |
1630 | ||
1631 | static int | |
1632 | __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, | |
1633 | struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize) | |
1634 | { | |
1635 | int ret = 0; | |
1636 | ||
1637 | queue->lldd_handle = NULL; | |
1638 | if (ctrl->lport->ops->create_queue) | |
1639 | ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, | |
1640 | qidx, qsize, &queue->lldd_handle); | |
1641 | ||
1642 | return ret; | |
1643 | } | |
1644 | ||
1645 | static void | |
1646 | nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) | |
1647 | { | |
1648 | struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1]; | |
1649 | int i; | |
1650 | ||
1651 | for (i = ctrl->queue_count - 1; i >= 1; i--, queue--) | |
1652 | __nvme_fc_delete_hw_queue(ctrl, queue, i); | |
1653 | } | |
1654 | ||
1655 | static int | |
1656 | nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) | |
1657 | { | |
1658 | struct nvme_fc_queue *queue = &ctrl->queues[1]; | |
17a1ec08 | 1659 | int i, ret; |
e399441d JS |
1660 | |
1661 | for (i = 1; i < ctrl->queue_count; i++, queue++) { | |
1662 | ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); | |
17a1ec08 JT |
1663 | if (ret) |
1664 | goto delete_queues; | |
e399441d JS |
1665 | } |
1666 | ||
1667 | return 0; | |
17a1ec08 JT |
1668 | |
1669 | delete_queues: | |
1670 | for (; i >= 0; i--) | |
1671 | __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); | |
1672 | return ret; | |
e399441d JS |
1673 | } |
1674 | ||
1675 | static int | |
1676 | nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) | |
1677 | { | |
1678 | int i, ret = 0; | |
1679 | ||
1680 | for (i = 1; i < ctrl->queue_count; i++) { | |
1681 | ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, | |
1682 | (qsize / 5)); | |
1683 | if (ret) | |
1684 | break; | |
1685 | ret = nvmf_connect_io_queue(&ctrl->ctrl, i); | |
1686 | if (ret) | |
1687 | break; | |
1688 | } | |
1689 | ||
1690 | return ret; | |
1691 | } | |
1692 | ||
1693 | static void | |
1694 | nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) | |
1695 | { | |
1696 | int i; | |
1697 | ||
1698 | for (i = 1; i < ctrl->queue_count; i++) | |
1699 | nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize); | |
1700 | } | |
1701 | ||
1702 | static void | |
1703 | nvme_fc_ctrl_free(struct kref *ref) | |
1704 | { | |
1705 | struct nvme_fc_ctrl *ctrl = | |
1706 | container_of(ref, struct nvme_fc_ctrl, ref); | |
1707 | unsigned long flags; | |
1708 | ||
61bff8ef JS |
1709 | if (ctrl->ctrl.tagset) { |
1710 | blk_cleanup_queue(ctrl->ctrl.connect_q); | |
1711 | blk_mq_free_tag_set(&ctrl->tag_set); | |
e399441d JS |
1712 | } |
1713 | ||
61bff8ef JS |
1714 | /* remove from rport list */ |
1715 | spin_lock_irqsave(&ctrl->rport->lock, flags); | |
1716 | list_del(&ctrl->ctrl_list); | |
1717 | spin_unlock_irqrestore(&ctrl->rport->lock, flags); | |
1718 | ||
1719 | blk_cleanup_queue(ctrl->ctrl.admin_q); | |
1720 | blk_mq_free_tag_set(&ctrl->admin_tag_set); | |
1721 | ||
1722 | kfree(ctrl->queues); | |
1723 | ||
e399441d JS |
1724 | put_device(ctrl->dev); |
1725 | nvme_fc_rport_put(ctrl->rport); | |
1726 | ||
e399441d | 1727 | ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); |
de41447a EM |
1728 | if (ctrl->ctrl.opts) |
1729 | nvmf_free_options(ctrl->ctrl.opts); | |
e399441d JS |
1730 | kfree(ctrl); |
1731 | } | |
1732 | ||
1733 | static void | |
1734 | nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl) | |
1735 | { | |
1736 | kref_put(&ctrl->ref, nvme_fc_ctrl_free); | |
1737 | } | |
1738 | ||
1739 | static int | |
1740 | nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl) | |
1741 | { | |
1742 | return kref_get_unless_zero(&ctrl->ref); | |
1743 | } | |
1744 | ||
1745 | /* | |
1746 | * All accesses from nvme core layer done - can now free the | |
1747 | * controller. Called after last nvme_put_ctrl() call | |
1748 | */ | |
1749 | static void | |
61bff8ef | 1750 | nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl) |
e399441d JS |
1751 | { |
1752 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); | |
1753 | ||
1754 | WARN_ON(nctrl != &ctrl->ctrl); | |
1755 | ||
61bff8ef JS |
1756 | nvme_fc_ctrl_put(ctrl); |
1757 | } | |
e399441d | 1758 | |
61bff8ef JS |
1759 | static void |
1760 | nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) | |
1761 | { | |
1762 | dev_warn(ctrl->ctrl.device, | |
1763 | "NVME-FC{%d}: transport association error detected: %s\n", | |
1764 | ctrl->cnum, errmsg); | |
589ff775 | 1765 | dev_warn(ctrl->ctrl.device, |
61bff8ef | 1766 | "NVME-FC{%d}: resetting controller\n", ctrl->cnum); |
e399441d | 1767 | |
2952a879 JS |
1768 | /* stop the queues on error, cleanup is in reset thread */ |
1769 | if (ctrl->queue_count > 1) | |
1770 | nvme_stop_queues(&ctrl->ctrl); | |
1771 | ||
61bff8ef JS |
1772 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { |
1773 | dev_err(ctrl->ctrl.device, | |
1774 | "NVME-FC{%d}: error_recovery: Couldn't change state " | |
1775 | "to RECONNECTING\n", ctrl->cnum); | |
1776 | return; | |
e399441d JS |
1777 | } |
1778 | ||
61bff8ef JS |
1779 | if (!queue_work(nvme_fc_wq, &ctrl->reset_work)) |
1780 | dev_err(ctrl->ctrl.device, | |
1781 | "NVME-FC{%d}: error_recovery: Failed to schedule " | |
1782 | "reset work\n", ctrl->cnum); | |
e399441d JS |
1783 | } |
1784 | ||
baee29ac | 1785 | static enum blk_eh_timer_return |
e399441d JS |
1786 | nvme_fc_timeout(struct request *rq, bool reserved) |
1787 | { | |
1788 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | |
1789 | struct nvme_fc_ctrl *ctrl = op->ctrl; | |
1790 | int ret; | |
1791 | ||
1792 | if (reserved) | |
1793 | return BLK_EH_RESET_TIMER; | |
1794 | ||
1795 | ret = __nvme_fc_abort_op(ctrl, op); | |
1796 | if (ret) | |
1797 | /* io wasn't active to abort, so consider it done */ | |
1798 | return BLK_EH_HANDLED; | |
1799 | ||
1800 | /* | |
61bff8ef JS |
1801 | * we can't individually ABTS an io without affecting the queue, |
1802 | * thus killing the queue, and thus the association. | |
1803 | * So resolve by performing a controller reset, which will stop | |
1804 | * the host/io stack, terminate the association on the link, | |
1805 | * and recreate an association on the link. | |
e399441d | 1806 | */ |
61bff8ef | 1807 | nvme_fc_error_recovery(ctrl, "io timeout error"); |
e399441d JS |
1808 | |
1809 | return BLK_EH_HANDLED; | |
1810 | } | |
1811 | ||
1812 | static int | |
1813 | nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, | |
1814 | struct nvme_fc_fcp_op *op) | |
1815 | { | |
1816 | struct nvmefc_fcp_req *freq = &op->fcp_req; | |
e399441d JS |
1817 | enum dma_data_direction dir; |
1818 | int ret; | |
1819 | ||
1820 | freq->sg_cnt = 0; | |
1821 | ||
b131c61d | 1822 | if (!blk_rq_payload_bytes(rq)) |
e399441d JS |
1823 | return 0; |
1824 | ||
1825 | freq->sg_table.sgl = freq->first_sgl; | |
19e420bb CH |
1826 | ret = sg_alloc_table_chained(&freq->sg_table, |
1827 | blk_rq_nr_phys_segments(rq), freq->sg_table.sgl); | |
e399441d JS |
1828 | if (ret) |
1829 | return -ENOMEM; | |
1830 | ||
1831 | op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); | |
19e420bb | 1832 | WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); |
e399441d JS |
1833 | dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; |
1834 | freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, | |
1835 | op->nents, dir); | |
1836 | if (unlikely(freq->sg_cnt <= 0)) { | |
1837 | sg_free_table_chained(&freq->sg_table, true); | |
1838 | freq->sg_cnt = 0; | |
1839 | return -EFAULT; | |
1840 | } | |
1841 | ||
1842 | /* | |
1843 | * TODO: blk_integrity_rq(rq) for DIF | |
1844 | */ | |
1845 | return 0; | |
1846 | } | |
1847 | ||
1848 | static void | |
1849 | nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, | |
1850 | struct nvme_fc_fcp_op *op) | |
1851 | { | |
1852 | struct nvmefc_fcp_req *freq = &op->fcp_req; | |
1853 | ||
1854 | if (!freq->sg_cnt) | |
1855 | return; | |
1856 | ||
1857 | fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, | |
1858 | ((rq_data_dir(rq) == WRITE) ? | |
1859 | DMA_TO_DEVICE : DMA_FROM_DEVICE)); | |
1860 | ||
1861 | nvme_cleanup_cmd(rq); | |
1862 | ||
1863 | sg_free_table_chained(&freq->sg_table, true); | |
1864 | ||
1865 | freq->sg_cnt = 0; | |
1866 | } | |
1867 | ||
1868 | /* | |
1869 | * In FC, the queue is a logical thing. At transport connect, the target | |
1870 | * creates its "queue" and returns a handle that is to be given to the | |
1871 | * target whenever it posts something to the corresponding SQ. When an | |
1872 | * SQE is sent on a SQ, FC effectively considers the SQE, or rather the | |
1873 | * command contained within the SQE, an io, and assigns a FC exchange | |
1874 | * to it. The SQE and the associated SQ handle are sent in the initial | |
1875 | * CMD IU sent on the exchange. All transfers relative to the io occur | |
1876 | * as part of the exchange. The CQE is the last thing for the io, | |
1877 | * which is transferred (explicitly or implicitly) with the RSP IU | |
1878 | * sent on the exchange. After the CQE is received, the FC exchange is | |
1879 | * terminated and the exchange may be used on a different io. | |
1880 | * | |
1881 | * The transport to LLDD api has the transport making a request for a | |
1882 | * new fcp io request to the LLDD. The LLDD then allocates a FC exchange | |
1883 | * resource and transfers the command. The LLDD will then process all | |
1884 | * steps to complete the io. Upon completion, the transport done routine | |
1885 | * is called. | |
1886 | * | |
1887 | * So - while the operation is outstanding to the LLDD, there is a link | |
1888 | * level FC exchange resource that is also outstanding. This must be | |
1889 | * considered in all cleanup operations. | |
1890 | */ | |
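/*
 * A condensed sketch of that lifecycle (illustration only; the names
 * are the routines in this file, the LLDD side is paraphrased):
 *
 *   nvme_fc_queue_rq()
 *     nvme_fc_start_fcp_op()        map data, build the CMD IU
 *       lport->ops->fcp_io()        LLDD allocates the FC exchange
 *   ...LLDD transfers data, receives the RSP IU / CQE...
 *   fcp_req->done()                 transport completion routine
 *     blk_mq_complete_request()
 *       nvme_fc_complete_rq()       exchange now free for another io
 */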
1891 | static int | |
1892 | nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, | |
1893 | struct nvme_fc_fcp_op *op, u32 data_len, | |
1894 | enum nvmefc_fcp_datadir io_dir) | |
1895 | { | |
1896 | struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; | |
1897 | struct nvme_command *sqe = &cmdiu->sqe; | |
1898 | u32 csn; | |
1899 | int ret; | |
1900 | ||
61bff8ef JS |
1901 | /* |
1902 | * before attempting to send the io, check to see if we believe | |
1903 | * the target device is present | |
1904 | */ | |
1905 | if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) | |
1906 | return BLK_MQ_RQ_QUEUE_ERROR; | |
1907 | ||
e399441d JS |
1908 | if (!nvme_fc_ctrl_get(ctrl)) |
1909 | return BLK_MQ_RQ_QUEUE_ERROR; | |
1910 | ||
1911 | /* format the FC-NVME CMD IU and fcp_req */ | |
1912 | cmdiu->connection_id = cpu_to_be64(queue->connection_id); | |
1913 | csn = atomic_inc_return(&queue->csn); | |
1914 | cmdiu->csn = cpu_to_be32(csn); | |
1915 | cmdiu->data_len = cpu_to_be32(data_len); | |
1916 | switch (io_dir) { | |
1917 | case NVMEFC_FCP_WRITE: | |
1918 | cmdiu->flags = FCNVME_CMD_FLAGS_WRITE; | |
1919 | break; | |
1920 | case NVMEFC_FCP_READ: | |
1921 | cmdiu->flags = FCNVME_CMD_FLAGS_READ; | |
1922 | break; | |
1923 | case NVMEFC_FCP_NODATA: | |
1924 | cmdiu->flags = 0; | |
1925 | break; | |
1926 | } | |
1927 | op->fcp_req.payload_length = data_len; | |
1928 | op->fcp_req.io_dir = io_dir; | |
1929 | op->fcp_req.transferred_length = 0; | |
1930 | op->fcp_req.rcv_rsplen = 0; | |
62eeacb0 | 1931 | op->fcp_req.status = NVME_SC_SUCCESS; |
e399441d JS |
1932 | op->fcp_req.sqid = cpu_to_le16(queue->qnum); |
1933 | ||
1934 | /* | |
1935 | * validate per fabric rules, set fields mandated by fabric spec | |
1936 | * as well as those by FC-NVME spec. | |
1937 | */ | |
1938 | WARN_ON_ONCE(sqe->common.metadata); | |
1939 | WARN_ON_ONCE(sqe->common.dptr.prp1); | |
1940 | WARN_ON_ONCE(sqe->common.dptr.prp2); | |
1941 | sqe->common.flags |= NVME_CMD_SGL_METABUF; | |
1942 | ||
1943 | /* | |
1944 | * format SQE DPTR field per FC-NVME rules | |
1945 | * type=data block descr; subtype=offset; | |
1946 | * offset is currently 0. | |
1947 | */ | |
1948 | sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET; | |
1949 | sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); | |
1950 | sqe->rw.dptr.sgl.addr = 0; | |
1951 | ||
78a7ac26 | 1952 | if (!(op->flags & FCOP_FLAGS_AEN)) { |
e399441d JS |
1953 | ret = nvme_fc_map_data(ctrl, op->rq, op); |
1954 | if (ret < 0) { | |
e399441d JS |
1955 | nvme_cleanup_cmd(op->rq); |
1956 | nvme_fc_ctrl_put(ctrl); | |
1957 | return (ret == -ENOMEM || ret == -EAGAIN) ? | |
1958 | BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR; | |
1959 | } | |
1960 | } | |
1961 | ||
1962 | fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, | |
1963 | sizeof(op->cmd_iu), DMA_TO_DEVICE); | |
1964 | ||
1965 | atomic_set(&op->state, FCPOP_STATE_ACTIVE); | |
1966 | ||
78a7ac26 | 1967 | if (!(op->flags & FCOP_FLAGS_AEN)) |
e399441d JS |
1968 | blk_mq_start_request(op->rq); |
1969 | ||
1970 | ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, | |
1971 | &ctrl->rport->remoteport, | |
1972 | queue->lldd_handle, &op->fcp_req); | |
1973 | ||
1974 | if (ret) { | |
e399441d JS |
1975 | if (op->rq) { /* normal request */ |
1976 | nvme_fc_unmap_data(ctrl, op->rq, op); | |
1977 | nvme_cleanup_cmd(op->rq); | |
1978 | } | |
1979 | /* else - aen. no cleanup needed */ | |
1980 | ||
1981 | nvme_fc_ctrl_put(ctrl); | |
1982 | ||
1983 | if (ret != -EBUSY) | |
1984 | return BLK_MQ_RQ_QUEUE_ERROR; | |
1985 | ||
1986 | if (op->rq) { | |
1987 | blk_mq_stop_hw_queues(op->rq->q); | |
1988 | blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY); | |
1989 | } | |
1990 | return BLK_MQ_RQ_QUEUE_BUSY; | |
1991 | } | |
1992 | ||
1993 | return BLK_MQ_RQ_QUEUE_OK; | |
1994 | } | |
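/*
 * For reference, the return codes above follow standard blk-mq
 * semantics (a summary, not behavior specific to this driver):
 *   BLK_MQ_RQ_QUEUE_OK    - io was handed to the LLDD
 *   BLK_MQ_RQ_QUEUE_BUSY  - block layer requeues and retries later
 *                           (mapping failure under memory pressure,
 *                           or the LLDD returned -EBUSY)
 *   BLK_MQ_RQ_QUEUE_ERROR - io is failed back to the block layer
 */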
1995 | ||
1996 | static int | |
1997 | nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, | |
1998 | const struct blk_mq_queue_data *bd) | |
1999 | { | |
2000 | struct nvme_ns *ns = hctx->queue->queuedata; | |
2001 | struct nvme_fc_queue *queue = hctx->driver_data; | |
2002 | struct nvme_fc_ctrl *ctrl = queue->ctrl; | |
2003 | struct request *rq = bd->rq; | |
2004 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | |
2005 | struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; | |
2006 | struct nvme_command *sqe = &cmdiu->sqe; | |
2007 | enum nvmefc_fcp_datadir io_dir; | |
2008 | u32 data_len; | |
2009 | int ret; | |
2010 | ||
2011 | ret = nvme_setup_cmd(ns, rq, sqe); | |
2012 | if (ret) | |
2013 | return ret; | |
2014 | ||
b131c61d | 2015 | data_len = blk_rq_payload_bytes(rq); |
e399441d JS |
2016 | if (data_len) |
2017 | io_dir = ((rq_data_dir(rq) == WRITE) ? | |
2018 | NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); | |
2019 | else | |
2020 | io_dir = NVMEFC_FCP_NODATA; | |
2021 | ||
2022 | return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); | |
2023 | } | |
2024 | ||
2025 | static struct blk_mq_tags * | |
2026 | nvme_fc_tagset(struct nvme_fc_queue *queue) | |
2027 | { | |
2028 | if (queue->qnum == 0) | |
2029 | return queue->ctrl->admin_tag_set.tags[queue->qnum]; | |
2030 | ||
2031 | return queue->ctrl->tag_set.tags[queue->qnum - 1]; | |
2032 | } | |
2033 | ||
2034 | static int | |
2035 | nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) | |
2037 | { | |
2038 | struct nvme_fc_queue *queue = hctx->driver_data; | |
2039 | struct nvme_fc_ctrl *ctrl = queue->ctrl; | |
2040 | struct request *req; | |
2041 | struct nvme_fc_fcp_op *op; | |
2042 | ||
2043 | req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag); | |
61bff8ef | 2044 | if (!req) |
e399441d | 2045 | return 0; |
e399441d JS |
2046 | |
2047 | op = blk_mq_rq_to_pdu(req); | |
2048 | ||
2049 | if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) && | |
2050 | (ctrl->lport->ops->poll_queue)) | |
2051 | ctrl->lport->ops->poll_queue(&ctrl->lport->localport, | |
2052 | queue->lldd_handle); | |
2053 | ||
2054 | return (atomic_read(&op->state) != FCPOP_STATE_ACTIVE); | |
2055 | } | |
2056 | ||
2057 | static void | |
2058 | nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx) | |
2059 | { | |
2060 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg); | |
2061 | struct nvme_fc_fcp_op *aen_op; | |
61bff8ef JS |
2062 | unsigned long flags; |
2063 | bool terminating = false; | |
e399441d JS |
2064 | int ret; |
2065 | ||
2066 | if (aer_idx >= NVME_FC_NR_AEN_COMMANDS) | |
2067 | return; | |
2068 | ||
61bff8ef JS |
2069 | spin_lock_irqsave(&ctrl->lock, flags); |
2070 | if (ctrl->flags & FCCTRL_TERMIO) | |
2071 | terminating = true; | |
2072 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
2073 | ||
2074 | if (terminating) | |
2075 | return; | |
2076 | ||
e399441d JS |
2077 | aen_op = &ctrl->aen_ops[aer_idx]; |
2078 | ||
2079 | ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, | |
2080 | NVMEFC_FCP_NODATA); | |
2081 | if (ret) | |
2082 | dev_err(ctrl->ctrl.device, | |
2083 | "failed async event work [%d]\n", aer_idx); | |
2084 | } | |
2085 | ||
2086 | static void | |
78a7ac26 | 2087 | __nvme_fc_final_op_cleanup(struct request *rq) |
e399441d JS |
2088 | { |
2089 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | |
2090 | struct nvme_fc_ctrl *ctrl = op->ctrl; | |
e399441d | 2091 | |
78a7ac26 JS |
2092 | atomic_set(&op->state, FCPOP_STATE_IDLE); |
2093 | op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED | | |
2094 | FCOP_FLAGS_COMPLETE); | |
e399441d JS |
2095 | |
2096 | nvme_cleanup_cmd(rq); | |
e399441d | 2097 | nvme_fc_unmap_data(ctrl, rq, op); |
77f02a7a | 2098 | nvme_complete_rq(rq); |
e399441d JS |
2099 | nvme_fc_ctrl_put(ctrl); |
2100 | ||
e399441d JS |
2101 | } |
2102 | ||
78a7ac26 JS |
2103 | static void |
2104 | nvme_fc_complete_rq(struct request *rq) | |
2105 | { | |
2106 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | |
2107 | struct nvme_fc_ctrl *ctrl = op->ctrl; | |
2108 | unsigned long flags; | |
2109 | bool completed = false; | |
2110 | ||
2111 | /* | |
2112 | * the core layer, on controller resets after calling | |
2113 | * nvme_shutdown_ctrl(), calls complete_rq without our | |
2114 | * calling blk_mq_complete_request(), thus there may still | |
2115 | * be live i/o outstanding with the LLDD. This means the transport | |
2116 | * has to track complete calls vs fcpio_done calls to know which | |
2117 | * path to take on completes and dones. | |
2118 | */ | |
2119 | spin_lock_irqsave(&ctrl->lock, flags); | |
2120 | if (op->flags & FCOP_FLAGS_COMPLETE) | |
2121 | completed = true; | |
2122 | else | |
2123 | op->flags |= FCOP_FLAGS_RELEASED; | |
2124 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
2125 | ||
2126 | if (completed) | |
2127 | __nvme_fc_final_op_cleanup(rq); | |
2128 | } | |
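/*
 * Sketch of the handshake above (a restatement of the flag usage in
 * this file, not a new mechanism):
 *
 *   done path runs first:     fcpio_done sets FCOP_FLAGS_COMPLETE;
 *                             complete_rq sees it and performs the
 *                             final cleanup.
 *   complete path runs first: complete_rq sets FCOP_FLAGS_RELEASED;
 *                             fcpio_done sees it and performs the
 *                             final cleanup itself.
 *
 * Either ordering ends with __nvme_fc_final_op_cleanup() running
 * exactly once, with the flag checks serialized by ctrl->lock.
 */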
2129 | ||
e399441d JS |
2130 | /* |
2131 | * This routine is used by the transport when it needs to find active | |
2132 | * io on a queue that is to be terminated. The transport uses | |
2133 | * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke | |
2134 | * this routine to kill them one by one. | |
2135 | * | |
2136 | * As FC allocates FC exchange for each io, the transport must contact | |
2137 | * the LLDD to terminate the exchange, thus releasing the FC exchange. | |
2138 | * After terminating the exchange the LLDD will call the transport's | |
2139 | * normal io done path for the request, but it will have an aborted | |
2140 | * status. The done path will return the io request back to the block | |
2141 | * layer with an error status. | |
2142 | */ | |
2143 | static void | |
2144 | nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved) | |
2145 | { | |
2146 | struct nvme_ctrl *nctrl = data; | |
2147 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); | |
2148 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); | |
78a7ac26 JS |
2149 | unsigned long flags; |
2150 | int status; | |
e399441d JS |
2151 | |
2152 | if (!blk_mq_request_started(req)) | |
2153 | return; | |
2154 | ||
78a7ac26 | 2155 | spin_lock_irqsave(&ctrl->lock, flags); |
61bff8ef JS |
2156 | if (ctrl->flags & FCCTRL_TERMIO) { |
2157 | ctrl->iocnt++; | |
2158 | op->flags |= FCOP_FLAGS_TERMIO; | |
2159 | } | |
78a7ac26 JS |
2160 | spin_unlock_irqrestore(&ctrl->lock, flags); |
2161 | ||
e399441d | 2162 | status = __nvme_fc_abort_op(ctrl, op); |
78a7ac26 JS |
2163 | if (status) { |
2164 | /* | |
2165 | * if __nvme_fc_abort_op failed the io wasn't | |
2166 | * active. Thus this call path is running in | |
2167 | * parallel to the io complete. Treat as non-error. | |
2168 | */ | |
2169 | ||
2170 | /* back out the flags/counters */ | |
2171 | spin_lock_irqsave(&ctrl->lock, flags); | |
61bff8ef JS |
2172 | if (ctrl->flags & FCCTRL_TERMIO) |
2173 | ctrl->iocnt--; | |
78a7ac26 JS |
2174 | op->flags &= ~FCOP_FLAGS_TERMIO; |
2175 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
e399441d | 2176 | return; |
78a7ac26 | 2177 | } |
e399441d JS |
2178 | } |
2179 | ||
78a7ac26 | 2180 | |
61bff8ef JS |
2181 | static const struct blk_mq_ops nvme_fc_mq_ops = { |
2182 | .queue_rq = nvme_fc_queue_rq, | |
2183 | .complete = nvme_fc_complete_rq, | |
2184 | .init_request = nvme_fc_init_request, | |
2185 | .exit_request = nvme_fc_exit_request, | |
2186 | .reinit_request = nvme_fc_reinit_request, | |
2187 | .init_hctx = nvme_fc_init_hctx, | |
2188 | .poll = nvme_fc_poll, | |
2189 | .timeout = nvme_fc_timeout, | |
2190 | }; | |
e399441d | 2191 | |
61bff8ef JS |
2192 | static int |
2193 | nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) | |
e399441d | 2194 | { |
61bff8ef JS |
2195 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; |
2196 | int ret; | |
e399441d | 2197 | |
61bff8ef JS |
2198 | ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); |
2199 | if (ret) { | |
2200 | dev_info(ctrl->ctrl.device, | |
2201 | "set_queue_count failed: %d\n", ret); | |
2202 | return ret; | |
2203 | } | |
e399441d | 2204 | |
61bff8ef JS |
2205 | ctrl->queue_count = opts->nr_io_queues + 1; |
2206 | if (!opts->nr_io_queues) | |
2207 | return 0; | |
e399441d | 2208 | |
61bff8ef | 2209 | nvme_fc_init_io_queues(ctrl); |
e399441d | 2210 | |
61bff8ef JS |
2211 | memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); |
2212 | ctrl->tag_set.ops = &nvme_fc_mq_ops; | |
2213 | ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; | |
2214 | ctrl->tag_set.reserved_tags = 1; /* fabric connect */ | |
2215 | ctrl->tag_set.numa_node = NUMA_NO_NODE; | |
2216 | ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; | |
2217 | ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) + | |
2218 | (SG_CHUNK_SIZE * | |
2219 | sizeof(struct scatterlist)) + | |
2220 | ctrl->lport->ops->fcprqst_priv_sz; | |
2221 | ctrl->tag_set.driver_data = ctrl; | |
2222 | ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1; | |
2223 | ctrl->tag_set.timeout = NVME_IO_TIMEOUT; | |
e399441d | 2224 | |
61bff8ef JS |
2225 | ret = blk_mq_alloc_tag_set(&ctrl->tag_set); |
2226 | if (ret) | |
2227 | return ret; | |
e399441d | 2228 | |
61bff8ef | 2229 | ctrl->ctrl.tagset = &ctrl->tag_set; |
e399441d | 2230 | |
61bff8ef JS |
2231 | ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); |
2232 | if (IS_ERR(ctrl->ctrl.connect_q)) { | |
2233 | ret = PTR_ERR(ctrl->ctrl.connect_q); | |
2234 | goto out_free_tag_set; | |
2235 | } | |
e399441d | 2236 | |
61bff8ef | 2237 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size); |
e399441d | 2238 | if (ret) |
61bff8ef | 2239 | goto out_cleanup_blk_queue; |
e399441d | 2240 | |
61bff8ef JS |
2241 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size); |
2242 | if (ret) | |
2243 | goto out_delete_hw_queues; | |
e399441d JS |
2244 | |
2245 | return 0; | |
e399441d | 2246 | |
61bff8ef JS |
2247 | out_delete_hw_queues: |
2248 | nvme_fc_delete_hw_io_queues(ctrl); | |
2249 | out_cleanup_blk_queue: | |
2250 | nvme_stop_keep_alive(&ctrl->ctrl); | |
2251 | blk_cleanup_queue(ctrl->ctrl.connect_q); | |
2252 | out_free_tag_set: | |
2253 | blk_mq_free_tag_set(&ctrl->tag_set); | |
2254 | nvme_fc_free_io_queues(ctrl); | |
e399441d | 2255 | |
61bff8ef JS |
2256 | /* force put free routine to ignore io queues */ |
2257 | ctrl->ctrl.tagset = NULL; | |
2258 | ||
2259 | return ret; | |
2260 | } | |
e399441d JS |
2261 | |
2262 | static int | |
61bff8ef | 2263 | nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl) |
e399441d JS |
2264 | { |
2265 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; | |
2266 | int ret; | |
2267 | ||
2268 | ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); | |
2269 | if (ret) { | |
2270 | dev_info(ctrl->ctrl.device, | |
2271 | "set_queue_count failed: %d\n", ret); | |
2272 | return ret; | |
2273 | } | |
2274 | ||
61bff8ef JS |
2275 | /* check for io queues existing */ |
2276 | if (ctrl->queue_count == 1) | |
e399441d JS |
2277 | return 0; |
2278 | ||
e399441d JS |
2279 | nvme_fc_init_io_queues(ctrl); |
2280 | ||
61bff8ef | 2281 | ret = blk_mq_reinit_tagset(&ctrl->tag_set); |
e399441d | 2282 | if (ret) |
61bff8ef | 2283 | goto out_free_io_queues; |
e399441d JS |
2284 | |
2285 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size); | |
2286 | if (ret) | |
61bff8ef | 2287 | goto out_free_io_queues; |
e399441d JS |
2288 | |
2289 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size); | |
2290 | if (ret) | |
2291 | goto out_delete_hw_queues; | |
2292 | ||
2293 | return 0; | |
2294 | ||
2295 | out_delete_hw_queues: | |
2296 | nvme_fc_delete_hw_io_queues(ctrl); | |
61bff8ef | 2297 | out_free_io_queues: |
e399441d | 2298 | nvme_fc_free_io_queues(ctrl); |
61bff8ef JS |
2299 | return ret; |
2300 | } | |
e399441d | 2301 | |
61bff8ef JS |
2302 | /* |
2303 | * This routine restarts the controller on the host side, and | |
2304 | * on the link side, recreates the controller association. | |
2305 | */ | |
2306 | static int | |
2307 | nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) | |
2308 | { | |
2309 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; | |
2310 | u32 segs; | |
2311 | int ret; | |
2312 | bool changed; | |
2313 | ||
5bbecdbc | 2314 | ++ctrl->ctrl.opts->nr_reconnects; |
61bff8ef JS |
2315 | |
2316 | /* | |
2317 | * Create the admin queue | |
2318 | */ | |
2319 | ||
2320 | nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH); | |
2321 | ||
2322 | ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, | |
2323 | NVME_FC_AQ_BLKMQ_DEPTH); | |
2324 | if (ret) | |
2325 | goto out_free_queue; | |
2326 | ||
2327 | ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], | |
2328 | NVME_FC_AQ_BLKMQ_DEPTH, | |
2329 | (NVME_FC_AQ_BLKMQ_DEPTH / 4)); | |
2330 | if (ret) | |
2331 | goto out_delete_hw_queue; | |
2332 | ||
2333 | if (ctrl->ctrl.state != NVME_CTRL_NEW) | |
2334 | blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true); | |
2335 | ||
2336 | ret = nvmf_connect_admin_queue(&ctrl->ctrl); | |
2337 | if (ret) | |
2338 | goto out_disconnect_admin_queue; | |
2339 | ||
2340 | /* | |
2341 | * Check controller capabilities | |
2342 | * | |
2343 | * todo:- add code to check if ctrl attributes changed from | |
2344 | * prior connection values | |
2345 | */ | |
2346 | ||
2347 | ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); | |
2348 | if (ret) { | |
2349 | dev_err(ctrl->ctrl.device, | |
2350 | "prop_get NVME_REG_CAP failed\n"); | |
2351 | goto out_disconnect_admin_queue; | |
2352 | } | |
2353 | ||
2354 | ctrl->ctrl.sqsize = | |
2355 | min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); | |
2356 | ||
2357 | ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); | |
2358 | if (ret) | |
2359 | goto out_disconnect_admin_queue; | |
2360 | ||
2361 | segs = min_t(u32, NVME_FC_MAX_SEGMENTS, | |
2362 | ctrl->lport->ops->max_sgl_segments); | |
2363 | ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9); | |
2364 | ||
2365 | ret = nvme_init_identify(&ctrl->ctrl); | |
2366 | if (ret) | |
2367 | goto out_disconnect_admin_queue; | |
2368 | ||
2369 | /* sanity checks */ | |
2370 | ||
2371 | /* FC-NVME does not have other data in the capsule */ | |
2372 | if (ctrl->ctrl.icdoff) { | |
2373 | dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", | |
2374 | ctrl->ctrl.icdoff); | |
 | ret = -EINVAL; | |
2375 | goto out_disconnect_admin_queue; | |
2376 | } | |
2377 | ||
2378 | nvme_start_keep_alive(&ctrl->ctrl); | |
2379 | ||
2380 | /* FC-NVME supports normal SGL Data Block Descriptors */ | |
2381 | ||
2382 | if (opts->queue_size > ctrl->ctrl.maxcmd) { | |
2383 | /* warn if maxcmd is lower than queue_size */ | |
2384 | dev_warn(ctrl->ctrl.device, | |
2385 | "queue_size %zu > ctrl maxcmd %u, reducing " | |
2386 | "queue_size to maxcmd\n", | |
2387 | opts->queue_size, ctrl->ctrl.maxcmd); | |
2388 | opts->queue_size = ctrl->ctrl.maxcmd; | |
2389 | } | |
2390 | ||
2391 | ret = nvme_fc_init_aen_ops(ctrl); | |
2392 | if (ret) | |
2393 | goto out_term_aen_ops; | |
2394 | ||
2395 | /* | |
2396 | * Create the io queues | |
2397 | */ | |
2398 | ||
2399 | if (ctrl->queue_count > 1) { | |
2400 | if (ctrl->ctrl.state == NVME_CTRL_NEW) | |
2401 | ret = nvme_fc_create_io_queues(ctrl); | |
2402 | else | |
2403 | ret = nvme_fc_reinit_io_queues(ctrl); | |
2404 | if (ret) | |
2405 | goto out_term_aen_ops; | |
2406 | } | |
2407 | ||
2408 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); | |
2409 | WARN_ON_ONCE(!changed); | |
2410 | ||
5bbecdbc | 2411 | ctrl->ctrl.opts->nr_reconnects = 0; |
61bff8ef | 2412 | |
61bff8ef JS |
2413 | if (ctrl->queue_count > 1) { |
2414 | nvme_start_queues(&ctrl->ctrl); | |
2415 | nvme_queue_scan(&ctrl->ctrl); | |
2416 | nvme_queue_async_events(&ctrl->ctrl); | |
2417 | } | |
2418 | ||
2419 | return 0; /* Success */ | |
2420 | ||
2421 | out_term_aen_ops: | |
2422 | nvme_fc_term_aen_ops(ctrl); | |
2423 | nvme_stop_keep_alive(&ctrl->ctrl); | |
2424 | out_disconnect_admin_queue: | |
2425 | /* send a Disconnect(association) LS to fc-nvme target */ | |
2426 | nvme_fc_xmt_disconnect_assoc(ctrl); | |
2427 | out_delete_hw_queue: | |
2428 | __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); | |
2429 | out_free_queue: | |
2430 | nvme_fc_free_queue(&ctrl->queues[0]); | |
e399441d JS |
2431 | |
2432 | return ret; | |
2433 | } | |
2434 | ||
61bff8ef JS |
2435 | /* |
2436 | * This routine stops operation of the controller on the host side. | |
2437 | * On the host os stack side: Admin and IO queues are stopped, | |
2438 | * outstanding ios on them terminated via FC ABTS. | |
2439 | * On the link side: the association is terminated. | |
2440 | */ | |
2441 | static void | |
2442 | nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) | |
2443 | { | |
2444 | unsigned long flags; | |
2445 | ||
2446 | nvme_stop_keep_alive(&ctrl->ctrl); | |
2447 | ||
2448 | spin_lock_irqsave(&ctrl->lock, flags); | |
2449 | ctrl->flags |= FCCTRL_TERMIO; | |
2450 | ctrl->iocnt = 0; | |
2451 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
2452 | ||
2453 | /* | |
2454 | * If io queues are present, stop them and terminate all outstanding | |
2455 | * ios on them. As FC allocates FC exchange for each io, the | |
2456 | * transport must contact the LLDD to terminate the exchange, | |
2457 | * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter() | |
2458 | * to tell us what io's are busy and invoke a transport routine | |
2459 | * to kill them with the LLDD. After terminating the exchange | |
2460 | * the LLDD will call the transport's normal io done path, but it | |
2461 | * will have an aborted status. The done path will return the | |
2462 | * io requests back to the block layer as part of normal completions | |
2463 | * (but with error status). | |
2464 | */ | |
2465 | if (ctrl->queue_count > 1) { | |
2466 | nvme_stop_queues(&ctrl->ctrl); | |
2467 | blk_mq_tagset_busy_iter(&ctrl->tag_set, | |
2468 | nvme_fc_terminate_exchange, &ctrl->ctrl); | |
2469 | } | |
2470 | ||
2471 | /* | |
2472 | * Other transports, which don't have link-level contexts bound | |
2473 | * to sqe's, would try to gracefully shutdown the controller by | |
2474 | * writing the registers for shutdown and polling (call | |
2475 | * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially | |
2476 | * just aborted and we will wait on those contexts, and given | |
2477 | * there was no indication of how live the controller is on the | |
2478 | * link, don't send more io to create more contexts for the | |
2479 | * shutdown. Let the controller fail via keepalive failure if | |
2480 | * it's still present. | |
2481 | */ | |
2482 | ||
2483 | /* | |
2484 | * clean up the admin queue. Same thing as above. | |
2485 | * use blk_mq_tagset_busy_iter() and the transport routine to | |
2486 | * terminate the exchanges. | |
2487 | */ | |
2488 | blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); | |
2489 | blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, | |
2490 | nvme_fc_terminate_exchange, &ctrl->ctrl); | |
2491 | ||
2492 | /* kill the aens as they are a separate path */ | |
2493 | nvme_fc_abort_aen_ops(ctrl); | |
2494 | ||
2495 | /* wait for all io that had to be aborted */ | |
2496 | spin_lock_irqsave(&ctrl->lock, flags); | |
2497 | while (ctrl->iocnt) { | |
2498 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
2499 | msleep(1000); | |
2500 | spin_lock_irqsave(&ctrl->lock, flags); | |
2501 | } | |
2502 | ctrl->flags &= ~FCCTRL_TERMIO; | |
2503 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
2504 | ||
2505 | nvme_fc_term_aen_ops(ctrl); | |
2506 | ||
2507 | /* | |
2508 | * send a Disconnect(association) LS to fc-nvme target | |
2509 | * Note: could have been sent at top of process, but | |
2510 | * cleaner on link traffic if after the aborts complete. | |
2511 | * Note: if association doesn't exist, association_id will be 0 | |
2512 | */ | |
2513 | if (ctrl->association_id) | |
2514 | nvme_fc_xmt_disconnect_assoc(ctrl); | |
2515 | ||
2516 | if (ctrl->ctrl.tagset) { | |
2517 | nvme_fc_delete_hw_io_queues(ctrl); | |
2518 | nvme_fc_free_io_queues(ctrl); | |
2519 | } | |
2520 | ||
2521 | __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); | |
2522 | nvme_fc_free_queue(&ctrl->queues[0]); | |
2523 | } | |
2524 | ||
2525 | static void | |
2526 | nvme_fc_delete_ctrl_work(struct work_struct *work) | |
2527 | { | |
2528 | struct nvme_fc_ctrl *ctrl = | |
2529 | container_of(work, struct nvme_fc_ctrl, delete_work); | |
2530 | ||
2531 | cancel_work_sync(&ctrl->reset_work); | |
2532 | cancel_delayed_work_sync(&ctrl->connect_work); | |
2533 | ||
2534 | /* | |
2535 | * kill the association on the link side. this will block | |
2536 | * waiting for io to terminate | |
2537 | */ | |
2538 | nvme_fc_delete_association(ctrl); | |
2539 | ||
2540 | /* | |
2541 | * tear down the controller | |
a5321aa5 JS |
2542 | * After the last reference on the nvme ctrl is removed, |
2543 | * the transport nvme_fc_nvme_ctrl_freed() callback will be | |
2544 | * invoked. From there, the transport will tear down its | |
2545 | * logical queues and association. | |
61bff8ef JS |
2546 | */ |
2547 | nvme_uninit_ctrl(&ctrl->ctrl); | |
2548 | ||
2549 | nvme_put_ctrl(&ctrl->ctrl); | |
2550 | } | |
2551 | ||
5bbecdbc JS |
2552 | static bool |
2553 | __nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl) | |
61bff8ef JS |
2554 | { |
2555 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING)) | |
5bbecdbc | 2556 | return true; |
61bff8ef JS |
2557 | |
2558 | if (!queue_work(nvme_fc_wq, &ctrl->delete_work)) | |
5bbecdbc | 2559 | return true; |
61bff8ef | 2560 | |
5bbecdbc JS |
2561 | return false; |
2562 | } | |
2563 | ||
2564 | static int | |
2565 | __nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl) | |
2566 | { | |
2567 | return __nvme_fc_schedule_delete_work(ctrl) ? -EBUSY : 0; | |
61bff8ef JS |
2568 | } |
2569 | ||
2570 | /* | |
2571 | * Request from nvme core layer to delete the controller | |
2572 | */ | |
2573 | static int | |
2574 | nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl) | |
2575 | { | |
2576 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); | |
2577 | int ret; | |
2578 | ||
2579 | if (!kref_get_unless_zero(&ctrl->ctrl.kref)) | |
2580 | return -EBUSY; | |
2581 | ||
2582 | ret = __nvme_fc_del_ctrl(ctrl); | |
2583 | ||
2584 | if (!ret) | |
2585 | flush_workqueue(nvme_fc_wq); | |
2586 | ||
2587 | nvme_put_ctrl(&ctrl->ctrl); | |
2588 | ||
2589 | return ret; | |
2590 | } | |
2591 | ||
5bbecdbc JS |
2592 | static void |
2593 | nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) | |
2594 | { | |
2595 | /* If we are resetting/deleting then do nothing */ | |
2596 | if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) { | |
2597 | WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW || | |
2598 | ctrl->ctrl.state == NVME_CTRL_LIVE); | |
2599 | return; | |
2600 | } | |
2601 | ||
589ff775 | 2602 | dev_info(ctrl->ctrl.device, |
5bbecdbc JS |
2603 | "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", |
2604 | ctrl->cnum, status); | |
2605 | ||
2606 | if (nvmf_should_reconnect(&ctrl->ctrl)) { | |
2607 | dev_info(ctrl->ctrl.device, | |
2608 | "NVME-FC{%d}: Reconnect attempt in %d seconds.\n", | |
2609 | ctrl->cnum, ctrl->ctrl.opts->reconnect_delay); | |
2610 | queue_delayed_work(nvme_fc_wq, &ctrl->connect_work, | |
2611 | ctrl->ctrl.opts->reconnect_delay * HZ); | |
2612 | } else { | |
589ff775 | 2613 | dev_warn(ctrl->ctrl.device, |
5bbecdbc JS |
2614 | "NVME-FC{%d}: Max reconnect attempts (%d) " |
2615 | "reached. Removing controller\n", | |
2616 | ctrl->cnum, ctrl->ctrl.opts->nr_reconnects); | |
2617 | WARN_ON(__nvme_fc_schedule_delete_work(ctrl)); | |
2618 | } | |
2619 | } | |
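/*
 * Worked example, assuming the fabrics-layer policy where the
 * maximum reconnect count is ctrl_loss_tmo divided by
 * reconnect_delay: with ctrl_loss_tmo=600s and reconnect_delay=10s,
 * the connect work requeues every 10 seconds and
 * nvmf_should_reconnect() permits roughly 60 attempts before
 * __nvme_fc_schedule_delete_work() removes the controller.
 */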
2620 | ||
61bff8ef JS |
2621 | static void |
2622 | nvme_fc_reset_ctrl_work(struct work_struct *work) | |
2623 | { | |
2624 | struct nvme_fc_ctrl *ctrl = | |
2625 | container_of(work, struct nvme_fc_ctrl, reset_work); | |
2626 | int ret; | |
2627 | ||
2628 | /* will block while waiting for io to terminate */ | |
2629 | nvme_fc_delete_association(ctrl); | |
2630 | ||
2631 | ret = nvme_fc_create_association(ctrl); | |
5bbecdbc JS |
2632 | if (ret) |
2633 | nvme_fc_reconnect_or_delete(ctrl, ret); | |
2634 | else | |
61bff8ef JS |
2635 | dev_info(ctrl->ctrl.device, |
2636 | "NVME-FC{%d}: controller reset complete\n", ctrl->cnum); | |
2637 | } | |
2638 | ||
2639 | /* | |
2640 | * called by the nvme core layer, for sysfs interface that requests | |
2641 | * a reset of the nvme controller | |
2642 | */ | |
2643 | static int | |
2644 | nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl) | |
2645 | { | |
2646 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); | |
2647 | ||
589ff775 | 2648 | dev_info(ctrl->ctrl.device, |
61bff8ef JS |
2649 | "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum); |
2650 | ||
2651 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) | |
2652 | return -EBUSY; | |
2653 | ||
2654 | if (!queue_work(nvme_fc_wq, &ctrl->reset_work)) | |
2655 | return -EBUSY; | |
2656 | ||
2657 | flush_work(&ctrl->reset_work); | |
2658 | ||
2659 | return 0; | |
2660 | } | |
2661 | ||
2662 | static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { | |
2663 | .name = "fc", | |
2664 | .module = THIS_MODULE, | |
d3d5b87d | 2665 | .flags = NVME_F_FABRICS, |
61bff8ef JS |
2666 | .reg_read32 = nvmf_reg_read32, |
2667 | .reg_read64 = nvmf_reg_read64, | |
2668 | .reg_write32 = nvmf_reg_write32, | |
2669 | .reset_ctrl = nvme_fc_reset_nvme_ctrl, | |
2670 | .free_ctrl = nvme_fc_nvme_ctrl_freed, | |
2671 | .submit_async_event = nvme_fc_submit_async_event, | |
2672 | .delete_ctrl = nvme_fc_del_nvme_ctrl, | |
2673 | .get_subsysnqn = nvmf_get_subsysnqn, | |
2674 | .get_address = nvmf_get_address, | |
2675 | }; | |
2676 | ||
2677 | static void | |
2678 | nvme_fc_connect_ctrl_work(struct work_struct *work) | |
2679 | { | |
2680 | int ret; | |
2681 | ||
2682 | struct nvme_fc_ctrl *ctrl = | |
2683 | container_of(to_delayed_work(work), | |
2684 | struct nvme_fc_ctrl, connect_work); | |
2685 | ||
2686 | ret = nvme_fc_create_association(ctrl); | |
5bbecdbc JS |
2687 | if (ret) |
2688 | nvme_fc_reconnect_or_delete(ctrl, ret); | |
2689 | else | |
61bff8ef JS |
2690 | dev_info(ctrl->ctrl.device, |
2691 | "NVME-FC{%d}: controller reconnect complete\n", | |
2692 | ctrl->cnum); | |
2693 | } | |
2694 | ||
2695 | ||
2696 | static const struct blk_mq_ops nvme_fc_admin_mq_ops = { | |
2697 | .queue_rq = nvme_fc_queue_rq, | |
2698 | .complete = nvme_fc_complete_rq, | |
2699 | .init_request = nvme_fc_init_admin_request, | |
2700 | .exit_request = nvme_fc_exit_request, | |
2701 | .reinit_request = nvme_fc_reinit_request, | |
2702 | .init_hctx = nvme_fc_init_admin_hctx, | |
2703 | .timeout = nvme_fc_timeout, | |
2704 | }; | |
2705 | ||
e399441d JS |
2706 | |
2707 | static struct nvme_ctrl * | |
61bff8ef | 2708 | nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, |
e399441d JS |
2709 | struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) |
2710 | { | |
2711 | struct nvme_fc_ctrl *ctrl; | |
2712 | unsigned long flags; | |
2713 | int ret, idx; | |
e399441d | 2714 | |
85e6a6ad JS |
2715 | if (!(rport->remoteport.port_role & |
2716 | (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { | |
2717 | ret = -EBADR; | |
2718 | goto out_fail; | |
2719 | } | |
2720 | ||
e399441d JS |
2721 | ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); |
2722 | if (!ctrl) { | |
2723 | ret = -ENOMEM; | |
2724 | goto out_fail; | |
2725 | } | |
2726 | ||
2727 | idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); | |
2728 | if (idx < 0) { | |
2729 | ret = -ENOSPC; | |
2730 | goto out_free_ctrl; | |
2731 | } | |
2732 | ||
2733 | ctrl->ctrl.opts = opts; | |
2734 | INIT_LIST_HEAD(&ctrl->ctrl_list); | |
e399441d JS |
2735 | ctrl->lport = lport; |
2736 | ctrl->rport = rport; | |
2737 | ctrl->dev = lport->dev; | |
e399441d JS |
2738 | ctrl->cnum = idx; |
2739 | ||
e399441d JS |
2740 | get_device(ctrl->dev); |
2741 | kref_init(&ctrl->ref); | |
2742 | ||
61bff8ef JS |
2743 | INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work); |
2744 | INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work); | |
2745 | INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); | |
e399441d JS |
2746 | spin_lock_init(&ctrl->lock); |
2747 | ||
2748 | /* io queue count */ | |
2749 | ctrl->queue_count = min_t(unsigned int, | |
2750 | opts->nr_io_queues, | |
2751 | lport->ops->max_hw_queues); | |
2752 | opts->nr_io_queues = ctrl->queue_count; /* so opts has valid value */ | |
2753 | ctrl->queue_count++; /* +1 for admin queue */ | |
2754 | ||
2755 | ctrl->ctrl.sqsize = opts->queue_size - 1; | |
2756 | ctrl->ctrl.kato = opts->kato; | |
2757 | ||
2758 | ret = -ENOMEM; | |
2759 | ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue), | |
2760 | GFP_KERNEL); | |
2761 | if (!ctrl->queues) | |
61bff8ef | 2762 | goto out_free_ida; |
e399441d | 2763 | |
61bff8ef JS |
2764 | memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); |
2765 | ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops; | |
2766 | ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH; | |
2767 | ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */ | |
2768 | ctrl->admin_tag_set.numa_node = NUMA_NO_NODE; | |
2769 | ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) + | |
2770 | (SG_CHUNK_SIZE * | |
2771 | sizeof(struct scatterlist)) + | |
2772 | ctrl->lport->ops->fcprqst_priv_sz; | |
2773 | ctrl->admin_tag_set.driver_data = ctrl; | |
2774 | ctrl->admin_tag_set.nr_hw_queues = 1; | |
2775 | ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; | |
e399441d | 2776 | |
61bff8ef | 2777 | ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); |
e399441d | 2778 | if (ret) |
61bff8ef | 2779 | goto out_free_queues; |
e399441d | 2780 | |
61bff8ef JS |
2781 | ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); |
2782 | if (IS_ERR(ctrl->ctrl.admin_q)) { | |
2783 | ret = PTR_ERR(ctrl->ctrl.admin_q); | |
2784 | goto out_free_admin_tag_set; | |
e399441d JS |
2785 | } |
2786 | ||
61bff8ef JS |
2787 | /* |
2788 | * Would have been nice to init io queues tag set as well. | |
2789 | * However, we require interaction from the controller | |
2790 | * for max io queue count before we can do so. | |
2791 | * Defer this to the connect path. | |
2792 | */ | |
e399441d | 2793 | |
61bff8ef JS |
2794 | ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); |
2795 | if (ret) | |
2796 | goto out_cleanup_admin_q; | |
e399441d | 2797 | |
61bff8ef | 2798 | /* at this point, teardown path changes to ref counting on nvme ctrl */ |
e399441d JS |
2799 | |
2800 | spin_lock_irqsave(&rport->lock, flags); | |
2801 | list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); | |
2802 | spin_unlock_irqrestore(&rport->lock, flags); | |
2803 | ||
61bff8ef JS |
2804 | ret = nvme_fc_create_association(ctrl); |
2805 | if (ret) { | |
de41447a | 2806 | ctrl->ctrl.opts = NULL; |
61bff8ef JS |
2807 | /* initiate nvme ctrl ref counting teardown */ |
2808 | nvme_uninit_ctrl(&ctrl->ctrl); | |
24b7f059 | 2809 | nvme_put_ctrl(&ctrl->ctrl); |
61bff8ef JS |
2810 | |
2811 | /* as we're past the point where we transition to the ref | |
2812 | * counting teardown path, if we return a bad pointer here, | |
2813 | * the calling routine, thinking it's prior to the | |
2814 | * transition, will do an rport put. Since the teardown | |
2815 | * path also does a rport put, we do an extra get here | |
2816 | * so that proper order/teardown happens. | |
2817 | */ | |
2818 | nvme_fc_rport_get(rport); | |
2819 | ||
2820 | if (ret > 0) | |
2821 | ret = -EIO; | |
2822 | return ERR_PTR(ret); | |
e399441d JS |
2823 | } |
2824 | ||
2cb657bc JS |
2825 | kref_get(&ctrl->ctrl.kref); |
2826 | ||
61bff8ef JS |
2827 | dev_info(ctrl->ctrl.device, |
2828 | "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", | |
2829 | ctrl->cnum, ctrl->ctrl.opts->subsysnqn); | |
e399441d | 2830 | |
61bff8ef | 2831 | return &ctrl->ctrl; |
e399441d | 2832 | |
61bff8ef JS |
2833 | out_cleanup_admin_q: |
2834 | blk_cleanup_queue(ctrl->ctrl.admin_q); | |
2835 | out_free_admin_tag_set: | |
2836 | blk_mq_free_tag_set(&ctrl->admin_tag_set); | |
2837 | out_free_queues: | |
2838 | kfree(ctrl->queues); | |
e399441d | 2839 | out_free_ida: |
61bff8ef | 2840 | put_device(ctrl->dev); |
e399441d JS |
2841 | ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); |
2842 | out_free_ctrl: | |
2843 | kfree(ctrl); | |
2844 | out_fail: | |
e399441d JS |
2845 | /* exit via here doesn't follow ctlr ref points */ |
2846 | return ERR_PTR(ret); | |
2847 | } | |
2848 | ||
2849 | enum { | |
2850 | FCT_TRADDR_ERR = 0, | |
2851 | FCT_TRADDR_WWNN = 1 << 0, | |
2852 | FCT_TRADDR_WWPN = 1 << 1, | |
2853 | }; | |
2854 | ||
2855 | struct nvmet_fc_traddr { | |
2856 | u64 nn; | |
2857 | u64 pn; | |
2858 | }; | |
2859 | ||
2860 | static const match_table_t traddr_opt_tokens = { | |
2861 | { FCT_TRADDR_WWNN, "nn-%s" }, | |
2862 | { FCT_TRADDR_WWPN, "pn-%s" }, | |
2863 | { FCT_TRADDR_ERR, NULL } | |
2864 | }; | |
2865 | ||
2866 | static int | |
2867 | nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf) | |
2868 | { | |
2869 | substring_t args[MAX_OPT_ARGS]; | |
2870 | char *options, *o, *p; | |
2871 | int token, ret = 0; | |
2872 | u64 token64; | |
2873 | ||
2874 | options = o = kstrdup(buf, GFP_KERNEL); | |
2875 | if (!options) | |
2876 | return -ENOMEM; | |
2877 | ||
2878 | while ((p = strsep(&o, ":\n")) != NULL) { | |
2879 | if (!*p) | |
2880 | continue; | |
2881 | ||
2882 | token = match_token(p, traddr_opt_tokens, args); | |
2883 | switch (token) { | |
2884 | case FCT_TRADDR_WWNN: | |
2885 | if (match_u64(args, &token64)) { | |
2886 | ret = -EINVAL; | |
2887 | goto out; | |
2888 | } | |
2889 | traddr->nn = token64; | |
2890 | break; | |
2891 | case FCT_TRADDR_WWPN: | |
2892 | if (match_u64(args, &token64)) { | |
2893 | ret = -EINVAL; | |
2894 | goto out; | |
2895 | } | |
2896 | traddr->pn = token64; | |
2897 | break; | |
2898 | default: | |
2899 | pr_warn("unknown traddr token or missing value '%s'\n", | |
2900 | p); | |
2901 | ret = -EINVAL; | |
2902 | goto out; | |
2903 | } | |
2904 | } | |
2905 | ||
2906 | out: | |
2907 | kfree(options); | |
2908 | return ret; | |
2909 | } | |
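/*
 * Example of the address format parsed above (the WWNN/WWPN values
 * are made up for illustration): both traddr and host_traddr are
 * "nn-<wwnn>:pn-<wwpn>" strings, e.g.
 *
 *   traddr:      nn-0x200000109b340a4f:pn-0x100000109b340a4f
 *   host_traddr: nn-0x20000090fa942779:pn-0x10000090fa942779
 *
 * match_u64() auto-detects the base, so the 0x prefix is accepted,
 * and nvme_fc_create_ctrl() below rejects addresses where either
 * the node name or the port name parses to zero.
 */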
2910 | ||
2911 | static struct nvme_ctrl * | |
2912 | nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) | |
2913 | { | |
2914 | struct nvme_fc_lport *lport; | |
2915 | struct nvme_fc_rport *rport; | |
61bff8ef | 2916 | struct nvme_ctrl *ctrl; |
e399441d JS |
2917 | struct nvmet_fc_traddr laddr = { 0L, 0L }; |
2918 | struct nvmet_fc_traddr raddr = { 0L, 0L }; | |
2919 | unsigned long flags; | |
2920 | int ret; | |
2921 | ||
2922 | ret = nvme_fc_parse_address(&raddr, opts->traddr); | |
2923 | if (ret || !raddr.nn || !raddr.pn) | |
2924 | return ERR_PTR(-EINVAL); | |
2925 | ||
2926 | ret = nvme_fc_parse_address(&laddr, opts->host_traddr); | |
2927 | if (ret || !laddr.nn || !laddr.pn) | |
2928 | return ERR_PTR(-EINVAL); | |
2929 | ||
2930 | /* find the host and remote ports to connect together */ | |
2931 | spin_lock_irqsave(&nvme_fc_lock, flags); | |
2932 | list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { | |
2933 | if (lport->localport.node_name != laddr.nn || | |
2934 | lport->localport.port_name != laddr.pn) | |
2935 | continue; | |
2936 | ||
2937 | list_for_each_entry(rport, &lport->endp_list, endp_list) { | |
2938 | if (rport->remoteport.node_name != raddr.nn || | |
2939 | rport->remoteport.port_name != raddr.pn) | |
2940 | continue; | |
2941 | ||
2942 | /* if fail to get reference fall through. Will error */ | |
2943 | if (!nvme_fc_rport_get(rport)) | |
2944 | break; | |
2945 | ||
2946 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
2947 | ||
61bff8ef JS |
2948 | ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport); |
2949 | if (IS_ERR(ctrl)) | |
2950 | nvme_fc_rport_put(rport); | |
2951 | return ctrl; | |
e399441d JS |
2952 | } |
2953 | } | |
2954 | spin_unlock_irqrestore(&nvme_fc_lock, flags); | |
2955 | ||
2956 | return ERR_PTR(-ENOENT); | |
2957 | } | |
2958 | ||
2959 | ||
2960 | static struct nvmf_transport_ops nvme_fc_transport = { | |
2961 | .name = "fc", | |
2962 | .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, | |
5bbecdbc | 2963 | .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO, |
e399441d JS |
2964 | .create_ctrl = nvme_fc_create_ctrl, |
2965 | }; | |
2966 | ||
2967 | static int __init nvme_fc_init_module(void) | |
2968 | { | |
c0e4a6f5 SG |
2969 | int ret; |
2970 | ||
e399441d JS |
2971 | nvme_fc_wq = create_workqueue("nvme_fc_wq"); |
2972 | if (!nvme_fc_wq) | |
2973 | return -ENOMEM; | |
2974 | ||
c0e4a6f5 SG |
2975 | ret = nvmf_register_transport(&nvme_fc_transport); |
2976 | if (ret) | |
2977 | goto err; | |
2978 | ||
2979 | return 0; | |
2980 | err: | |
2981 | destroy_workqueue(nvme_fc_wq); | |
2982 | return ret; | |
e399441d JS |
2983 | } |
2984 | ||
2985 | static void __exit nvme_fc_exit_module(void) | |
2986 | { | |
2987 | /* sanity check - all lports should be removed */ | |
2988 | if (!list_empty(&nvme_fc_lport_list)) | |
2989 | pr_warn("%s: localport list not empty\n", __func__); | |
2990 | ||
2991 | nvmf_unregister_transport(&nvme_fc_transport); | |
2992 | ||
2993 | destroy_workqueue(nvme_fc_wq); | |
2994 | ||
2995 | ida_destroy(&nvme_fc_local_port_cnt); | |
2996 | ida_destroy(&nvme_fc_ctrl_cnt); | |
2997 | } | |
2998 | ||
2999 | module_init(nvme_fc_init_module); | |
3000 | module_exit(nvme_fc_exit_module); | |
3001 | ||
3002 | MODULE_LICENSE("GPL v2"); |