/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/nvmf_spec.h"
#include "nvme_internal.h"

#define SPDK_NVME_DRIVER_NAME "spdk_nvme_driver"

struct nvme_driver *g_spdk_nvme_driver;
pid_t g_spdk_nvme_pid;

int32_t spdk_nvme_retry_count;

/* Overall timeout of 180 seconds, in milliseconds, for waiting on driver init */
static int g_nvme_driver_timeout_ms = 3 * 60 * 1000;

static TAILQ_HEAD(, spdk_nvme_ctrlr) g_nvme_init_ctrlrs =
        TAILQ_HEAD_INITIALIZER(g_nvme_init_ctrlrs);

/* Per-process attached controller list */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_nvme_attached_ctrlrs =
        TAILQ_HEAD_INITIALIZER(g_nvme_attached_ctrlrs);

/* Returns true if ctrlr should be stored on the multi-process shared_attached_ctrlrs list */
static bool
nvme_ctrlr_shared(const struct spdk_nvme_ctrlr *ctrlr)
{
        return ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE;
}

/* Caller must hold g_spdk_nvme_driver->lock */
void
nvme_ctrlr_connected(struct spdk_nvme_ctrlr *ctrlr)
{
        TAILQ_INSERT_TAIL(&g_nvme_init_ctrlrs, ctrlr, tailq);
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
        nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);

        nvme_ctrlr_proc_put_ref(ctrlr);

        if (nvme_ctrlr_get_ref_count(ctrlr) == 0) {
                if (nvme_ctrlr_shared(ctrlr)) {
                        TAILQ_REMOVE(&g_spdk_nvme_driver->shared_attached_ctrlrs, ctrlr, tailq);
                } else {
                        TAILQ_REMOVE(&g_nvme_attached_ctrlrs, ctrlr, tailq);
                }
                nvme_ctrlr_destruct(ctrlr);
        }

        nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
        return 0;
}

void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
        struct nvme_completion_poll_status *status = arg;

        /*
         * Copy status into the argument passed by the caller, so that
         * the caller can check the status to determine if the
         * request passed or failed.
         */
        memcpy(&status->cpl, cpl, sizeof(*cpl));
        status->done = true;
}

/**
 * Poll qpair for completions until a command completes.
 *
 * \param qpair queue to poll
 * \param status completion status
 * \param robust_mutex optional robust mutex to lock while polling qpair
 *
 * \return 0 if command completed without error, negative errno on failure
 *
 * The command to wait upon must be submitted with nvme_completion_poll_cb as the callback
 * and status as the callback argument.
 */
int
spdk_nvme_wait_for_completion_robust_lock(
        struct spdk_nvme_qpair *qpair,
        struct nvme_completion_poll_status *status,
        pthread_mutex_t *robust_mutex)
{
        memset(&status->cpl, 0, sizeof(status->cpl));
        status->done = false;

        while (status->done == false) {
                if (robust_mutex) {
                        nvme_robust_mutex_lock(robust_mutex);
                }

                spdk_nvme_qpair_process_completions(qpair, 0);

                if (robust_mutex) {
                        nvme_robust_mutex_unlock(robust_mutex);
                }
        }

        return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
}

int
spdk_nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
                              struct nvme_completion_poll_status *status)
{
        return spdk_nvme_wait_for_completion_robust_lock(qpair, status, NULL);
}
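
/*
 * Usage sketch (illustrative, not part of this file): a caller pairs
 * nvme_completion_poll_cb with spdk_nvme_wait_for_completion() to turn an
 * asynchronous command into a synchronous one. The submit helper below is
 * hypothetical; any function taking an spdk_nvme_cmd_cb works the same way.
 *
 *      struct nvme_completion_poll_status status;
 *      int rc;
 *
 *      rc = nvme_ctrlr_cmd_example(ctrlr, nvme_completion_poll_cb, &status);
 *      if (rc == 0) {
 *              rc = spdk_nvme_wait_for_completion(ctrlr->adminq, &status);
 *      }
 */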

static void
nvme_user_copy_cmd_complete(void *arg, const struct spdk_nvme_cpl *cpl)
{
        struct nvme_request *req = arg;
        enum spdk_nvme_data_transfer xfer;

        if (req->user_buffer && req->payload_size) {
                /* Copy back to the user buffer and free the contig buffer */
                assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
                xfer = spdk_nvme_opc_get_data_transfer(req->cmd.opc);
                if (xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST ||
                    xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
                        assert(req->pid == getpid());
                        memcpy(req->user_buffer, req->payload.contig_or_cb_arg, req->payload_size);
                }

                spdk_dma_free(req->payload.contig_or_cb_arg);
        }

        /* Call the user's original callback now that the buffer has been copied */
        req->user_cb_fn(req->user_cb_arg, cpl);
}

/**
 * Allocate a request as well as a DMA-capable buffer to copy to/from the user's buffer.
 *
 * This is intended for use in non-fast-path functions (admin commands, reservations, etc.)
 * where the overhead of a copy is not a problem.
 */
struct nvme_request *
nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair,
                                void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
                                void *cb_arg, bool host_to_controller)
{
        struct nvme_request *req;
        void *dma_buffer = NULL;
        uint64_t phys_addr;

        if (buffer && payload_size) {
                dma_buffer = spdk_zmalloc(payload_size, 4096, &phys_addr,
                                          SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
                if (!dma_buffer) {
                        return NULL;
                }

                if (host_to_controller) {
                        memcpy(dma_buffer, buffer, payload_size);
                }
        }

        req = nvme_allocate_request_contig(qpair, dma_buffer, payload_size, nvme_user_copy_cmd_complete,
                                           NULL);
        if (!req) {
                spdk_free(dma_buffer);
                return NULL;
        }

        req->user_cb_fn = cb_fn;
        req->user_cb_arg = cb_arg;
        req->user_buffer = buffer;
        req->cb_arg = req;

        return req;
}
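
/*
 * Illustrative sketch (caller context is hypothetical): an admin command
 * wrapper that accepts an arbitrary user buffer builds its request through
 * the user-copy path above, so the caller does not need a DMA-capable buffer.
 *
 *      req = nvme_allocate_request_user_copy(ctrlr->adminq, user_buf, size,
 *                                            cb_fn, cb_arg, false);
 *      if (req == NULL) {
 *              return -ENOMEM;
 *      }
 *      return nvme_ctrlr_submit_admin_request(ctrlr, req);
 */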

/**
 * Check if a request has exceeded the controller timeout.
 *
 * \param req request to check for timeout.
 * \param cid command ID for command submitted by req (will be passed to timeout_cb_fn)
 * \param active_proc per-process data for the controller associated with req
 * \param now_tick current time from spdk_get_ticks()
 * \return 0 if requests submitted more recently than req should still be checked for timeouts, or
 * 1 if requests newer than req need not be checked.
 *
 * The request's timeout callback will be called if needed; the caller is only responsible for
 * calling this function on each outstanding request.
 */
int
nvme_request_check_timeout(struct nvme_request *req, uint16_t cid,
                           struct spdk_nvme_ctrlr_process *active_proc,
                           uint64_t now_tick)
{
        struct spdk_nvme_qpair *qpair = req->qpair;
        struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

        assert(active_proc->timeout_cb_fn != NULL);

        if (req->timed_out || req->submit_tick == 0) {
                return 0;
        }

        if (req->pid != g_spdk_nvme_pid) {
                return 0;
        }

        if (nvme_qpair_is_admin_queue(qpair) &&
            req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
                return 0;
        }

        if (req->submit_tick + active_proc->timeout_ticks > now_tick) {
                return 1;
        }

        req->timed_out = true;

        /*
         * We don't want to expose the admin queue to the user,
         * so when we're timing out admin commands set the
         * qpair to NULL.
         */
        active_proc->timeout_cb_fn(active_proc->timeout_cb_arg, ctrlr,
                                   nvme_qpair_is_admin_queue(qpair) ? NULL : qpair,
                                   cid);
        return 0;
}
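
/*
 * Intended call pattern (a sketch; the tracker list name is transport
 * specific and hypothetical here): walk outstanding requests oldest-first
 * and stop at the first one that has not yet reached its deadline, since
 * every request submitted after it is necessarily newer.
 *
 *      uint64_t now_tick = spdk_get_ticks();
 *
 *      TAILQ_FOREACH(tr, &qpair->outstanding_tr, tq_list) {
 *              if (nvme_request_check_timeout(tr->req, tr->cid, active_proc, now_tick)) {
 *                      break;
 *              }
 *      }
 */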

int
nvme_robust_mutex_init_shared(pthread_mutex_t *mtx)
{
        int rc = 0;

#ifdef __FreeBSD__
        pthread_mutex_init(mtx, NULL);
#else
        pthread_mutexattr_t attr;

        if (pthread_mutexattr_init(&attr)) {
                return -1;
        }
        if (pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) ||
            pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST) ||
            pthread_mutex_init(mtx, &attr)) {
                rc = -1;
        }
        pthread_mutexattr_destroy(&attr);
#endif

        return rc;
}
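
/*
 * Robust, process-shared mutexes let a surviving process recover the lock
 * when the owner dies while holding it. A minimal sketch of the matching
 * lock-side recovery (the real helper, nvme_robust_mutex_lock(), is defined
 * elsewhere in the driver):
 *
 *      int rc = pthread_mutex_lock(mtx);
 *
 *      if (rc == EOWNERDEAD) {
 *              // The previous owner died; the state the mutex protects may
 *              // be inconsistent. Mark the mutex usable again.
 *              rc = pthread_mutex_consistent(mtx);
 *      }
 */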

int
nvme_driver_init(void)
{
        int ret = 0;
        /* Any socket ID */
        int socket_id = -1;

        /* Each process needs its own pid. */
        g_spdk_nvme_pid = getpid();

        /*
         * Only one thread from one process will do this driver init work.
         * The primary process will reserve the shared memory and do the
         * initialization.
         * The secondary process will look up the existing reserved memory.
         */
        if (spdk_process_is_primary()) {
                /* The unique named memzone has already been reserved. */
                if (g_spdk_nvme_driver != NULL) {
                        return 0;
                } else {
                        g_spdk_nvme_driver = spdk_memzone_reserve(SPDK_NVME_DRIVER_NAME,
                                             sizeof(struct nvme_driver), socket_id,
                                             SPDK_MEMZONE_NO_IOVA_CONTIG);
                }

                if (g_spdk_nvme_driver == NULL) {
                        SPDK_ERRLOG("primary process failed to reserve memory\n");

                        return -1;
                }
        } else {
                g_spdk_nvme_driver = spdk_memzone_lookup(SPDK_NVME_DRIVER_NAME);

                /* The unique named memzone was already reserved by the primary process. */
                if (g_spdk_nvme_driver != NULL) {
                        int ms_waited = 0;

                        /* Wait for the nvme driver to finish initializing. */
                        while ((g_spdk_nvme_driver->initialized == false) &&
                               (ms_waited < g_nvme_driver_timeout_ms)) {
                                ms_waited++;
                                nvme_delay(1000); /* delay 1ms */
                        }
                        if (g_spdk_nvme_driver->initialized == false) {
                                SPDK_ERRLOG("timeout waiting for primary process to init\n");

                                return -1;
                        }
                } else {
                        SPDK_ERRLOG("primary process is not started yet\n");

                        return -1;
                }

                return 0;
        }

        /*
         * At this moment, only one thread from the primary process will do
         * the g_spdk_nvme_driver initialization
         */
        assert(spdk_process_is_primary());

        ret = nvme_robust_mutex_init_shared(&g_spdk_nvme_driver->lock);
        if (ret != 0) {
                SPDK_ERRLOG("failed to initialize mutex\n");
                spdk_memzone_free(SPDK_NVME_DRIVER_NAME);
                return ret;
        }

        nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);

        g_spdk_nvme_driver->initialized = false;

        TAILQ_INIT(&g_spdk_nvme_driver->shared_attached_ctrlrs);

        spdk_uuid_generate(&g_spdk_nvme_driver->default_extended_host_id);

        nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);

        return ret;
}

int
nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid, void *devhandle,
                 spdk_nvme_probe_cb probe_cb, void *cb_ctx)
{
        struct spdk_nvme_ctrlr *ctrlr;
        struct spdk_nvme_ctrlr_opts opts;

        assert(trid != NULL);

        spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));

        if (!probe_cb || probe_cb(cb_ctx, trid, &opts)) {
                ctrlr = nvme_transport_ctrlr_construct(trid, &opts, devhandle);
                if (ctrlr == NULL) {
                        SPDK_ERRLOG("Failed to construct NVMe controller for SSD: %s\n", trid->traddr);
                        return -1;
                }

                TAILQ_INSERT_TAIL(&g_nvme_init_ctrlrs, ctrlr, tailq);
                return 0;
        }

        return 1;
}

static int
nvme_init_controllers(void *cb_ctx, spdk_nvme_attach_cb attach_cb)
{
        int rc = 0;
        int start_rc;
        struct spdk_nvme_ctrlr *ctrlr, *ctrlr_tmp;

        nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);

        /* Initialize all new controllers in the g_nvme_init_ctrlrs list in parallel. */
        while (!TAILQ_EMPTY(&g_nvme_init_ctrlrs)) {
                TAILQ_FOREACH_SAFE(ctrlr, &g_nvme_init_ctrlrs, tailq, ctrlr_tmp) {
                        /* Drop the driver lock while calling nvme_ctrlr_process_init()
                         * since it needs to acquire the driver lock internally when initializing
                         * the controller.
                         *
                         * TODO: Rethink the locking - maybe reset should take the lock so that start() and
                         * the functions it calls (in particular nvme_ctrlr_set_num_qpairs())
                         * can assume it is held.
                         */
                        nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
                        start_rc = nvme_ctrlr_process_init(ctrlr);
                        nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);

                        if (start_rc) {
                                /* Controller failed to initialize. */
                                TAILQ_REMOVE(&g_nvme_init_ctrlrs, ctrlr, tailq);
                                SPDK_ERRLOG("Failed to initialize SSD: %s\n", ctrlr->trid.traddr);
                                nvme_ctrlr_destruct(ctrlr);
                                rc = -1;
                                break;
                        }

                        if (ctrlr->state == NVME_CTRLR_STATE_READY) {
                                /*
                                 * Controller has been initialized.
                                 * Move it to the attached_ctrlrs list.
                                 */
                                TAILQ_REMOVE(&g_nvme_init_ctrlrs, ctrlr, tailq);
                                if (nvme_ctrlr_shared(ctrlr)) {
                                        TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, ctrlr, tailq);
                                } else {
                                        TAILQ_INSERT_TAIL(&g_nvme_attached_ctrlrs, ctrlr, tailq);
                                }

                                /*
                                 * Increase the ref count before calling attach_cb() as the user may
                                 * call nvme_detach() immediately.
                                 */
                                nvme_ctrlr_proc_get_ref(ctrlr);

                                /*
                                 * Unlock while calling attach_cb() so the user can call other functions
                                 * that may take the driver lock, like nvme_detach().
                                 */
                                if (attach_cb) {
                                        nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
                                        attach_cb(cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
                                        nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
                                }

                                break;
                        }
                }
        }

        g_spdk_nvme_driver->initialized = true;

        nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
        return rc;
}

/* This function must not be called while holding g_spdk_nvme_driver->lock */
static struct spdk_nvme_ctrlr *
spdk_nvme_get_ctrlr_by_trid(const struct spdk_nvme_transport_id *trid)
{
        struct spdk_nvme_ctrlr *ctrlr;

        nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
        ctrlr = spdk_nvme_get_ctrlr_by_trid_unsafe(trid);
        nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);

        return ctrlr;
}

/* This function must be called while holding g_spdk_nvme_driver->lock */
struct spdk_nvme_ctrlr *
spdk_nvme_get_ctrlr_by_trid_unsafe(const struct spdk_nvme_transport_id *trid)
{
        struct spdk_nvme_ctrlr *ctrlr;

        /* Search per-process list */
        TAILQ_FOREACH(ctrlr, &g_nvme_attached_ctrlrs, tailq) {
                if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
                        return ctrlr;
                }
        }

        /* Search multi-process shared list */
        TAILQ_FOREACH(ctrlr, &g_spdk_nvme_driver->shared_attached_ctrlrs, tailq) {
                if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
                        return ctrlr;
                }
        }

        return NULL;
}

/* This function must not be called while holding g_spdk_nvme_driver->lock,
 * since it acquires that lock itself. */
static int
spdk_nvme_probe_internal(const struct spdk_nvme_transport_id *trid, void *cb_ctx,
                         spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
                         spdk_nvme_remove_cb remove_cb, struct spdk_nvme_ctrlr **connected_ctrlr)
{
        int rc;
        struct spdk_nvme_ctrlr *ctrlr;
        bool direct_connect = (connected_ctrlr != NULL);

        if (!spdk_nvme_transport_available(trid->trtype)) {
                SPDK_ERRLOG("NVMe trtype %u not available\n", trid->trtype);
                return -1;
        }

        nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);

        nvme_transport_ctrlr_scan(trid, cb_ctx, probe_cb, remove_cb, direct_connect);

        /*
         * Probe controllers on the shared_attached_ctrlrs list
         */
        if (!spdk_process_is_primary() && (trid->trtype == SPDK_NVME_TRANSPORT_PCIE)) {
                TAILQ_FOREACH(ctrlr, &g_spdk_nvme_driver->shared_attached_ctrlrs, tailq) {
                        /* Do not attach other ctrlrs if the user specified a valid trid */
                        if ((strlen(trid->traddr) != 0) &&
                            (spdk_nvme_transport_id_compare(trid, &ctrlr->trid))) {
                                continue;
                        }

                        nvme_ctrlr_proc_get_ref(ctrlr);

                        /*
                         * Unlock while calling attach_cb() so the user can call other functions
                         * that may take the driver lock, like nvme_detach().
                         */
                        if (attach_cb) {
                                nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
                                attach_cb(cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
                                nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
                        }
                }

                nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);

                rc = 0;

                goto exit;
        }

        nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
        /*
         * Keep going even if one or more nvme_attach() calls failed,
         * but maintain the value of rc to signal errors when we return.
         */

        rc = nvme_init_controllers(cb_ctx, attach_cb);

exit:
        if (connected_ctrlr) {
                *connected_ctrlr = spdk_nvme_get_ctrlr_by_trid(trid);
        }

        return rc;
}

int
spdk_nvme_probe(const struct spdk_nvme_transport_id *trid, void *cb_ctx,
                spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
                spdk_nvme_remove_cb remove_cb)
{
        int rc;
        struct spdk_nvme_transport_id trid_pcie;

        rc = nvme_driver_init();
        if (rc != 0) {
                return rc;
        }

        if (trid == NULL) {
                memset(&trid_pcie, 0, sizeof(trid_pcie));
                trid_pcie.trtype = SPDK_NVME_TRANSPORT_PCIE;
                trid = &trid_pcie;
        }

        return spdk_nvme_probe_internal(trid, cb_ctx, probe_cb, attach_cb, remove_cb, NULL);
}
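
/*
 * Typical application usage (an illustrative sketch, not part of this file):
 * enumerate local PCIe controllers and attach to each one the probe callback
 * accepts. A NULL trid defaults to the PCIe transport, as seen above.
 *
 *      static bool
 *      probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
 *               struct spdk_nvme_ctrlr_opts *opts)
 *      {
 *              return true; // claim every controller that is found
 *      }
 *
 *      static void
 *      attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
 *                struct spdk_nvme_ctrlr *ctrlr,
 *                const struct spdk_nvme_ctrlr_opts *opts)
 *      {
 *              printf("Attached to %s\n", trid->traddr);
 *      }
 *
 *      rc = spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL);
 */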

static bool
spdk_nvme_connect_probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
                           struct spdk_nvme_ctrlr_opts *opts)
{
        struct spdk_nvme_ctrlr_connect_opts *requested_opts = cb_ctx;

        assert(requested_opts->opts);

        assert(requested_opts->opts_size != 0);

        memcpy(opts, requested_opts->opts, spdk_min(sizeof(*opts), requested_opts->opts_size));

        return true;
}

struct spdk_nvme_ctrlr *
spdk_nvme_connect(const struct spdk_nvme_transport_id *trid,
                  const struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
        int rc;
        struct spdk_nvme_ctrlr_connect_opts connect_opts = {};
        struct spdk_nvme_ctrlr_connect_opts *user_connect_opts = NULL;
        struct spdk_nvme_ctrlr *ctrlr = NULL;
        spdk_nvme_probe_cb probe_cb = NULL;

        if (trid == NULL) {
                SPDK_ERRLOG("No transport ID specified\n");
                return NULL;
        }

        rc = nvme_driver_init();
        if (rc != 0) {
                return NULL;
        }

        if (opts && opts_size > 0) {
                connect_opts.opts = opts;
                connect_opts.opts_size = opts_size;
                user_connect_opts = &connect_opts;
                probe_cb = spdk_nvme_connect_probe_cb;
        }

        spdk_nvme_probe_internal(trid, user_connect_opts, probe_cb, NULL, NULL, &ctrlr);

        return ctrlr;
}
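
/*
 * Illustrative sketch: connect directly to one controller by transport ID,
 * overriding a default option. The PCIe address below is a hypothetical value.
 *
 *      struct spdk_nvme_transport_id trid = {};
 *      struct spdk_nvme_ctrlr_opts opts;
 *      struct spdk_nvme_ctrlr *ctrlr;
 *
 *      spdk_nvme_transport_id_parse(&trid, "trtype:PCIe traddr:0000:04:00.0");
 *      spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
 *      opts.num_io_queues = 4;
 *      ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
 *      if (ctrlr == NULL) {
 *              // probe/attach failed, or no controller matched the trid
 *      }
 */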

int
spdk_nvme_transport_id_parse_trtype(enum spdk_nvme_transport_type *trtype, const char *str)
{
        if (trtype == NULL || str == NULL) {
                return -EINVAL;
        }

        if (strcasecmp(str, "PCIe") == 0) {
                *trtype = SPDK_NVME_TRANSPORT_PCIE;
        } else if (strcasecmp(str, "RDMA") == 0) {
                *trtype = SPDK_NVME_TRANSPORT_RDMA;
        } else if (strcasecmp(str, "FC") == 0) {
                *trtype = SPDK_NVME_TRANSPORT_FC;
        } else {
                return -ENOENT;
        }
        return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
        switch (trtype) {
        case SPDK_NVME_TRANSPORT_PCIE:
                return "PCIe";
        case SPDK_NVME_TRANSPORT_RDMA:
                return "RDMA";
        case SPDK_NVME_TRANSPORT_FC:
                return "FC";
        default:
                return NULL;
        }
}

int
spdk_nvme_transport_id_parse_adrfam(enum spdk_nvmf_adrfam *adrfam, const char *str)
{
        if (adrfam == NULL || str == NULL) {
                return -EINVAL;
        }

        if (strcasecmp(str, "IPv4") == 0) {
                *adrfam = SPDK_NVMF_ADRFAM_IPV4;
        } else if (strcasecmp(str, "IPv6") == 0) {
                *adrfam = SPDK_NVMF_ADRFAM_IPV6;
        } else if (strcasecmp(str, "IB") == 0) {
                *adrfam = SPDK_NVMF_ADRFAM_IB;
        } else if (strcasecmp(str, "FC") == 0) {
                *adrfam = SPDK_NVMF_ADRFAM_FC;
        } else {
                return -ENOENT;
        }
        return 0;
}

const char *
spdk_nvme_transport_id_adrfam_str(enum spdk_nvmf_adrfam adrfam)
{
        switch (adrfam) {
        case SPDK_NVMF_ADRFAM_IPV4:
                return "IPv4";
        case SPDK_NVMF_ADRFAM_IPV6:
                return "IPv6";
        case SPDK_NVMF_ADRFAM_IB:
                return "IB";
        case SPDK_NVMF_ADRFAM_FC:
                return "FC";
        default:
                return NULL;
        }
}

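/*
 * The parser below accepts whitespace-separated key:value (or key=value)
 * pairs. Examples of strings it handles, given the keys recognized below
 * (the addresses and NQN are illustrative values, not defaults):
 *
 *      "trtype:PCIe traddr:0000:04:00.0"
 *      "trtype:RDMA adrfam:IPv4 traddr:192.168.1.10 trsvcid:4420 subnqn:nqn.2016-06.io.spdk:cnode1"
 */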
int
spdk_nvme_transport_id_parse(struct spdk_nvme_transport_id *trid, const char *str)
{
        const char *sep, *sep1;
        const char *whitespace = " \t\n";
        size_t key_len, val_len;
        char key[32];
        char val[1024];

        if (trid == NULL || str == NULL) {
                return -EINVAL;
        }

        while (*str != '\0') {
                str += strspn(str, whitespace);

                sep = strchr(str, ':');
                if (!sep) {
                        sep = strchr(str, '=');
                        if (!sep) {
                                SPDK_ERRLOG("Key without ':' or '=' separator\n");
                                return -EINVAL;
                        }
                } else {
                        sep1 = strchr(str, '=');
                        if ((sep1 != NULL) && (sep1 < sep)) {
                                sep = sep1;
                        }
                }

                key_len = sep - str;
                if (key_len >= sizeof(key)) {
                        SPDK_ERRLOG("Transport key length %zu greater than maximum allowed %zu\n",
                                    key_len, sizeof(key) - 1);
                        return -EINVAL;
                }

                memcpy(key, str, key_len);
                key[key_len] = '\0';

                str += key_len + 1; /* Skip key: */
                val_len = strcspn(str, whitespace);
                if (val_len == 0) {
                        SPDK_ERRLOG("Key without value\n");
                        return -EINVAL;
                }

                if (val_len >= sizeof(val)) {
                        SPDK_ERRLOG("Transport value length %zu greater than maximum allowed %zu\n",
                                    val_len, sizeof(val) - 1);
                        return -EINVAL;
                }

                memcpy(val, str, val_len);
                val[val_len] = '\0';

                str += val_len;

                if (strcasecmp(key, "trtype") == 0) {
                        if (spdk_nvme_transport_id_parse_trtype(&trid->trtype, val) != 0) {
                                SPDK_ERRLOG("Unknown trtype '%s'\n", val);
                                return -EINVAL;
                        }
                } else if (strcasecmp(key, "adrfam") == 0) {
                        if (spdk_nvme_transport_id_parse_adrfam(&trid->adrfam, val) != 0) {
                                SPDK_ERRLOG("Unknown adrfam '%s'\n", val);
                                return -EINVAL;
                        }
                } else if (strcasecmp(key, "traddr") == 0) {
                        if (val_len > SPDK_NVMF_TRADDR_MAX_LEN) {
                                SPDK_ERRLOG("traddr length %zu greater than maximum allowed %u\n",
                                            val_len, SPDK_NVMF_TRADDR_MAX_LEN);
                                return -EINVAL;
                        }
                        memcpy(trid->traddr, val, val_len + 1);
                } else if (strcasecmp(key, "trsvcid") == 0) {
                        if (val_len > SPDK_NVMF_TRSVCID_MAX_LEN) {
                                SPDK_ERRLOG("trsvcid length %zu greater than maximum allowed %u\n",
                                            val_len, SPDK_NVMF_TRSVCID_MAX_LEN);
                                return -EINVAL;
                        }
                        memcpy(trid->trsvcid, val, val_len + 1);
                } else if (strcasecmp(key, "subnqn") == 0) {
                        if (val_len > SPDK_NVMF_NQN_MAX_LEN) {
                                SPDK_ERRLOG("subnqn length %zu greater than maximum allowed %u\n",
                                            val_len, SPDK_NVMF_NQN_MAX_LEN);
                                return -EINVAL;
                        }
                        memcpy(trid->subnqn, val, val_len + 1);
                } else {
                        SPDK_ERRLOG("Unknown transport ID key '%s'\n", key);
                }
        }

        return 0;
}

static int
cmp_int(int a, int b)
{
        return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
                               const struct spdk_nvme_transport_id *trid2)
{
        int cmp;

        cmp = cmp_int(trid1->trtype, trid2->trtype);
        if (cmp) {
                return cmp;
        }

        if (trid1->trtype == SPDK_NVME_TRANSPORT_PCIE) {
                struct spdk_pci_addr pci_addr1;
                struct spdk_pci_addr pci_addr2;

                /* Normalize PCI addresses before comparing */
                if (spdk_pci_addr_parse(&pci_addr1, trid1->traddr) < 0 ||
                    spdk_pci_addr_parse(&pci_addr2, trid2->traddr) < 0) {
                        return -1;
                }

                /* PCIe transport ID only uses trtype and traddr */
                return spdk_pci_addr_compare(&pci_addr1, &pci_addr2);
        }

        cmp = strcasecmp(trid1->traddr, trid2->traddr);
        if (cmp) {
                return cmp;
        }

        cmp = cmp_int(trid1->adrfam, trid2->adrfam);
        if (cmp) {
                return cmp;
        }

        cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
        if (cmp) {
                return cmp;
        }

        cmp = strcmp(trid1->subnqn, trid2->subnqn);
        if (cmp) {
                return cmp;
        }

        return 0;
}
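
/*
 * Note on PCIe comparison: because both traddr strings are parsed into
 * struct spdk_pci_addr first, textual variants of the same address compare
 * equal. A sketch (assuming spdk_pci_addr_parse() accepts a domain-less
 * form and defaults the domain to 0):
 *
 *      struct spdk_nvme_transport_id a = {}, b = {};
 *
 *      spdk_nvme_transport_id_parse(&a, "trtype:PCIe traddr:0000:04:00.0");
 *      spdk_nvme_transport_id_parse(&b, "trtype:PCIe traddr:04:00.0");
 *      assert(spdk_nvme_transport_id_compare(&a, &b) == 0);
 */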

SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)