ceph.git blobdiff: ceph/src/spdk/lib/nvme/nvme_transport.c
update source to Ceph Pacific 16.2.2
index 3f7c9be4c582ba6c9d5279e467a2c1081a336fa4..76efd59660c6450e8bc862152dd2a64c3c3c02f6 100644 (file)
  */
 
 #include "nvme_internal.h"
+#include "spdk/queue.h"
 
-#ifdef DEBUG
-static __attribute__((noreturn)) void
-nvme_transport_unknown(enum spdk_nvme_transport_type trtype)
-{
-       SPDK_ERRLOG("Unknown transport %d\n", (int)trtype);
-       abort();
-}
-#define TRANSPORT_DEFAULT(trtype)      default: nvme_transport_unknown(trtype);
-#else
-#define TRANSPORT_DEFAULT(trtype)
-#endif
-
-#define TRANSPORT_PCIE(func_name, args)        case SPDK_NVME_TRANSPORT_PCIE: return nvme_pcie_ ## func_name args;
-
-#define TRANSPORT_FABRICS_TCP(func_name, args) case SPDK_NVME_TRANSPORT_TCP: return nvme_tcp_ ## func_name args;
-
-#ifdef SPDK_CONFIG_RDMA
-#define TRANSPORT_FABRICS_RDMA(func_name, args)        case SPDK_NVME_TRANSPORT_RDMA: return nvme_rdma_ ## func_name args;
-#define TRANSPORT_RDMA_AVAILABLE               true
-#else
-#define TRANSPORT_FABRICS_RDMA(func_name, args)        case SPDK_NVME_TRANSPORT_RDMA: SPDK_UNREACHABLE();
-#define TRANSPORT_RDMA_AVAILABLE               false
-#endif
-#define TRANSPORT_FABRICS_FC(func_name, args)  case SPDK_NVME_TRANSPORT_FC: SPDK_UNREACHABLE();
-
-#define NVME_TRANSPORT_CALL(trtype, func_name, args)           \
-       do {                                                    \
-               switch (trtype) {                               \
-               TRANSPORT_PCIE(func_name, args)                 \
-               TRANSPORT_FABRICS_RDMA(func_name, args)         \
-               TRANSPORT_FABRICS_FC(func_name, args)           \
-               TRANSPORT_FABRICS_TCP(func_name, args)          \
-               TRANSPORT_DEFAULT(trtype)                       \
-               }                                               \
-               SPDK_UNREACHABLE();                             \
-       } while (0)
+#define SPDK_MAX_NUM_OF_TRANSPORTS 16
+
+struct spdk_nvme_transport {
+       struct spdk_nvme_transport_ops  ops;
+       TAILQ_ENTRY(spdk_nvme_transport)        link;
+};
+
+TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
+       TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);
+
+struct spdk_nvme_transport g_spdk_transports[SPDK_MAX_NUM_OF_TRANSPORTS] = {};
+int g_current_transport_index = 0;
+
+const struct spdk_nvme_transport *
+nvme_get_first_transport(void)
+{
+       return TAILQ_FIRST(&g_spdk_nvme_transports);
+}
+
+const struct spdk_nvme_transport *
+nvme_get_next_transport(const struct spdk_nvme_transport *transport)
+{
+       return TAILQ_NEXT(transport, link);
+}
+
+/*
+ * Unfortunately, due to NVMe PCIe multiprocess support, we cannot store the
+ * transport object in either the controller struct or the admin qpair. This means
+ * that a lot of admin-related transport calls have to call nvme_get_transport
+ * in order to know which functions to call.
+ * In the I/O path, we have the ability to store the transport struct in the I/O
+ * qpairs to avoid taking a performance hit.
+ */
+const struct spdk_nvme_transport *
+nvme_get_transport(const char *transport_name)
+{
+       struct spdk_nvme_transport *registered_transport;
+
+       TAILQ_FOREACH(registered_transport, &g_spdk_nvme_transports, link) {
+               if (strcasecmp(transport_name, registered_transport->ops.name) == 0) {
+                       return registered_transport;
+               }
+       }
+
+       return NULL;
+}
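
/*
 * A minimal usage sketch of the lookup helpers above: walk every registered
 * transport, then resolve one by name.  The function itself and the "TCP"
 * literal are illustrative assumptions, not part of the upstream code.
 */
static void
nvme_transport_dump_registered(void)
{
	const struct spdk_nvme_transport *transport;

	/* Walk the global registration list front to back. */
	for (transport = nvme_get_first_transport(); transport != NULL;
	     transport = nvme_get_next_transport(transport)) {
		SPDK_NOTICELOG("registered transport: %s\n", transport->ops.name);
	}

	/* Case-insensitive lookup by name; NULL means "not registered". */
	if (nvme_get_transport("TCP") == NULL) {
		SPDK_NOTICELOG("TCP transport is not registered\n");
	}
}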
 
 bool
 spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
 {
-       switch (trtype) {
-       case SPDK_NVME_TRANSPORT_PCIE:
-       case SPDK_NVME_TRANSPORT_TCP:
-               return true;
+       return nvme_get_transport(spdk_nvme_transport_id_trtype_str(trtype)) != NULL;
+}
 
-       case SPDK_NVME_TRANSPORT_RDMA:
-               return TRANSPORT_RDMA_AVAILABLE;
+bool
+spdk_nvme_transport_available_by_name(const char *transport_name)
+{
+       return nvme_get_transport(transport_name) != NULL;
+}
+
+void spdk_nvme_transport_register(const struct spdk_nvme_transport_ops *ops)
+{
+       struct spdk_nvme_transport *new_transport;
 
-       case SPDK_NVME_TRANSPORT_FC:
-               return false;
+       if (nvme_get_transport(ops->name)) {
+               SPDK_ERRLOG("Double registering NVMe transport %s is prohibited.\n", ops->name);
+               assert(false);
        }
 
-       return false;
+       if (g_current_transport_index == SPDK_MAX_NUM_OF_TRANSPORTS) {
+               SPDK_ERRLOG("Unable to register new NVMe transport.\n");
+               assert(false);
+               return;
+       }
+       new_transport = &g_spdk_transports[g_current_transport_index++];
+
+       new_transport->ops = *ops;
+       TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, new_transport, link);
 }
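
/*
 * A sketch of how a transport module might register itself with the function
 * above.  The "FOO" name and the nvme_foo_* callbacks are hypothetical; only a
 * few of the ops fields referenced in this file are shown, and a real
 * transport has to provide the full callback set.
 */
static const struct spdk_nvme_transport_ops foo_ops = {
	.name = "FOO",
	.ctrlr_construct = nvme_foo_ctrlr_construct,
	.ctrlr_scan = nvme_foo_ctrlr_scan,
	.ctrlr_destruct = nvme_foo_ctrlr_destruct,
	.qpair_submit_request = nvme_foo_qpair_submit_request,
	/* ...remaining callbacks elided... */
};

static void __attribute__((constructor))
nvme_foo_register(void)
{
	/* Runs at load time; a duplicate name or more than
	 * SPDK_MAX_NUM_OF_TRANSPORTS registrations trips the asserts above.
	 */
	spdk_nvme_transport_register(&foo_ops);
}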
 
 struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
                const struct spdk_nvme_ctrlr_opts *opts,
                void *devhandle)
 {
-       NVME_TRANSPORT_CALL(trid->trtype, ctrlr_construct, (trid, opts, devhandle));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(trid->trstring);
+       struct spdk_nvme_ctrlr *ctrlr;
+
+       if (transport == NULL) {
+               SPDK_ERRLOG("Transport %s doesn't exist.", trid->trstring);
+               return NULL;
+       }
+
+       ctrlr = transport->ops.ctrlr_construct(trid, opts, devhandle);
+
+       return ctrlr;
 }
 
 int
 nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
                          bool direct_connect)
 {
-       NVME_TRANSPORT_CALL(probe_ctx->trid.trtype, ctrlr_scan, (probe_ctx, direct_connect));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);
+
+       if (transport == NULL) {
+               SPDK_ERRLOG("Transport %s doesn't exist.", probe_ctx->trid.trstring);
+               return -ENOENT;
+       }
+
+       return transport->ops.ctrlr_scan(probe_ctx, direct_connect);
 }
 
 int
 nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
 {
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_destruct, (ctrlr));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+       assert(transport != NULL);
+       return transport->ops.ctrlr_destruct(ctrlr);
 }
 
 int
 nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
 {
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_enable, (ctrlr));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+       assert(transport != NULL);
+       return transport->ops.ctrlr_enable(ctrlr);
 }
 
 int
 nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
 {
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_set_reg_4, (ctrlr, offset, value));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+       assert(transport != NULL);
+       return transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
 }
 
 int
 nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
 {
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_set_reg_8, (ctrlr, offset, value));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+       assert(transport != NULL);
+       return transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
 }
 
 int
 nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
 {
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_get_reg_4, (ctrlr, offset, value));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+       assert(transport != NULL);
+       return transport->ops.ctrlr_get_reg_4(ctrlr, offset, value);
 }
 
 int
 nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
 {
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_get_reg_8, (ctrlr, offset, value));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+       assert(transport != NULL);
+       return transport->ops.ctrlr_get_reg_8(ctrlr, offset, value);
 }
 
 uint32_t
 nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
 {
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_get_max_xfer_size, (ctrlr));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+       assert(transport != NULL);
+       return transport->ops.ctrlr_get_max_xfer_size(ctrlr);
 }
 
 uint16_t
 nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
 {
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_get_max_sges, (ctrlr));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+       assert(transport != NULL);
+       return transport->ops.ctrlr_get_max_sges(ctrlr);
+}
+
+int
+nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
+{
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+       assert(transport != NULL);
+       if (transport->ops.ctrlr_reserve_cmb != NULL) {
+               return transport->ops.ctrlr_reserve_cmb(ctrlr);
+       }
+
+       return -ENOTSUP;
 }
 
 void *
-nvme_transport_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
+nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
 {
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_alloc_cmb_io_buffer, (ctrlr, size));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+       assert(transport != NULL);
+       if (transport->ops.ctrlr_map_cmb != NULL) {
+               return transport->ops.ctrlr_map_cmb(ctrlr, size);
+       }
+
+       return NULL;
 }
 
 int
-nvme_transport_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
+nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
 {
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_free_cmb_io_buffer, (ctrlr, buf, size));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+       assert(transport != NULL);
+       if (transport->ops.ctrlr_unmap_cmb != NULL) {
+               return transport->ops.ctrlr_unmap_cmb(ctrlr);
+       }
+
+       return 0;
 }
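
/*
 * A sketch of the intended calling sequence for the optional CMB hooks above,
 * with a hypothetical caller: reserve the controller memory buffer, map it,
 * hand the region to the caller, and unmap it later.  Transports without CMB
 * support return -ENOTSUP or NULL, so each step must tolerate failure.
 */
static void *
nvme_example_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	void *cmb;

	if (nvme_transport_ctrlr_reserve_cmb(ctrlr) != 0) {
		return NULL;	/* no CMB, or the transport does not implement it */
	}

	cmb = nvme_transport_ctrlr_map_cmb(ctrlr, size);
	if (cmb == NULL) {
		return NULL;	/* reservation succeeded but mapping is unsupported */
	}

	/* The caller uses the region and eventually calls
	 * nvme_transport_ctrlr_unmap_cmb(ctrlr) to tear the mapping down.
	 */
	return cmb;
}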
 
 struct spdk_nvme_qpair *
 nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
                                     const struct spdk_nvme_io_qpair_opts *opts)
 {
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_create_io_qpair, (ctrlr, qid, opts));
+       struct spdk_nvme_qpair *qpair;
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+       assert(transport != NULL);
+       qpair = transport->ops.ctrlr_create_io_qpair(ctrlr, qid, opts);
+       if (qpair != NULL && !nvme_qpair_is_admin_queue(qpair)) {
+               qpair->transport = transport;
+       }
+
+       return qpair;
 }
 
 int
 nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
 {
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_delete_io_qpair, (ctrlr, qpair));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+       assert(transport != NULL);
+
+       /* Do not rely on qpair->transport.  For multi-process cases, a foreign process may delete
+        * the IO qpair, in which case the transport object would be invalid (each process has its
+        * own unique transport objects since they contain function pointers).  So we look up the
+        * transport object in the delete_io_qpair case.
+        */
+       return transport->ops.ctrlr_delete_io_qpair(ctrlr, qpair);
 }
 
 int
 nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
 {
-       if (nvme_qpair_is_admin_queue(qpair)) {
-               qpair->is_connecting = 1;
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+       uint8_t transport_failure_reason;
+       int rc;
+
+       assert(transport != NULL);
+       if (!nvme_qpair_is_admin_queue(qpair)) {
+               qpair->transport = transport;
        }
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_connect_qpair, (ctrlr, qpair));
-       if (nvme_qpair_is_admin_queue(qpair)) {
-               qpair->is_connecting = 0;
+
+       transport_failure_reason = qpair->transport_failure_reason;
+       qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;
+
+       nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);
+       rc = transport->ops.ctrlr_connect_qpair(ctrlr, qpair);
+       if (rc != 0) {
+               goto err;
        }
-}
 
-volatile struct spdk_nvme_registers *
-nvme_transport_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr)
-{
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_get_registers, (ctrlr));
+       nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
+       if (qpair->poll_group) {
+               rc = nvme_poll_group_connect_qpair(qpair);
+               if (rc) {
+                       goto err;
+               }
+       }
+
+       return rc;
+
+err:
+       /* If the qpair was unable to reconnect, restore the original failure reason. */
+       qpair->transport_failure_reason = transport_failure_reason;
+       nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
+       nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
+       return rc;
 }
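
/*
 * Sketch of a retry loop built on the connect/disconnect pair above; the
 * helper name and retry policy are illustrative.  On failure the connect path
 * restores the saved transport_failure_reason and leaves the qpair in the
 * DISCONNECTED state, so another attempt is safe.
 */
static int
nvme_example_reconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			     int max_attempts)
{
	int i, rc = -EAGAIN;

	for (i = 0; i < max_attempts && rc != 0; i++) {
		rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
	}

	return rc;
}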
 
 void
 nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
 {
-       NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_disconnect_qpair, (ctrlr, qpair));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+
+       if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
+           nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
+               return;
+       }
+
+       nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTING);
+       assert(transport != NULL);
+       if (qpair->poll_group) {
+               nvme_poll_group_disconnect_qpair(qpair);
+       }
+
+       transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);
+
+       nvme_qpair_abort_reqs(qpair, 0);
+       nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
 }
 
 void
 nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
 {
+       const struct spdk_nvme_transport *transport;
+
        assert(dnr <= 1);
-       NVME_TRANSPORT_CALL(qpair->trtype, qpair_abort_reqs, (qpair, dnr));
+       if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
+               qpair->transport->ops.qpair_abort_reqs(qpair, dnr);
+       } else {
+               transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
+               assert(transport != NULL);
+               transport->ops.qpair_abort_reqs(qpair, dnr);
+       }
 }
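
/*
 * The admin-vs-I/O dispatch used by the qpair entry points could be factored
 * into a helper like this hypothetical one: I/O qpairs carry a cached
 * transport pointer (set when they are created or connected), while the admin
 * qpair has to be resolved by name because of the multi-process constraint
 * described near the top of this file.
 */
static inline const struct spdk_nvme_transport *
nvme_example_qpair_transport(struct spdk_nvme_qpair *qpair)
{
	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport;
	}

	return nvme_get_transport(qpair->ctrlr->trid.trstring);
}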
 
 int
 nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
 {
-       NVME_TRANSPORT_CALL(qpair->trtype, qpair_reset, (qpair));
+       const struct spdk_nvme_transport *transport;
+
+       if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
+               return qpair->transport->ops.qpair_reset(qpair);
+       }
+
+       transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
+       assert(transport != NULL);
+       return transport->ops.qpair_reset(qpair);
 }
 
 int
 nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
 {
-       NVME_TRANSPORT_CALL(qpair->trtype, qpair_submit_request, (qpair, req));
+       const struct spdk_nvme_transport *transport;
+
+       if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
+               return qpair->transport->ops.qpair_submit_request(qpair, req);
+       }
+
+       transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
+       assert(transport != NULL);
+       return transport->ops.qpair_submit_request(qpair, req);
 }
 
 int32_t
 nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
 {
-       NVME_TRANSPORT_CALL(qpair->trtype, qpair_process_completions, (qpair, max_completions));
+       const struct spdk_nvme_transport *transport;
+
+       if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
+               return qpair->transport->ops.qpair_process_completions(qpair, max_completions);
+       }
+
+       transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
+       assert(transport != NULL);
+       return transport->ops.qpair_process_completions(qpair, max_completions);
+}
+
+int
+nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
+                                     int (*iter_fn)(struct nvme_request *req, void *arg),
+                                     void *arg)
+{
+       const struct spdk_nvme_transport *transport;
+
+       if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
+               return qpair->transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
+       }
+
+       transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
+       assert(transport != NULL);
+       return transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
 }
 
 void
 nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
 {
-       NVME_TRANSPORT_CALL(qpair->trtype, admin_qpair_abort_aers, (qpair));
+       const struct spdk_nvme_transport *transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
+
+       assert(transport != NULL);
+       transport->ops.admin_qpair_abort_aers(qpair);
+}
+
+struct spdk_nvme_transport_poll_group *
+nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
+{
+       struct spdk_nvme_transport_poll_group *group = NULL;
+
+       group = transport->ops.poll_group_create();
+       if (group) {
+               group->transport = transport;
+               STAILQ_INIT(&group->connected_qpairs);
+               STAILQ_INIT(&group->disconnected_qpairs);
+       }
+
+       return group;
+}
+
+int
+nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
+                             struct spdk_nvme_qpair *qpair)
+{
+       int rc;
+
+       rc = tgroup->transport->ops.poll_group_add(tgroup, qpair);
+       if (rc == 0) {
+               qpair->poll_group = tgroup;
+               assert(nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTED);
+               qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
+               STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
+       }
+
+       return rc;
+}
+
+int
+nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
+                                struct spdk_nvme_qpair *qpair)
+{
+       int rc;
+
+       rc = tgroup->transport->ops.poll_group_remove(tgroup, qpair);
+       if (rc == 0) {
+               if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
+                       STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+               } else if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
+                       STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+               } else {
+                       return -ENOENT;
+               }
+
+               qpair->poll_group = NULL;
+               qpair->poll_group_tailq_head = NULL;
+       }
+
+       return rc;
+}
+
+int64_t
+nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
+               uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
+{
+       struct spdk_nvme_qpair *qpair;
+       int64_t rc;
+
+       tgroup->in_completion_context = true;
+       rc = tgroup->transport->ops.poll_group_process_completions(tgroup, completions_per_qpair,
+                       disconnected_qpair_cb);
+       tgroup->in_completion_context = false;
+
+       if (spdk_unlikely(tgroup->num_qpairs_to_delete > 0)) {
+               /* deleted qpairs are more likely to be in the disconnected qpairs list. */
+               STAILQ_FOREACH(qpair, &tgroup->disconnected_qpairs, poll_group_stailq) {
+                       if (spdk_unlikely(qpair->delete_after_completion_context)) {
+                               spdk_nvme_ctrlr_free_io_qpair(qpair);
+                               if (--tgroup->num_qpairs_to_delete == 0) {
+                                       return rc;
+                               }
+                       }
+               }
+
+               STAILQ_FOREACH(qpair, &tgroup->connected_qpairs, poll_group_stailq) {
+                       if (spdk_unlikely(qpair->delete_after_completion_context)) {
+                               spdk_nvme_ctrlr_free_io_qpair(qpair);
+                               if (--tgroup->num_qpairs_to_delete == 0) {
+                                       return rc;
+                               }
+                       }
+               }
+               /* Just in case. */
+               SPDK_DEBUGLOG(SPDK_LOG_NVME, "Mismatch between qpairs to delete and poll group number.\n");
+               tgroup->num_qpairs_to_delete = 0;
+       }
+
+       return rc;
+}
+
+int
+nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
+{
+       return tgroup->transport->ops.poll_group_destroy(tgroup);
+}
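
/*
 * A sketch of the poll-group lifecycle implemented by the helpers above,
 * assuming a qpair that has not been connected yet (poll_group_add asserts
 * this) and a caller-supplied disconnect callback; error handling is trimmed.
 */
static void
nvme_example_poll_group_loop(const struct spdk_nvme_transport *transport,
			     struct spdk_nvme_qpair *qpair,
			     spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int i;

	tgroup = nvme_transport_poll_group_create(transport);
	if (tgroup == NULL) {
		return;
	}

	/* New members start on the disconnected list and move to the connected
	 * list when nvme_transport_ctrlr_connect_qpair() completes.
	 */
	if (nvme_transport_poll_group_add(tgroup, qpair) != 0) {
		nvme_transport_poll_group_destroy(tgroup);
		return;
	}

	for (i = 0; i < 1000; i++) {
		/* completions_per_qpair is passed through to the transport unchanged. */
		nvme_transport_poll_group_process_completions(tgroup, 0, disconnected_qpair_cb);
	}

	nvme_transport_poll_group_remove(tgroup, qpair);
	nvme_transport_poll_group_destroy(tgroup);
}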
+
+int
+nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
+{
+       struct spdk_nvme_transport_poll_group *tgroup;
+       int rc;
+
+       tgroup = qpair->poll_group;
+
+       if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
+               return 0;
+       }
+
+       if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
+               rc = tgroup->transport->ops.poll_group_disconnect_qpair(qpair);
+               if (rc == 0) {
+                       qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
+                       STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+                       STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
+                       /* EINPROGRESS indicates that a call has already been made to this function.
+                        * It just keeps us from segfaulting on a double removal/insert.
+                        */
+               } else if (rc == -EINPROGRESS) {
+                       rc = 0;
+               }
+               return rc;
+       }
+
+       return -EINVAL;
+}
+
+int
+nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
+{
+       struct spdk_nvme_transport_poll_group *tgroup;
+       int rc;
+
+       tgroup = qpair->poll_group;
+
+       if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
+               return 0;
+       }
+
+       if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
+               rc = tgroup->transport->ops.poll_group_connect_qpair(qpair);
+               if (rc == 0) {
+                       qpair->poll_group_tailq_head = &tgroup->connected_qpairs;
+                       STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+                       STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
+               }
+
+               return rc == -EINPROGRESS ? 0 : rc;
+       }
+
+       return -EINVAL;
 }