git.proxmox.com Git - ceph.git/blobdiff - ceph/src/spdk/lib/nvmf/rdma.c
import 15.2.0 Octopus source
index 333e703f51125f1ad04c6b90b871fc67069ff063..594baae80c46f77ab2b56142bd5b527c5a5ae97a 100644
@@ -1,8 +1,8 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright (c) Intel Corporation.
- *   All rights reserved.
+ *   Copyright (c) Intel Corporation. All rights reserved.
+ *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
 
 #include "spdk_internal/log.h"
 
+struct spdk_nvme_rdma_hooks g_nvmf_hooks = {};
+
 /*
  RDMA Connection Resource Defaults
  */
-#define NVMF_DEFAULT_TX_SGE            1
+#define NVMF_DEFAULT_TX_SGE            SPDK_NVMF_MAX_SGL_ENTRIES
+#define NVMF_DEFAULT_RSP_SGE           1
 #define NVMF_DEFAULT_RX_SGE            2
-#define NVMF_DEFAULT_DATA_SGE          16
 
 /* The RDMA completion queue size */
-#define NVMF_RDMA_CQ_SIZE      4096
+#define DEFAULT_NVMF_RDMA_CQ_SIZE      4096
+#define MAX_WR_PER_QP(queue_depth)     ((queue_depth) * 3 + 2)
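
The 3x + 2 factor mirrors the queue pair sizing used later in this diff: the send queue is
sized at max_queue_depth * 2 + 1 WRs (an RDMA READ or WRITE plus a SEND response per
request, plus a dummy drain WR) and the receive queue at max_queue_depth + 1 WRs, so a
qpair of depth 128 contributes at most 128 * 2 + 1 + 128 + 1 = 386 = MAX_WR_PER_QP(128)
work requests to the shared completion queue.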
 
-/* AIO backend requires block size aligned data buffers,
- * extra 4KiB aligned data buffer should work for most devices.
- */
-#define SHIFT_4KB                      12
-#define NVMF_DATA_BUFFER_ALIGNMENT     (1 << SHIFT_4KB)
-#define NVMF_DATA_BUFFER_MASK          (NVMF_DATA_BUFFER_ALIGNMENT - 1)
+/* Timeout for destroying defunct rqpairs */
+#define NVMF_RDMA_QPAIR_DESTROY_TIMEOUT_US     4000000
+
+/* The maximum number of buffers per request */
+#define NVMF_REQ_MAX_BUFFERS   (SPDK_NVMF_MAX_SGL_ENTRIES * 2)
+
+static int g_spdk_nvmf_ibv_query_mask =
+       IBV_QP_STATE |
+       IBV_QP_PKEY_INDEX |
+       IBV_QP_PORT |
+       IBV_QP_ACCESS_FLAGS |
+       IBV_QP_AV |
+       IBV_QP_PATH_MTU |
+       IBV_QP_DEST_QPN |
+       IBV_QP_RQ_PSN |
+       IBV_QP_MAX_DEST_RD_ATOMIC |
+       IBV_QP_MIN_RNR_TIMER |
+       IBV_QP_SQ_PSN |
+       IBV_QP_TIMEOUT |
+       IBV_QP_RETRY_CNT |
+       IBV_QP_RNR_RETRY |
+       IBV_QP_MAX_QP_RD_ATOMIC;
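
This mask selects every attribute needed to later re-apply the QP state with
ibv_modify_qp() during recovery; both spdk_nvmf_rdma_update_ibv_state() and
spdk_nvmf_rdma_set_ibv_state() below pass it to ibv_query_qp(). A minimal sketch of the
query pattern (the helper name is illustrative, not part of this file):

    static enum ibv_qp_state
    query_qp_state(struct ibv_qp *qp)
    {
            struct ibv_qp_attr qp_attr;
            struct ibv_qp_init_attr init_attr;

            /* ibv_query_qp() fills only the fields selected by the mask. */
            if (ibv_query_qp(qp, &qp_attr, g_spdk_nvmf_ibv_query_mask, &init_attr)) {
                    return IBV_QPS_ERR; /* treat a failed query as an error state */
            }
            return qp_attr.qp_state;
    }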
 
 enum spdk_nvmf_rdma_request_state {
        /* The request is not currently in use */
@@ -79,9 +98,9 @@ enum spdk_nvmf_rdma_request_state {
        RDMA_REQUEST_STATE_NEED_BUFFER,
 
        /* The request is waiting on RDMA queue depth availability
-        * to transfer data between the host and the controller.
+        * to transfer data from the host to the controller.
         */
-       RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING,
+       RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING,
 
        /* The request is currently transferring data from the host to the controller. */
        RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,
@@ -95,6 +114,11 @@ enum spdk_nvmf_rdma_request_state {
        /* The request finished executing at the block device */
        RDMA_REQUEST_STATE_EXECUTED,
 
+       /* The request is waiting on RDMA queue depth availability
+        * to transfer data from the controller to the host.
+        */
+       RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING,
+
        /* The request is ready to send a completion */
        RDMA_REQUEST_STATE_READY_TO_COMPLETE,
 
@@ -115,76 +139,88 @@ enum spdk_nvmf_rdma_request_state {
 
 #define OBJECT_NVMF_RDMA_IO                            0x40
 
-#define                                                                        TRACE_GROUP_NVMF_RDMA 0x4
+#define TRACE_GROUP_NVMF_RDMA                          0x4
 #define TRACE_RDMA_REQUEST_STATE_NEW                                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x0)
 #define TRACE_RDMA_REQUEST_STATE_NEED_BUFFER                           SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x1)
-#define TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x2)
+#define TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x2)
 #define TRACE_RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER       SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x3)
 #define TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE                      SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x4)
 #define TRACE_RDMA_REQUEST_STATE_EXECUTING                             SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x5)
 #define TRACE_RDMA_REQUEST_STATE_EXECUTED                              SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x6)
-#define TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE                     SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x7)
-#define TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST       SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x8)
-#define TRACE_RDMA_REQUEST_STATE_COMPLETING                            SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x9)
-#define TRACE_RDMA_REQUEST_STATE_COMPLETED                             SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xA)
-#define TRACE_RDMA_QP_CREATE                                           SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xB)
-#define TRACE_RDMA_IBV_ASYNC_EVENT                                     SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xC)
-#define TRACE_RDMA_CM_ASYNC_EVENT                                      SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xD)
-#define TRACE_RDMA_QP_STATE_CHANGE                                     SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xE)
-#define TRACE_RDMA_QP_DISCONNECT                                       SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xF)
-#define TRACE_RDMA_QP_DESTROY                                          SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x10)
-
-SPDK_TRACE_REGISTER_FN(nvmf_trace)
+#define TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING         SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x7)
+#define TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE                     SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x8)
+#define TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST       SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x9)
+#define TRACE_RDMA_REQUEST_STATE_COMPLETING                            SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xA)
+#define TRACE_RDMA_REQUEST_STATE_COMPLETED                             SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xB)
+#define TRACE_RDMA_QP_CREATE                                           SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xC)
+#define TRACE_RDMA_IBV_ASYNC_EVENT                                     SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xD)
+#define TRACE_RDMA_CM_ASYNC_EVENT                                      SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xE)
+#define TRACE_RDMA_QP_STATE_CHANGE                                     SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xF)
+#define TRACE_RDMA_QP_DISCONNECT                                       SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x10)
+#define TRACE_RDMA_QP_DESTROY                                          SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x11)
+
+SPDK_TRACE_REGISTER_FN(nvmf_trace, "nvmf_rdma", TRACE_GROUP_NVMF_RDMA)
 {
        spdk_trace_register_object(OBJECT_NVMF_RDMA_IO, 'r');
-       spdk_trace_register_description("RDMA_REQ_NEW", "",
-                                       TRACE_RDMA_REQUEST_STATE_NEW,
+       spdk_trace_register_description("RDMA_REQ_NEW", TRACE_RDMA_REQUEST_STATE_NEW,
                                        OWNER_NONE, OBJECT_NVMF_RDMA_IO, 1, 1, "cmid:   ");
-       spdk_trace_register_description("RDMA_REQ_NEED_BUFFER", "",
-                                       TRACE_RDMA_REQUEST_STATE_NEED_BUFFER,
+       spdk_trace_register_description("RDMA_REQ_NEED_BUFFER", TRACE_RDMA_REQUEST_STATE_NEED_BUFFER,
                                        OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
-       spdk_trace_register_description("RDMA_REQ_TX_PENDING_H_TO_C", "",
-                                       TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING,
+       spdk_trace_register_description("RDMA_REQ_TX_PENDING_C2H",
+                                       TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING,
                                        OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
-       spdk_trace_register_description("RDMA_REQ_TX_H_TO_C", "",
+       spdk_trace_register_description("RDMA_REQ_TX_PENDING_H2C",
+                                       TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING,
+                                       OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
+       spdk_trace_register_description("RDMA_REQ_TX_H2C",
                                        TRACE_RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,
                                        OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
-       spdk_trace_register_description("RDMA_REQ_RDY_TO_EXECUTE", "",
+       spdk_trace_register_description("RDMA_REQ_RDY_TO_EXECUTE",
                                        TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE,
                                        OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
-       spdk_trace_register_description("RDMA_REQ_EXECUTING", "",
+       spdk_trace_register_description("RDMA_REQ_EXECUTING",
                                        TRACE_RDMA_REQUEST_STATE_EXECUTING,
                                        OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
-       spdk_trace_register_description("RDMA_REQ_EXECUTED", "",
+       spdk_trace_register_description("RDMA_REQ_EXECUTED",
                                        TRACE_RDMA_REQUEST_STATE_EXECUTED,
                                        OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
-       spdk_trace_register_description("RDMA_REQ_RDY_TO_COMPLETE", "",
+       spdk_trace_register_description("RDMA_REQ_RDY_TO_COMPL",
                                        TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE,
                                        OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
-       spdk_trace_register_description("RDMA_REQ_COMPLETING_CONTROLLER_TO_HOST", "",
+       spdk_trace_register_description("RDMA_REQ_COMPLETING_C2H",
                                        TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,
                                        OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
-       spdk_trace_register_description("RDMA_REQ_COMPLETING_INCAPSULE", "",
+       spdk_trace_register_description("RDMA_REQ_COMPLETING",
                                        TRACE_RDMA_REQUEST_STATE_COMPLETING,
                                        OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
-       spdk_trace_register_description("RDMA_REQ_COMPLETED", "",
+       spdk_trace_register_description("RDMA_REQ_COMPLETED",
                                        TRACE_RDMA_REQUEST_STATE_COMPLETED,
                                        OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
 
-       spdk_trace_register_description("RDMA_QP_CREATE", "", TRACE_RDMA_QP_CREATE,
+       spdk_trace_register_description("RDMA_QP_CREATE", TRACE_RDMA_QP_CREATE,
                                        OWNER_NONE, OBJECT_NONE, 0, 0, "");
-       spdk_trace_register_description("RDMA_IBV_ASYNC_EVENT", "", TRACE_RDMA_IBV_ASYNC_EVENT,
+       spdk_trace_register_description("RDMA_IBV_ASYNC_EVENT", TRACE_RDMA_IBV_ASYNC_EVENT,
                                        OWNER_NONE, OBJECT_NONE, 0, 0, "type:   ");
-       spdk_trace_register_description("RDMA_CM_ASYNC_EVENT", "", TRACE_RDMA_CM_ASYNC_EVENT,
+       spdk_trace_register_description("RDMA_CM_ASYNC_EVENT", TRACE_RDMA_CM_ASYNC_EVENT,
                                        OWNER_NONE, OBJECT_NONE, 0, 0, "type:   ");
-       spdk_trace_register_description("RDMA_QP_STATE_CHANGE", "", TRACE_RDMA_QP_STATE_CHANGE,
+       spdk_trace_register_description("RDMA_QP_STATE_CHANGE", TRACE_RDMA_QP_STATE_CHANGE,
                                        OWNER_NONE, OBJECT_NONE, 0, 1, "state:  ");
-       spdk_trace_register_description("RDMA_QP_DISCONNECT", "", TRACE_RDMA_QP_DISCONNECT,
+       spdk_trace_register_description("RDMA_QP_DISCONNECT", TRACE_RDMA_QP_DISCONNECT,
                                        OWNER_NONE, OBJECT_NONE, 0, 0, "");
-       spdk_trace_register_description("RDMA_QP_DESTROY", "", TRACE_RDMA_QP_DESTROY,
+       spdk_trace_register_description("RDMA_QP_DESTROY", TRACE_RDMA_QP_DESTROY,
                                        OWNER_NONE, OBJECT_NONE, 0, 0, "");
 }
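
Each ID above is built with SPDK_TPOINT_ID(group, index), which packs the trace group and
the per-group index into one globally unique tracepoint ID. Assuming the usual layout of
64 IDs per group, TRACE_RDMA_QP_DESTROY works out to 0x4 * 0x40 + 0x11 = 0x111.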
 
+enum spdk_nvmf_rdma_wr_type {
+       RDMA_WR_TYPE_RECV,
+       RDMA_WR_TYPE_SEND,
+       RDMA_WR_TYPE_DATA,
+};
+
+struct spdk_nvmf_rdma_wr {
+       enum spdk_nvmf_rdma_wr_type     type;
+};
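
The single-field struct above is a type tag: every work request the transport posts stores
a pointer to an embedded spdk_nvmf_rdma_wr in its ibv wr_id (see the wr_id assignments in
nvmf_rdma_resources_create() below), so the completion handler can branch on the tag and
recover the owning object. A sketch of that recovery, with a hypothetical handler name:

    static void
    on_completion(struct ibv_wc *wc)
    {
            struct spdk_nvmf_rdma_wr *rdma_wr = (struct spdk_nvmf_rdma_wr *)wc->wr_id;

            switch (rdma_wr->type) {
            case RDMA_WR_TYPE_RECV: {
                    /* Recover the receive descriptor that embeds this tag. */
                    struct spdk_nvmf_rdma_recv *rdma_recv =
                            SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_recv, rdma_wr);
                    (void)rdma_recv; /* ... hand the new command off for processing ... */
                    break;
            }
            case RDMA_WR_TYPE_SEND:
            case RDMA_WR_TYPE_DATA:
                    /* rsp.rdma_wr / data.rdma_wr are embedded in
                     * spdk_nvmf_rdma_request the same way. */
                    break;
            }
    }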
+
 /* This structure holds commands as they are received off the wire.
  * It must be dynamically paired with a full request object
  * (spdk_nvmf_rdma_request) to service a request. It is separate
@@ -193,15 +229,23 @@ SPDK_TRACE_REGISTER_FN(nvmf_trace)
  * command when there aren't any free request objects.
  */
 struct spdk_nvmf_rdma_recv {
-       struct ibv_recv_wr              wr;
-       struct ibv_sge                  sgl[NVMF_DEFAULT_RX_SGE];
+       struct ibv_recv_wr                      wr;
+       struct ibv_sge                          sgl[NVMF_DEFAULT_RX_SGE];
 
-       struct spdk_nvmf_rdma_qpair     *qpair;
+       struct spdk_nvmf_rdma_qpair             *qpair;
 
        /* In-capsule data buffer */
-       uint8_t                         *buf;
+       uint8_t                                 *buf;
 
-       TAILQ_ENTRY(spdk_nvmf_rdma_recv) link;
+       struct spdk_nvmf_rdma_wr                rdma_wr;
+
+       STAILQ_ENTRY(spdk_nvmf_rdma_recv)       link;
+};
+
+struct spdk_nvmf_rdma_request_data {
+       struct spdk_nvmf_rdma_wr        rdma_wr;
+       struct ibv_send_wr              wr;
+       struct ibv_sge                  sgl[SPDK_NVMF_MAX_SGL_ENTRIES];
 };
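
A request's first data WR is embedded in the request itself; when an I/O needs more SGEs
than one WR can carry, additional spdk_nvmf_rdma_request_data entries are taken from the
transport's data_wr_pool and linked through wr.next, inheriting the chain head's wr_id so
that nvmf_rdma_request_free_data() below can tell where one request's chain ends. A hedged
sketch of that append step (the helper name is illustrative):

    static struct spdk_nvmf_rdma_request_data *
    append_data_wr(struct spdk_nvmf_rdma_transport *rtransport,
                   struct spdk_nvmf_rdma_request *rdma_req,
                   struct ibv_send_wr *chain_tail)
    {
            struct spdk_nvmf_rdma_request_data *data;

            data = spdk_mempool_get(rtransport->data_wr_pool);
            if (data == NULL) {
                    return NULL;
            }
            data->wr.wr_id = rdma_req->data.wr.wr_id; /* same tag marks one chain */
            data->wr.sg_list = data->sgl;
            data->wr.next = NULL;
            chain_tail->next = &data->wr;
            rdma_req->num_outstanding_data_wr++;
            return data;
    }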
 
 struct spdk_nvmf_rdma_request {
@@ -213,46 +257,46 @@ struct spdk_nvmf_rdma_request {
        struct spdk_nvmf_rdma_recv              *recv;
 
        struct {
+               struct spdk_nvmf_rdma_wr        rdma_wr;
                struct  ibv_send_wr             wr;
-               struct  ibv_sge                 sgl[NVMF_DEFAULT_TX_SGE];
+               struct  ibv_sge                 sgl[NVMF_DEFAULT_RSP_SGE];
        } rsp;
 
-       struct {
-               struct ibv_send_wr              wr;
-               struct ibv_sge                  sgl[SPDK_NVMF_MAX_SGL_ENTRIES];
-               void                            *buffers[SPDK_NVMF_MAX_SGL_ENTRIES];
-       } data;
-
-       TAILQ_ENTRY(spdk_nvmf_rdma_request)     link;
-       TAILQ_ENTRY(spdk_nvmf_rdma_request)     state_link;
-};
-
-struct spdk_nvmf_rdma_qpair {
-       struct spdk_nvmf_qpair                  qpair;
-
-       struct spdk_nvmf_rdma_port              *port;
-       struct spdk_nvmf_rdma_poller            *poller;
-
-       struct rdma_cm_id                       *cm_id;
-       struct rdma_cm_id                       *listen_id;
+       struct spdk_nvmf_rdma_request_data      data;
+       void                                    *buffers[NVMF_REQ_MAX_BUFFERS];
 
-       /* The maximum number of I/O outstanding on this connection at one time */
-       uint16_t                                max_queue_depth;
+       uint32_t                                num_outstanding_data_wr;
 
-       /* The maximum number of active RDMA READ and WRITE operations at one time */
-       uint16_t                                max_rw_depth;
+       STAILQ_ENTRY(spdk_nvmf_rdma_request)    state_link;
+};
 
-       /* Receives that are waiting for a request object */
-       TAILQ_HEAD(, spdk_nvmf_rdma_recv)       incoming_queue;
+enum spdk_nvmf_rdma_qpair_disconnect_flags {
+       RDMA_QP_DISCONNECTING           = 1,
+       RDMA_QP_RECV_DRAINED            = 1 << 1,
+       RDMA_QP_SEND_DRAINED            = 1 << 2
+};
 
-       /* Queues to track the requests in all states */
-       TAILQ_HEAD(, spdk_nvmf_rdma_request)    state_queue[RDMA_REQUEST_NUM_STATES];
+struct spdk_nvmf_rdma_resource_opts {
+       struct spdk_nvmf_rdma_qpair     *qpair;
+       /* qp points either to an ibv_qp object or an ibv_srq object depending on the value of shared. */
+       void                            *qp;
+       struct ibv_pd                   *pd;
+       uint32_t                        max_queue_depth;
+       uint32_t                        in_capsule_data_size;
+       bool                            shared;
+};
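
The per-qpair path later in this diff fills these opts with shared = false and the qpair's
ibv_qp. For a poller-owned shared receive queue the same struct would be filled roughly as
below; this is a hedged sketch, since the SRQ setup itself falls outside the hunks shown
here:

    static struct spdk_nvmf_rdma_resources *
    create_srq_resources(struct spdk_nvmf_rdma_poller *poller,
                         struct spdk_nvmf_rdma_device *device,
                         struct spdk_nvmf_transport *transport)
    {
            struct spdk_nvmf_rdma_resource_opts opts = {
                    .qpair                  = NULL, /* shared resources belong to no single qpair */
                    .qp                     = poller->srq, /* an ibv_srq, hence shared = true */
                    .pd                     = device->pd,
                    .max_queue_depth        = poller->max_srq_depth,
                    .in_capsule_data_size   = transport->opts.in_capsule_data_size,
                    .shared                 = true,
            };

            return nvmf_rdma_resources_create(&opts);
    }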
 
-       /* Number of requests in each state */
-       uint32_t                                state_cntr[RDMA_REQUEST_NUM_STATES];
+struct spdk_nvmf_send_wr_list {
+       struct ibv_send_wr      *first;
+       struct ibv_send_wr      *last;
+};
 
-       int                                     max_sge;
+struct spdk_nvmf_recv_wr_list {
+       struct ibv_recv_wr      *first;
+       struct ibv_recv_wr      *last;
+};
 
+struct spdk_nvmf_rdma_resources {
        /* Array of size "max_queue_depth" containing RDMA requests. */
        struct spdk_nvmf_rdma_request           *reqs;
 
@@ -277,44 +321,125 @@ struct spdk_nvmf_rdma_qpair {
        void                                    *bufs;
        struct ibv_mr                           *bufs_mr;
 
+       /* The list of pending recvs to transfer */
+       struct spdk_nvmf_recv_wr_list           recvs_to_post;
+
+       /* Receives that are waiting for a request object */
+       STAILQ_HEAD(, spdk_nvmf_rdma_recv)      incoming_queue;
+
+       /* Queue to track free requests */
+       STAILQ_HEAD(, spdk_nvmf_rdma_request)   free_queue;
+};
+
+struct spdk_nvmf_rdma_qpair {
+       struct spdk_nvmf_qpair                  qpair;
+
+       struct spdk_nvmf_rdma_port              *port;
+       struct spdk_nvmf_rdma_poller            *poller;
+
+       struct rdma_cm_id                       *cm_id;
+       struct ibv_srq                          *srq;
+       struct rdma_cm_id                       *listen_id;
+
+       /* The maximum number of I/O outstanding on this connection at one time */
+       uint16_t                                max_queue_depth;
+
+       /* The maximum number of active RDMA READ and ATOMIC operations at one time */
+       uint16_t                                max_read_depth;
+
+       /* The maximum number of RDMA SEND operations at one time */
+       uint32_t                                max_send_depth;
+
+       /* The current number of outstanding WRs from this qpair's
+        * recv queue. Should not exceed the qpair's max_queue_depth.
+        */
+       uint16_t                                current_recv_depth;
+
+       /* The current number of active RDMA READ operations */
+       uint16_t                                current_read_depth;
+
+       /* The current number of posted WRs from this qpair's
+        * send queue. Should not exceed max_send_depth.
+        */
+       uint32_t                                current_send_depth;
+
+       /* The maximum number of SGEs per WR on the send queue */
+       uint32_t                                max_send_sge;
+
+       /* The maximum number of SGEs per WR on the recv queue */
+       uint32_t                                max_recv_sge;
+
+       /* The list of pending send requests for a transfer */
+       struct spdk_nvmf_send_wr_list           sends_to_post;
+
+       struct spdk_nvmf_rdma_resources         *resources;
+
+       STAILQ_HEAD(, spdk_nvmf_rdma_request)   pending_rdma_read_queue;
+
+       STAILQ_HEAD(, spdk_nvmf_rdma_request)   pending_rdma_write_queue;
+
+       /* Number of requests not in the free state */
+       uint32_t                                qd;
+
        TAILQ_ENTRY(spdk_nvmf_rdma_qpair)       link;
 
-       /* Mgmt channel */
-       struct spdk_io_channel                  *mgmt_channel;
-       struct spdk_nvmf_rdma_mgmt_channel      *ch;
+       STAILQ_ENTRY(spdk_nvmf_rdma_qpair)      recv_link;
+
+       STAILQ_ENTRY(spdk_nvmf_rdma_qpair)      send_link;
 
        /* IBV queue pair attributes: they are used to manage
         * qp state and recover from errors.
         */
-       struct ibv_qp_init_attr                 ibv_init_attr;
-       struct ibv_qp_attr                      ibv_attr;
+       enum ibv_qp_state                       ibv_state;
+
+       uint32_t                                disconnect_flags;
+
+       /* Poller registered in case the qpair doesn't properly
+        * complete the qpair destruct process and becomes defunct.
+        */
 
-       bool                                    qpair_disconnected;
+       struct spdk_poller                      *destruct_poller;
 
-       /* Reference counter for how many unprocessed messages
-        * from other threads are currently outstanding. The
-        * qpair cannot be destroyed until this is 0. This is
-        * atomically incremented from any thread, but only
-        * decremented and read from the thread that owns this
-        * qpair.
+       /* There are several ways a disconnect can start on a qpair
+        * and they are not all mutually exclusive. It is important
+        * that we only initiate one of these paths.
         */
-       uint32_t                                refcnt;
+       bool                                    disconnect_started;
+       /* Lets us know that we have received the last_wqe event. */
+       bool                                    last_wqe_reached;
 };
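
Taken together, the depth counters above encode the admission rule enforced before any
posting: a new batch of send WRs may only be queued if it fits under max_send_depth (READs
are additionally bounded by max_read_depth). A minimal sketch of the check, assuming nr_wr
is the number of send-queue slots the operation needs:

    static bool
    can_post_send_wrs(const struct spdk_nvmf_rdma_qpair *rqpair, uint32_t nr_wr)
    {
            /* current_send_depth must never exceed max_send_depth. */
            return rqpair->current_send_depth + nr_wr <= rqpair->max_send_depth;
    }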
 
 struct spdk_nvmf_rdma_poller {
        struct spdk_nvmf_rdma_device            *device;
        struct spdk_nvmf_rdma_poll_group        *group;
 
+       int                                     num_cqe;
+       int                                     required_num_wr;
        struct ibv_cq                           *cq;
 
+       /* The maximum number of I/O outstanding on the shared receive queue at one time */
+       uint16_t                                max_srq_depth;
+
+       /* Shared receive queue */
+       struct ibv_srq                          *srq;
+
+       struct spdk_nvmf_rdma_resources         *resources;
+
        TAILQ_HEAD(, spdk_nvmf_rdma_qpair)      qpairs;
 
+       STAILQ_HEAD(, spdk_nvmf_rdma_qpair)     qpairs_pending_recv;
+
+       STAILQ_HEAD(, spdk_nvmf_rdma_qpair)     qpairs_pending_send;
+
        TAILQ_ENTRY(spdk_nvmf_rdma_poller)      link;
 };
 
 struct spdk_nvmf_rdma_poll_group {
        struct spdk_nvmf_transport_poll_group   group;
 
+       /* Requests that are waiting to obtain a data buffer */
+       STAILQ_HEAD(, spdk_nvmf_rdma_request)   pending_data_buf_queue;
+
        TAILQ_HEAD(, spdk_nvmf_rdma_poller)     pollers;
 };
 
@@ -326,6 +451,8 @@ struct spdk_nvmf_rdma_device {
        struct spdk_mem_map                     *map;
        struct ibv_pd                           *pd;
 
+       int                                     num_srq;
+
        TAILQ_ENTRY(spdk_nvmf_rdma_device)      link;
 };
 
@@ -342,7 +469,7 @@ struct spdk_nvmf_rdma_transport {
 
        struct rdma_event_channel       *event_channel;
 
-       struct spdk_mempool             *data_buf_pool;
+       struct spdk_mempool             *data_wr_pool;
 
        pthread_mutex_t                 lock;
 
@@ -354,76 +481,55 @@ struct spdk_nvmf_rdma_transport {
        TAILQ_HEAD(, spdk_nvmf_rdma_port)       ports;
 };
 
-struct spdk_nvmf_rdma_mgmt_channel {
-       /* Requests that are waiting to obtain a data buffer */
-       TAILQ_HEAD(, spdk_nvmf_rdma_request)    pending_data_buf_queue;
-};
-
-static inline void
-spdk_nvmf_rdma_qpair_inc_refcnt(struct spdk_nvmf_rdma_qpair *rqpair)
+static inline int
+spdk_nvmf_rdma_check_ibv_state(enum ibv_qp_state state)
 {
-       __sync_fetch_and_add(&rqpair->refcnt, 1);
-}
-
-static inline uint32_t
-spdk_nvmf_rdma_qpair_dec_refcnt(struct spdk_nvmf_rdma_qpair *rqpair)
-{
-       uint32_t old_refcnt, new_refcnt;
-
-       do {
-               old_refcnt = rqpair->refcnt;
-               assert(old_refcnt > 0);
-               new_refcnt = old_refcnt - 1;
-       } while (__sync_bool_compare_and_swap(&rqpair->refcnt, old_refcnt, new_refcnt) == false);
-
-       return new_refcnt;
+       switch (state) {
+       case IBV_QPS_RESET:
+       case IBV_QPS_INIT:
+       case IBV_QPS_RTR:
+       case IBV_QPS_RTS:
+       case IBV_QPS_SQD:
+       case IBV_QPS_SQE:
+       case IBV_QPS_ERR:
+               return 0;
+       default:
+               return -1;
+       }
 }
 
-/* API to IBV QueuePair */
-static const char *str_ibv_qp_state[] = {
-       "IBV_QPS_RESET",
-       "IBV_QPS_INIT",
-       "IBV_QPS_RTR",
-       "IBV_QPS_RTS",
-       "IBV_QPS_SQD",
-       "IBV_QPS_SQE",
-       "IBV_QPS_ERR"
-};
-
 static enum ibv_qp_state
 spdk_nvmf_rdma_update_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair) {
        enum ibv_qp_state old_state, new_state;
+       struct ibv_qp_attr qp_attr;
+       struct ibv_qp_init_attr init_attr;
        int rc;
 
-       /* All the attributes needed for recovery */
-       static int spdk_nvmf_ibv_attr_mask =
-       IBV_QP_STATE |
-       IBV_QP_PKEY_INDEX |
-       IBV_QP_PORT |
-       IBV_QP_ACCESS_FLAGS |
-       IBV_QP_AV |
-       IBV_QP_PATH_MTU |
-       IBV_QP_DEST_QPN |
-       IBV_QP_RQ_PSN |
-       IBV_QP_MAX_DEST_RD_ATOMIC |
-       IBV_QP_MIN_RNR_TIMER |
-       IBV_QP_SQ_PSN |
-       IBV_QP_TIMEOUT |
-       IBV_QP_RETRY_CNT |
-       IBV_QP_RNR_RETRY |
-       IBV_QP_MAX_QP_RD_ATOMIC;
-
-       old_state = rqpair->ibv_attr.qp_state;
-       rc = ibv_query_qp(rqpair->cm_id->qp, &rqpair->ibv_attr,
-                         spdk_nvmf_ibv_attr_mask, &rqpair->ibv_init_attr);
+       old_state = rqpair->ibv_state;
+       rc = ibv_query_qp(rqpair->cm_id->qp, &qp_attr,
+                         g_spdk_nvmf_ibv_query_mask, &init_attr);
 
        if (rc)
        {
                SPDK_ERRLOG("Failed to get updated RDMA queue pair state!\n");
-               assert(false);
+               return IBV_QPS_ERR + 1;
+       }
+
+       new_state = qp_attr.qp_state;
+       rqpair->ibv_state = new_state;
+       qp_attr.ah_attr.port_num = qp_attr.port_num;
+
+       rc = spdk_nvmf_rdma_check_ibv_state(new_state);
+       if (rc)
+       {
+               SPDK_ERRLOG("QP#%d: bad state updated: %u, maybe hardware issue\n", rqpair->qpair.qid, new_state);
+               /*
+                * IBV_QPS_UNKNOWN undefined if lib version smaller than libibverbs-1.1.8
+                * IBV_QPS_UNKNOWN is the enum element after IBV_QPS_ERR
+                */
+               return IBV_QPS_ERR + 1;
        }
 
-       new_state = rqpair->ibv_attr.qp_state;
        if (old_state != new_state)
        {
                spdk_trace_record(TRACE_RDMA_QP_STATE_CHANGE, 0, 0,
@@ -432,10 +538,23 @@ spdk_nvmf_rdma_update_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair) {
        return new_state;
 }
 
+static const char *str_ibv_qp_state[] = {
+       "IBV_QPS_RESET",
+       "IBV_QPS_INIT",
+       "IBV_QPS_RTR",
+       "IBV_QPS_RTS",
+       "IBV_QPS_SQD",
+       "IBV_QPS_SQE",
+       "IBV_QPS_ERR",
+       "IBV_QPS_UNKNOWN"
+};
+
 static int
 spdk_nvmf_rdma_set_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair,
                             enum ibv_qp_state new_state)
 {
+       struct ibv_qp_attr qp_attr;
+       struct ibv_qp_init_attr init_attr;
        int rc;
        enum ibv_qp_state state;
        static int attr_mask_rc[] = {
@@ -462,25 +581,25 @@ spdk_nvmf_rdma_set_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair,
                [IBV_QPS_ERR] = IBV_QP_STATE,
        };
 
-       switch (new_state) {
-       case IBV_QPS_RESET:
-       case IBV_QPS_INIT:
-       case IBV_QPS_RTR:
-       case IBV_QPS_RTS:
-       case IBV_QPS_SQD:
-       case IBV_QPS_SQE:
-       case IBV_QPS_ERR:
-               break;
-       default:
+       rc = spdk_nvmf_rdma_check_ibv_state(new_state);
+       if (rc) {
                SPDK_ERRLOG("QP#%d: bad state requested: %u\n",
                            rqpair->qpair.qid, new_state);
-               return -1;
+               return rc;
+       }
+
+       rc = ibv_query_qp(rqpair->cm_id->qp, &qp_attr,
+                         g_spdk_nvmf_ibv_query_mask, &init_attr);
+
+       if (rc) {
+               SPDK_ERRLOG("Failed to get updated RDMA queue pair state!\n");
+               assert(false);
        }
-       rqpair->ibv_attr.cur_qp_state = rqpair->ibv_attr.qp_state;
-       rqpair->ibv_attr.qp_state = new_state;
-       rqpair->ibv_attr.ah_attr.port_num = rqpair->ibv_attr.port_num;
 
-       rc = ibv_modify_qp(rqpair->cm_id->qp, &rqpair->ibv_attr,
+       qp_attr.cur_qp_state = rqpair->ibv_state;
+       qp_attr.qp_state = new_state;
+
+       rc = ibv_modify_qp(rqpair->cm_id->qp, &qp_attr,
                           attr_mask_rc[new_state]);
 
        if (rc) {
@@ -497,254 +616,231 @@ spdk_nvmf_rdma_set_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair,
                            str_ibv_qp_state[state]);
                return -1;
        }
-       SPDK_NOTICELOG("IBV QP#%u changed to: %s\n", rqpair->qpair.qid,
-                      str_ibv_qp_state[state]);
+       SPDK_DEBUGLOG(SPDK_LOG_RDMA, "IBV QP#%u changed to: %s\n", rqpair->qpair.qid,
+                     str_ibv_qp_state[state]);
        return 0;
 }
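
A caller can, for instance, drive a disconnecting qpair into the error state with
spdk_nvmf_rdma_set_ibv_state(rqpair, IBV_QPS_ERR); per standard verbs semantics the NIC
then flushes outstanding WRs (completing them with IBV_WC_WR_FLUSH_ERR) so the poller can
reap and release them through the normal path.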
 
 static void
-spdk_nvmf_rdma_request_set_state(struct spdk_nvmf_rdma_request *rdma_req,
-                                enum spdk_nvmf_rdma_request_state state)
-{
-       struct spdk_nvmf_qpair          *qpair;
-       struct spdk_nvmf_rdma_qpair     *rqpair;
-
-       qpair = rdma_req->req.qpair;
-       rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
-
-       TAILQ_REMOVE(&rqpair->state_queue[rdma_req->state], rdma_req, state_link);
-       rqpair->state_cntr[rdma_req->state]--;
-
-       rdma_req->state = state;
-
-       TAILQ_INSERT_TAIL(&rqpair->state_queue[rdma_req->state], rdma_req, state_link);
-       rqpair->state_cntr[rdma_req->state]++;
-}
-
-static int
-spdk_nvmf_rdma_mgmt_channel_create(void *io_device, void *ctx_buf)
+nvmf_rdma_request_free_data(struct spdk_nvmf_rdma_request *rdma_req,
+                           struct spdk_nvmf_rdma_transport *rtransport)
 {
-       struct spdk_nvmf_rdma_mgmt_channel *ch = ctx_buf;
+       struct spdk_nvmf_rdma_request_data      *current_data_wr = NULL, *next_data_wr = NULL;
+       struct ibv_send_wr                      *send_wr;
+       int                                     i;
+
+       rdma_req->num_outstanding_data_wr = 0;
+       current_data_wr = &rdma_req->data;
+       for (i = 0; i < current_data_wr->wr.num_sge; i++) {
+               current_data_wr->wr.sg_list[i].addr = 0;
+               current_data_wr->wr.sg_list[i].length = 0;
+               current_data_wr->wr.sg_list[i].lkey = 0;
+       }
+       current_data_wr->wr.num_sge = 0;
+
+       send_wr = current_data_wr->wr.next;
+       if (send_wr != NULL && send_wr != &rdma_req->rsp.wr) {
+               next_data_wr = SPDK_CONTAINEROF(send_wr, struct spdk_nvmf_rdma_request_data, wr);
+       }
+       while (next_data_wr) {
+               current_data_wr = next_data_wr;
+               send_wr = current_data_wr->wr.next;
+               if (send_wr != NULL && send_wr != &rdma_req->rsp.wr &&
+                   send_wr->wr_id == current_data_wr->wr.wr_id) {
+                       next_data_wr = SPDK_CONTAINEROF(send_wr, struct spdk_nvmf_rdma_request_data, wr);
+               } else {
+                       next_data_wr = NULL;
+               }
 
-       TAILQ_INIT(&ch->pending_data_buf_queue);
-       return 0;
+               for (i = 0; i < current_data_wr->wr.num_sge; i++) {
+                       current_data_wr->wr.sg_list[i].addr = 0;
+                       current_data_wr->wr.sg_list[i].length = 0;
+                       current_data_wr->wr.sg_list[i].lkey = 0;
+               }
+               current_data_wr->wr.num_sge = 0;
+               spdk_mempool_put(rtransport->data_wr_pool, current_data_wr);
+       }
 }
 
 static void
-spdk_nvmf_rdma_mgmt_channel_destroy(void *io_device, void *ctx_buf)
+nvmf_rdma_dump_request(struct spdk_nvmf_rdma_request *req)
 {
-       struct spdk_nvmf_rdma_mgmt_channel *ch = ctx_buf;
-
-       if (!TAILQ_EMPTY(&ch->pending_data_buf_queue)) {
-               SPDK_ERRLOG("Pending I/O list wasn't empty on channel destruction\n");
+       SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", req->data_from_pool);
+       if (req->req.cmd) {
+               SPDK_ERRLOG("\t\tRequest opcode: %d\n", req->req.cmd->nvmf_cmd.opcode);
+       }
+       if (req->recv) {
+               SPDK_ERRLOG("\t\tRequest recv wr_id%lu\n", req->recv->wr.wr_id);
        }
 }
 
-static int
-spdk_nvmf_rdma_cur_rw_depth(struct spdk_nvmf_rdma_qpair *rqpair)
+static void
+nvmf_rdma_dump_qpair_contents(struct spdk_nvmf_rdma_qpair *rqpair)
 {
-       return rqpair->state_cntr[RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER] +
-              rqpair->state_cntr[RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST];
-}
+       int i;
 
-static int
-spdk_nvmf_rdma_cur_queue_depth(struct spdk_nvmf_rdma_qpair *rqpair)
-{
-       return rqpair->max_queue_depth -
-              rqpair->state_cntr[RDMA_REQUEST_STATE_FREE];
+       SPDK_ERRLOG("Dumping contents of queue pair (QID %d)\n", rqpair->qpair.qid);
+       for (i = 0; i < rqpair->max_queue_depth; i++) {
+               if (rqpair->resources->reqs[i].state != RDMA_REQUEST_STATE_FREE) {
+                       nvmf_rdma_dump_request(&rqpair->resources->reqs[i]);
+               }
+       }
 }
 
 static void
-spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
+nvmf_rdma_resources_destroy(struct spdk_nvmf_rdma_resources *resources)
 {
-       spdk_trace_record(TRACE_RDMA_QP_DESTROY, 0, 0, (uintptr_t)rqpair->cm_id, 0);
-
-       if (spdk_nvmf_rdma_cur_queue_depth(rqpair)) {
-               rqpair->qpair_disconnected = true;
-               return;
-       }
-
-       if (rqpair->refcnt > 0) {
-               return;
-       }
-
-       if (rqpair->poller) {
-               TAILQ_REMOVE(&rqpair->poller->qpairs, rqpair, link);
-       }
-
-       if (rqpair->cmds_mr) {
-               ibv_dereg_mr(rqpair->cmds_mr);
+       if (resources->cmds_mr) {
+               ibv_dereg_mr(resources->cmds_mr);
        }
 
-       if (rqpair->cpls_mr) {
-               ibv_dereg_mr(rqpair->cpls_mr);
+       if (resources->cpls_mr) {
+               ibv_dereg_mr(resources->cpls_mr);
        }
 
-       if (rqpair->bufs_mr) {
-               ibv_dereg_mr(rqpair->bufs_mr);
+       if (resources->bufs_mr) {
+               ibv_dereg_mr(resources->bufs_mr);
        }
 
-       if (rqpair->cm_id) {
-               rdma_destroy_qp(rqpair->cm_id);
-               rdma_destroy_id(rqpair->cm_id);
-       }
-
-       if (rqpair->mgmt_channel) {
-               spdk_put_io_channel(rqpair->mgmt_channel);
-       }
-
-       /* Free all memory */
-       spdk_dma_free(rqpair->cmds);
-       spdk_dma_free(rqpair->cpls);
-       spdk_dma_free(rqpair->bufs);
-       free(rqpair->reqs);
-       free(rqpair->recvs);
-       free(rqpair);
+       spdk_dma_free(resources->cmds);
+       spdk_dma_free(resources->cpls);
+       spdk_dma_free(resources->bufs);
+       free(resources->reqs);
+       free(resources->recvs);
+       free(resources);
 }
 
-static int
-spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
+
+static struct spdk_nvmf_rdma_resources *
+nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
 {
-       struct spdk_nvmf_rdma_transport *rtransport;
-       struct spdk_nvmf_rdma_qpair     *rqpair;
-       int                             rc, i;
-       struct spdk_nvmf_rdma_recv      *rdma_recv;
+       struct spdk_nvmf_rdma_resources *resources;
        struct spdk_nvmf_rdma_request   *rdma_req;
-       struct spdk_nvmf_transport      *transport;
+       struct spdk_nvmf_rdma_recv      *rdma_recv;
+       struct ibv_qp                   *qp;
+       struct ibv_srq                  *srq;
+       uint32_t                        i;
+       int                             rc;
 
-       rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
-       rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
-       transport = &rtransport->transport;
-
-       memset(&rqpair->ibv_init_attr, 0, sizeof(struct ibv_qp_init_attr));
-       rqpair->ibv_init_attr.qp_context        = rqpair;
-       rqpair->ibv_init_attr.qp_type           = IBV_QPT_RC;
-       rqpair->ibv_init_attr.send_cq           = rqpair->poller->cq;
-       rqpair->ibv_init_attr.recv_cq           = rqpair->poller->cq;
-       rqpair->ibv_init_attr.cap.max_send_wr   = rqpair->max_queue_depth *
-                       2; /* SEND, READ, and WRITE operations */
-       rqpair->ibv_init_attr.cap.max_recv_wr   = rqpair->max_queue_depth; /* RECV operations */
-       rqpair->ibv_init_attr.cap.max_send_sge  = rqpair->max_sge;
-       rqpair->ibv_init_attr.cap.max_recv_sge  = NVMF_DEFAULT_RX_SGE;
-
-       rc = rdma_create_qp(rqpair->cm_id, rqpair->port->device->pd, &rqpair->ibv_init_attr);
-       if (rc) {
-               SPDK_ERRLOG("rdma_create_qp failed: errno %d: %s\n", errno, spdk_strerror(errno));
-               rdma_destroy_id(rqpair->cm_id);
-               rqpair->cm_id = NULL;
-               spdk_nvmf_rdma_qpair_destroy(rqpair);
-               return -1;
+       resources = calloc(1, sizeof(struct spdk_nvmf_rdma_resources));
+       if (!resources) {
+               SPDK_ERRLOG("Unable to allocate resources for receive queue.\n");
+               return NULL;
        }
 
-       spdk_trace_record(TRACE_RDMA_QP_CREATE, 0, 0, (uintptr_t)rqpair->cm_id, 0);
-       SPDK_DEBUGLOG(SPDK_LOG_RDMA, "New RDMA Connection: %p\n", qpair);
-
-       rqpair->reqs = calloc(rqpair->max_queue_depth, sizeof(*rqpair->reqs));
-       rqpair->recvs = calloc(rqpair->max_queue_depth, sizeof(*rqpair->recvs));
-       rqpair->cmds = spdk_dma_zmalloc(rqpair->max_queue_depth * sizeof(*rqpair->cmds),
-                                       0x1000, NULL);
-       rqpair->cpls = spdk_dma_zmalloc(rqpair->max_queue_depth * sizeof(*rqpair->cpls),
-                                       0x1000, NULL);
-
+       resources->reqs = calloc(opts->max_queue_depth, sizeof(*resources->reqs));
+       resources->recvs = calloc(opts->max_queue_depth, sizeof(*resources->recvs));
+       resources->cmds = spdk_dma_zmalloc(opts->max_queue_depth * sizeof(*resources->cmds),
+                                          0x1000, NULL);
+       resources->cpls = spdk_dma_zmalloc(opts->max_queue_depth * sizeof(*resources->cpls),
+                                          0x1000, NULL);
 
-       if (transport->opts.in_capsule_data_size > 0) {
-               rqpair->bufs = spdk_dma_zmalloc(rqpair->max_queue_depth *
-                                               transport->opts.in_capsule_data_size,
-                                               0x1000, NULL);
+       if (opts->in_capsule_data_size > 0) {
+               resources->bufs = spdk_dma_zmalloc(opts->max_queue_depth *
+                                                  opts->in_capsule_data_size,
+                                                  0x1000, NULL);
        }
 
-       if (!rqpair->reqs || !rqpair->recvs || !rqpair->cmds ||
-           !rqpair->cpls || (transport->opts.in_capsule_data_size && !rqpair->bufs)) {
+       if (!resources->reqs || !resources->recvs || !resources->cmds ||
+           !resources->cpls || (opts->in_capsule_data_size && !resources->bufs)) {
                SPDK_ERRLOG("Unable to allocate sufficient memory for RDMA queue.\n");
-               spdk_nvmf_rdma_qpair_destroy(rqpair);
-               return -1;
+               goto cleanup;
        }
 
-       rqpair->cmds_mr = ibv_reg_mr(rqpair->cm_id->pd, rqpair->cmds,
-                                    rqpair->max_queue_depth * sizeof(*rqpair->cmds),
-                                    IBV_ACCESS_LOCAL_WRITE);
-       rqpair->cpls_mr = ibv_reg_mr(rqpair->cm_id->pd, rqpair->cpls,
-                                    rqpair->max_queue_depth * sizeof(*rqpair->cpls),
-                                    0);
+       resources->cmds_mr = ibv_reg_mr(opts->pd, resources->cmds,
+                                       opts->max_queue_depth * sizeof(*resources->cmds),
+                                       IBV_ACCESS_LOCAL_WRITE);
+       resources->cpls_mr = ibv_reg_mr(opts->pd, resources->cpls,
+                                       opts->max_queue_depth * sizeof(*resources->cpls),
+                                       0);
 
-       if (transport->opts.in_capsule_data_size) {
-               rqpair->bufs_mr = ibv_reg_mr(rqpair->cm_id->pd, rqpair->bufs,
-                                            rqpair->max_queue_depth *
-                                            transport->opts.in_capsule_data_size,
-                                            IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
+       if (opts->in_capsule_data_size) {
+               resources->bufs_mr = ibv_reg_mr(opts->pd, resources->bufs,
+                                               opts->max_queue_depth *
+                                               opts->in_capsule_data_size,
+                                               IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
        }
 
-       if (!rqpair->cmds_mr || !rqpair->cpls_mr || (transport->opts.in_capsule_data_size &&
-                       !rqpair->bufs_mr)) {
-               SPDK_ERRLOG("Unable to register required memory for RDMA queue.\n");
-               spdk_nvmf_rdma_qpair_destroy(rqpair);
-               return -1;
+       if (!resources->cmds_mr || !resources->cpls_mr ||
+           (opts->in_capsule_data_size &&
+            !resources->bufs_mr)) {
+               goto cleanup;
        }
        SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Command Array: %p Length: %lx LKey: %x\n",
-                     rqpair->cmds, rqpair->max_queue_depth * sizeof(*rqpair->cmds), rqpair->cmds_mr->lkey);
+                     resources->cmds, opts->max_queue_depth * sizeof(*resources->cmds),
+                     resources->cmds_mr->lkey);
        SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Completion Array: %p Length: %lx LKey: %x\n",
-                     rqpair->cpls, rqpair->max_queue_depth * sizeof(*rqpair->cpls), rqpair->cpls_mr->lkey);
-       if (rqpair->bufs && rqpair->bufs_mr) {
+                     resources->cpls, opts->max_queue_depth * sizeof(*resources->cpls),
+                     resources->cpls_mr->lkey);
+       if (resources->bufs && resources->bufs_mr) {
                SPDK_DEBUGLOG(SPDK_LOG_RDMA, "In Capsule Data Array: %p Length: %x LKey: %x\n",
-                             rqpair->bufs, rqpair->max_queue_depth *
-                             transport->opts.in_capsule_data_size, rqpair->bufs_mr->lkey);
+                             resources->bufs, opts->max_queue_depth *
+                             opts->in_capsule_data_size, resources->bufs_mr->lkey);
        }
 
-       /* Initialise request state queues and counters of the queue pair */
-       for (i = RDMA_REQUEST_STATE_FREE; i < RDMA_REQUEST_NUM_STATES; i++) {
-               TAILQ_INIT(&rqpair->state_queue[i]);
-               rqpair->state_cntr[i] = 0;
-       }
+       /* Initialize queues */
+       STAILQ_INIT(&resources->incoming_queue);
+       STAILQ_INIT(&resources->free_queue);
 
-       for (i = 0; i < rqpair->max_queue_depth; i++) {
+       for (i = 0; i < opts->max_queue_depth; i++) {
                struct ibv_recv_wr *bad_wr = NULL;
 
-               rdma_recv = &rqpair->recvs[i];
-               rdma_recv->qpair = rqpair;
+               rdma_recv = &resources->recvs[i];
+               rdma_recv->qpair = opts->qpair;
 
                /* Set up memory to receive commands */
-               if (rqpair->bufs) {
-                       rdma_recv->buf = (void *)((uintptr_t)rqpair->bufs + (i *
-                                                 transport->opts.in_capsule_data_size));
+               if (resources->bufs) {
+                       rdma_recv->buf = (void *)((uintptr_t)resources->bufs + (i *
+                                                 opts->in_capsule_data_size));
                }
 
-               rdma_recv->sgl[0].addr = (uintptr_t)&rqpair->cmds[i];
-               rdma_recv->sgl[0].length = sizeof(rqpair->cmds[i]);
-               rdma_recv->sgl[0].lkey = rqpair->cmds_mr->lkey;
+               rdma_recv->rdma_wr.type = RDMA_WR_TYPE_RECV;
+
+               rdma_recv->sgl[0].addr = (uintptr_t)&resources->cmds[i];
+               rdma_recv->sgl[0].length = sizeof(resources->cmds[i]);
+               rdma_recv->sgl[0].lkey = resources->cmds_mr->lkey;
                rdma_recv->wr.num_sge = 1;
 
-               if (rdma_recv->buf && rqpair->bufs_mr) {
+               if (rdma_recv->buf && resources->bufs_mr) {
                        rdma_recv->sgl[1].addr = (uintptr_t)rdma_recv->buf;
-                       rdma_recv->sgl[1].length = transport->opts.in_capsule_data_size;
-                       rdma_recv->sgl[1].lkey = rqpair->bufs_mr->lkey;
+                       rdma_recv->sgl[1].length = opts->in_capsule_data_size;
+                       rdma_recv->sgl[1].lkey = resources->bufs_mr->lkey;
                        rdma_recv->wr.num_sge++;
                }
 
-               rdma_recv->wr.wr_id = (uintptr_t)rdma_recv;
+               rdma_recv->wr.wr_id = (uintptr_t)&rdma_recv->rdma_wr;
                rdma_recv->wr.sg_list = rdma_recv->sgl;
-
-               rc = ibv_post_recv(rqpair->cm_id->qp, &rdma_recv->wr, &bad_wr);
+               if (opts->shared) {
+                       srq = (struct ibv_srq *)opts->qp;
+                       rc = ibv_post_srq_recv(srq, &rdma_recv->wr, &bad_wr);
+               } else {
+                       qp = (struct ibv_qp *)opts->qp;
+                       rc = ibv_post_recv(qp, &rdma_recv->wr, &bad_wr);
+               }
                if (rc) {
-                       SPDK_ERRLOG("Unable to post capsule for RDMA RECV\n");
-                       spdk_nvmf_rdma_qpair_destroy(rqpair);
-                       return -1;
+                       goto cleanup;
                }
        }
 
-       for (i = 0; i < rqpair->max_queue_depth; i++) {
-               rdma_req = &rqpair->reqs[i];
+       for (i = 0; i < opts->max_queue_depth; i++) {
+               rdma_req = &resources->reqs[i];
 
-               rdma_req->req.qpair = &rqpair->qpair;
+               if (opts->qpair != NULL) {
+                       rdma_req->req.qpair = &opts->qpair->qpair;
+               } else {
+                       rdma_req->req.qpair = NULL;
+               }
                rdma_req->req.cmd = NULL;
 
                /* Set up memory to send responses */
-               rdma_req->req.rsp = &rqpair->cpls[i];
+               rdma_req->req.rsp = &resources->cpls[i];
 
-               rdma_req->rsp.sgl[0].addr = (uintptr_t)&rqpair->cpls[i];
-               rdma_req->rsp.sgl[0].length = sizeof(rqpair->cpls[i]);
-               rdma_req->rsp.sgl[0].lkey = rqpair->cpls_mr->lkey;
+               rdma_req->rsp.sgl[0].addr = (uintptr_t)&resources->cpls[i];
+               rdma_req->rsp.sgl[0].length = sizeof(resources->cpls[i]);
+               rdma_req->rsp.sgl[0].lkey = resources->cpls_mr->lkey;
 
-               rdma_req->rsp.wr.wr_id = (uintptr_t)rdma_req;
+               rdma_req->rsp.rdma_wr.type = RDMA_WR_TYPE_SEND;
+               rdma_req->rsp.wr.wr_id = (uintptr_t)&rdma_req->rsp.rdma_wr;
                rdma_req->rsp.wr.next = NULL;
                rdma_req->rsp.wr.opcode = IBV_WR_SEND;
                rdma_req->rsp.wr.send_flags = IBV_SEND_SIGNALED;
@@ -752,7 +848,8 @@ spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
                rdma_req->rsp.wr.num_sge = SPDK_COUNTOF(rdma_req->rsp.sgl);
 
                /* Set up memory for data buffers */
-               rdma_req->data.wr.wr_id = (uint64_t)rdma_req;
+               rdma_req->data.rdma_wr.type = RDMA_WR_TYPE_DATA;
+               rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
                rdma_req->data.wr.next = NULL;
                rdma_req->data.wr.send_flags = IBV_SEND_SIGNALED;
                rdma_req->data.wr.sg_list = rdma_req->data.sgl;
@@ -760,50 +857,267 @@ spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
 
                /* Initialize request state to FREE */
                rdma_req->state = RDMA_REQUEST_STATE_FREE;
-               TAILQ_INSERT_TAIL(&rqpair->state_queue[rdma_req->state], rdma_req, state_link);
-               rqpair->state_cntr[rdma_req->state]++;
+               STAILQ_INSERT_TAIL(&resources->free_queue, rdma_req, state_link);
        }
 
-       return 0;
+       return resources;
+
+cleanup:
+       nvmf_rdma_resources_destroy(resources);
+       return NULL;
+}
+
+static void
+spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
+{
+       struct spdk_nvmf_rdma_recv      *rdma_recv, *recv_tmp;
+       struct ibv_recv_wr              *bad_recv_wr = NULL;
+       int                             rc;
+
+       spdk_trace_record(TRACE_RDMA_QP_DESTROY, 0, 0, (uintptr_t)rqpair->cm_id, 0);
+
+       spdk_poller_unregister(&rqpair->destruct_poller);
+
+       if (rqpair->qd != 0) {
+               if (rqpair->srq == NULL) {
+                       nvmf_rdma_dump_qpair_contents(rqpair);
+               }
+               SPDK_WARNLOG("Destroying qpair when queue depth is %d\n", rqpair->qd);
+       }
+
+       if (rqpair->poller) {
+               TAILQ_REMOVE(&rqpair->poller->qpairs, rqpair, link);
+
+               if (rqpair->srq != NULL && rqpair->resources != NULL) {
+                       /* Drop all received but unprocessed commands for this queue and return them to SRQ */
+                       STAILQ_FOREACH_SAFE(rdma_recv, &rqpair->resources->incoming_queue, link, recv_tmp) {
+                               if (rqpair == rdma_recv->qpair) {
+                                       STAILQ_REMOVE_HEAD(&rqpair->resources->incoming_queue, link);
+                                       rc = ibv_post_srq_recv(rqpair->srq, &rdma_recv->wr, &bad_recv_wr);
+                                       if (rc) {
+                                               SPDK_ERRLOG("Unable to re-post rx descriptor\n");
+                                       }
+                               }
+                       }
+               }
+       }
+
+       if (rqpair->cm_id) {
+               if (rqpair->cm_id->qp != NULL) {
+                       rdma_destroy_qp(rqpair->cm_id);
+               }
+               rdma_destroy_id(rqpair->cm_id);
+
+               if (rqpair->poller != NULL && rqpair->srq == NULL) {
+                       rqpair->poller->required_num_wr -= MAX_WR_PER_QP(rqpair->max_queue_depth);
+               }
+       }
+
+       if (rqpair->srq == NULL && rqpair->resources != NULL) {
+               nvmf_rdma_resources_destroy(rqpair->resources);
+       }
+
+       free(rqpair);
+}
+
+static int
+nvmf_rdma_resize_cq(struct spdk_nvmf_rdma_qpair *rqpair, struct spdk_nvmf_rdma_device *device)
+{
+       struct spdk_nvmf_rdma_poller    *rpoller;
+       int                             rc, num_cqe, required_num_wr;
+
+       /* Enlarge CQ size dynamically */
+       rpoller = rqpair->poller;
+       required_num_wr = rpoller->required_num_wr + MAX_WR_PER_QP(rqpair->max_queue_depth);
+       num_cqe = rpoller->num_cqe;
+       if (num_cqe < required_num_wr) {
+               num_cqe = spdk_max(num_cqe * 2, required_num_wr);
+               num_cqe = spdk_min(num_cqe, device->attr.max_cqe);
+       }
+
+       if (rpoller->num_cqe != num_cqe) {
+               if (required_num_wr > device->attr.max_cqe) {
+                       SPDK_ERRLOG("RDMA CQE requirement (%d) exceeds device max_cqe limitation (%d)\n",
+                                   required_num_wr, device->attr.max_cqe);
+                       return -1;
+               }
+
+               SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Resize RDMA CQ from %d to %d\n", rpoller->num_cqe, num_cqe);
+               rc = ibv_resize_cq(rpoller->cq, num_cqe);
+               if (rc) {
+                       SPDK_ERRLOG("RDMA CQ resize failed: errno %d: %s\n", errno, spdk_strerror(errno));
+                       return -1;
+               }
+
+               rpoller->num_cqe = num_cqe;
+       }
+
+       rpoller->required_num_wr = required_num_wr;
+       return 0;
+}
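
Worked example of the sizing above: starting from DEFAULT_NVMF_RDMA_CQ_SIZE = 4096, each
qpair of depth 128 adds MAX_WR_PER_QP(128) = 386 to required_num_wr. The first ten such
qpairs fit (3860 <= 4096); the eleventh pushes required_num_wr to 4246, so num_cqe doubles
to spdk_max(4096 * 2, 4246) = 8192, clamped to device->attr.max_cqe, and ibv_resize_cq()
is called once rather than on every qpair.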
+
+static int
+spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
+{
+       struct spdk_nvmf_rdma_qpair             *rqpair;
+       int                                     rc;
+       struct spdk_nvmf_rdma_transport         *rtransport;
+       struct spdk_nvmf_transport              *transport;
+       struct spdk_nvmf_rdma_resource_opts     opts;
+       struct spdk_nvmf_rdma_device            *device;
+       struct ibv_qp_init_attr                 ibv_init_attr;
+
+       rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
+       device = rqpair->port->device;
+
+       memset(&ibv_init_attr, 0, sizeof(struct ibv_qp_init_attr));
+       ibv_init_attr.qp_context        = rqpair;
+       ibv_init_attr.qp_type           = IBV_QPT_RC;
+       ibv_init_attr.send_cq           = rqpair->poller->cq;
+       ibv_init_attr.recv_cq           = rqpair->poller->cq;
+
+       if (rqpair->srq) {
+               ibv_init_attr.srq               = rqpair->srq;
+       } else {
+               ibv_init_attr.cap.max_recv_wr   = rqpair->max_queue_depth +
+                                                 1; /* RECV operations + dummy drain WR */
+       }
+
+       ibv_init_attr.cap.max_send_wr   = rqpair->max_queue_depth *
+                                         2 + 1; /* SEND, READ, and WRITE operations + dummy drain WR */
+       ibv_init_attr.cap.max_send_sge  = spdk_min(device->attr.max_sge, NVMF_DEFAULT_TX_SGE);
+       ibv_init_attr.cap.max_recv_sge  = spdk_min(device->attr.max_sge, NVMF_DEFAULT_RX_SGE);
+
+       if (rqpair->srq == NULL && nvmf_rdma_resize_cq(rqpair, device) < 0) {
+               SPDK_ERRLOG("Failed to resize the completion queue. Cannot initialize qpair.\n");
+               goto error;
+       }
+
+       rc = rdma_create_qp(rqpair->cm_id, rqpair->port->device->pd, &ibv_init_attr);
+       if (rc) {
+               SPDK_ERRLOG("rdma_create_qp failed: errno %d: %s\n", errno, spdk_strerror(errno));
+               goto error;
+       }
+
+       rqpair->max_send_depth = spdk_min((uint32_t)(rqpair->max_queue_depth * 2 + 1),
+                                         ibv_init_attr.cap.max_send_wr);
+       rqpair->max_send_sge = spdk_min(NVMF_DEFAULT_TX_SGE, ibv_init_attr.cap.max_send_sge);
+       rqpair->max_recv_sge = spdk_min(NVMF_DEFAULT_RX_SGE, ibv_init_attr.cap.max_recv_sge);
+       spdk_trace_record(TRACE_RDMA_QP_CREATE, 0, 0, (uintptr_t)rqpair->cm_id, 0);
+       SPDK_DEBUGLOG(SPDK_LOG_RDMA, "New RDMA Connection: %p\n", qpair);
+
+       rqpair->sends_to_post.first = NULL;
+       rqpair->sends_to_post.last = NULL;
+
+       if (rqpair->poller->srq == NULL) {
+               rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
+               transport = &rtransport->transport;
+
+               opts.qp = rqpair->cm_id->qp;
+               opts.pd = rqpair->cm_id->pd;
+               opts.qpair = rqpair;
+               opts.shared = false;
+               opts.max_queue_depth = rqpair->max_queue_depth;
+               opts.in_capsule_data_size = transport->opts.in_capsule_data_size;
+
+               rqpair->resources = nvmf_rdma_resources_create(&opts);
+
+               if (!rqpair->resources) {
+                       SPDK_ERRLOG("Unable to allocate resources for receive queue.\n");
+                       goto error;
+               }
+       } else {
+               rqpair->resources = rqpair->poller->resources;
+       }
+
+       rqpair->current_recv_depth = 0;
+       STAILQ_INIT(&rqpair->pending_rdma_read_queue);
+       STAILQ_INIT(&rqpair->pending_rdma_write_queue);
+
+       return 0;
+
+error:
+       rdma_destroy_id(rqpair->cm_id);
+       rqpair->cm_id = NULL;
+       spdk_nvmf_rdma_qpair_destroy(rqpair);
+       return -1;
+}
+
+/* Append the given recv wr structure to the resource struct's outstanding recvs list. */
+/* This function accepts either a single wr or the first wr in a linked list. */
+static void
+nvmf_rdma_qpair_queue_recv_wrs(struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_recv_wr *first)
+{
+       struct ibv_recv_wr *last;
+
+       last = first;
+       while (last->next != NULL) {
+               last = last->next;
+       }
+
+       if (rqpair->resources->recvs_to_post.first == NULL) {
+               rqpair->resources->recvs_to_post.first = first;
+               rqpair->resources->recvs_to_post.last = last;
+               if (rqpair->srq == NULL) {
+                       STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_recv, rqpair, recv_link);
+               }
+       } else {
+               rqpair->resources->recvs_to_post.last->next = first;
+               rqpair->resources->recvs_to_post.last = last;
+       }
+}
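+
+/* Editor's note: an illustrative sketch, not part of this change. The chain built
+ * above is deferred so that a later flush can post all queued recvs with a single
+ * doorbell. Assuming a flush helper shaped like the structures in this file
+ * (ibv_post_recv/ibv_post_srq_recv are the standard verbs calls; the variable
+ * names are hypothetical):
+ *
+ *     struct ibv_recv_wr *bad_wr = NULL;
+ *     int rc;
+ *
+ *     if (resources->recvs_to_post.first != NULL) {
+ *             if (srq != NULL) {
+ *                     rc = ibv_post_srq_recv(srq, resources->recvs_to_post.first, &bad_wr);
+ *             } else {
+ *                     rc = ibv_post_recv(qp, resources->recvs_to_post.first, &bad_wr);
+ *             }
+ *             // On failure, bad_wr points at the first WR that was not posted.
+ *             resources->recvs_to_post.first = NULL;
+ *             resources->recvs_to_post.last = NULL;
+ *     }
+ */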
+
+/* Append the given send wr structure to the qpair's outstanding sends list. */
+/* This function accepts either a single wr or the first wr in a linked list. */
+static void
+nvmf_rdma_qpair_queue_send_wrs(struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_send_wr *first)
+{
+       struct ibv_send_wr *last;
+
+       last = first;
+       while (last->next != NULL) {
+               last = last->next;
+       }
+
+       if (rqpair->sends_to_post.first == NULL) {
+               rqpair->sends_to_post.first = first;
+               rqpair->sends_to_post.last = last;
+               STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_send, rqpair, send_link);
+       } else {
+               rqpair->sends_to_post.last->next = first;
+               rqpair->sends_to_post.last = last;
+       }
 }
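+
+/* Editor's note: a matching sketch (assumption, not in this patch) for the send
+ * side. Sends are always posted to the qpair's own QP, even when recvs go to a
+ * shared receive queue, which is why sends_to_post lives on the qpair rather
+ * than on the shared resources:
+ *
+ *     struct ibv_send_wr *bad_wr = NULL;
+ *     int rc;
+ *
+ *     if (rqpair->sends_to_post.first != NULL) {
+ *             rc = ibv_post_send(rqpair->cm_id->qp, rqpair->sends_to_post.first, &bad_wr);
+ *             rqpair->sends_to_post.first = NULL;
+ *             rqpair->sends_to_post.last = NULL;
+ *     }
+ */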
 
 static int
 request_transfer_in(struct spdk_nvmf_request *req)
 {
-       int                             rc;
        struct spdk_nvmf_rdma_request   *rdma_req;
        struct spdk_nvmf_qpair          *qpair;
        struct spdk_nvmf_rdma_qpair     *rqpair;
-       struct ibv_send_wr              *bad_wr = NULL;
 
        qpair = req->qpair;
        rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
        rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
 
        assert(req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
+       assert(rdma_req != NULL);
 
-       SPDK_DEBUGLOG(SPDK_LOG_RDMA, "RDMA READ POSTED. Request: %p Connection: %p\n", req, qpair);
-
-       rdma_req->data.wr.opcode = IBV_WR_RDMA_READ;
-       rdma_req->data.wr.next = NULL;
-       rc = ibv_post_send(rqpair->cm_id->qp, &rdma_req->data.wr, &bad_wr);
-       if (rc) {
-               SPDK_ERRLOG("Unable to transfer data from host to target\n");
-               return -1;
-       }
+       nvmf_rdma_qpair_queue_send_wrs(rqpair, &rdma_req->data.wr);
+       rqpair->current_read_depth += rdma_req->num_outstanding_data_wr;
+       rqpair->current_send_depth += rdma_req->num_outstanding_data_wr;
        return 0;
 }
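+
+/* Editor's note: worked example (not in the source). With max_queue_depth = 128,
+ * max_send_depth is min(128 * 2 + 1, device cap) = 257. An RDMA READ for a
+ * request split across 4 WRs consumes 4 units of both current_read_depth
+ * (bounded by the initiator's advertised rd_atomic resources, max_read_depth)
+ * and current_send_depth; both counters are released again as the READ
+ * completions are reaped from the CQ. */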
 
 static int
 request_transfer_out(struct spdk_nvmf_request *req, int *data_posted)
 {
-       int                             rc;
+       int                             num_outstanding_data_wr = 0;
        struct spdk_nvmf_rdma_request   *rdma_req;
        struct spdk_nvmf_qpair          *qpair;
        struct spdk_nvmf_rdma_qpair     *rqpair;
        struct spdk_nvme_cpl            *rsp;
-       struct ibv_recv_wr              *bad_recv_wr = NULL;
-       struct ibv_send_wr              *send_wr, *bad_send_wr = NULL;
+       struct ibv_send_wr              *first = NULL;
 
        *data_posted = 0;
        qpair = req->qpair;
@@ -819,43 +1133,32 @@ request_transfer_out(struct spdk_nvmf_request *req, int *data_posted)
        }
        rsp->sqhd = qpair->sq_head;
 
-       /* Post the capsule to the recv buffer */
+       /* Queue the capsule's recv buffer to be reposted */
        assert(rdma_req->recv != NULL);
-       SPDK_DEBUGLOG(SPDK_LOG_RDMA, "RDMA RECV POSTED. Recv: %p Connection: %p\n", rdma_req->recv,
-                     rqpair);
-       rc = ibv_post_recv(rqpair->cm_id->qp, &rdma_req->recv->wr, &bad_recv_wr);
-       if (rc) {
-               SPDK_ERRLOG("Unable to re-post rx descriptor\n");
-               return rc;
-       }
+
+       nvmf_rdma_qpair_queue_recv_wrs(rqpair, &rdma_req->recv->wr);
+
        rdma_req->recv = NULL;
+       assert(rqpair->current_recv_depth > 0);
+       rqpair->current_recv_depth--;
 
-       /* Build the response which consists of an optional
-        * RDMA WRITE to transfer data, plus an RDMA SEND
+       /* Build the response which consists of optional
+        * RDMA WRITEs to transfer data, plus an RDMA SEND
         * containing the response.
         */
-       send_wr = &rdma_req->rsp.wr;
+       first = &rdma_req->rsp.wr;
 
        if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
            req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
-               SPDK_DEBUGLOG(SPDK_LOG_RDMA, "RDMA WRITE POSTED. Request: %p Connection: %p\n", req, qpair);
-
-               rdma_req->data.wr.opcode = IBV_WR_RDMA_WRITE;
-
-               rdma_req->data.wr.next = send_wr;
+               first = &rdma_req->data.wr;
                *data_posted = 1;
-               send_wr = &rdma_req->data.wr;
-       }
-
-       SPDK_DEBUGLOG(SPDK_LOG_RDMA, "RDMA SEND POSTED. Request: %p Connection: %p\n", req, qpair);
-
-       /* Send the completion */
-       rc = ibv_post_send(rqpair->cm_id->qp, send_wr, &bad_send_wr);
-       if (rc) {
-               SPDK_ERRLOG("Unable to send response capsule\n");
+               num_outstanding_data_wr = rdma_req->num_outstanding_data_wr;
        }
+       nvmf_rdma_qpair_queue_send_wrs(rqpair, first);
+       /* +1 for the rsp wr */
+       rqpair->current_send_depth += num_outstanding_data_wr + 1;
 
-       return rc;
+       return 0;
 }
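+
+/* Editor's note: illustration only. For a controller-to-host transfer, the WR
+ * chain queued here was pre-linked by the SGL parsing code, so one post covers
+ * both the data and the completion:
+ *
+ *     data.wr (RDMA_WRITE, unsignaled) -> ... -> rsp.wr (SEND)
+ *
+ * and current_send_depth grows by num_outstanding_data_wr + 1. For responses
+ * with no data to write back, only rsp.wr is queued. */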
 
 static int
@@ -872,7 +1175,16 @@ spdk_nvmf_rdma_event_accept(struct rdma_cm_id *id, struct spdk_nvmf_rdma_qpair *
        ctrlr_event_data.private_data_len = sizeof(accept_data);
        if (id->ps == RDMA_PS_TCP) {
                ctrlr_event_data.responder_resources = 0; /* We accept 0 reads from the host */
-               ctrlr_event_data.initiator_depth = rqpair->max_rw_depth;
+               ctrlr_event_data.initiator_depth = rqpair->max_read_depth;
+       }
+
+       /* Configure infinite retries for the initiator side qpair.
+        * When using a shared receive queue on the target side,
+        * we need to pass this value to the initiator to prevent the
+        * initiator side NIC from completing SEND requests back to the
+        * initiator with status rnr_retry_count_exceeded. */
+       if (rqpair->srq != NULL) {
+               ctrlr_event_data.rnr_retry_count = 0x7;
        }
 
        rc = rdma_accept(id, &ctrlr_event_data);
@@ -906,7 +1218,7 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
        struct rdma_conn_param          *rdma_param = NULL;
        const struct spdk_nvmf_rdma_request_private_data *private_data = NULL;
        uint16_t                        max_queue_depth;
-       uint16_t                        max_rw_depth;
+       uint16_t                        max_read_depth;
 
        rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
 
@@ -943,7 +1255,7 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
 
        /* Start with the maximum queue depth allowed by the target */
        max_queue_depth = rtransport->transport.opts.max_queue_depth;
-       max_rw_depth = rtransport->transport.opts.max_queue_depth;
+       max_read_depth = rtransport->transport.opts.max_queue_depth;
        SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Target Max Queue Depth: %d\n",
                      rtransport->transport.opts.max_queue_depth);
 
@@ -952,14 +1264,14 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
                      "Local NIC Max Send/Recv Queue Depth: %d Max Read/Write Queue Depth: %d\n",
                      port->device->attr.max_qp_wr, port->device->attr.max_qp_rd_atom);
        max_queue_depth = spdk_min(max_queue_depth, port->device->attr.max_qp_wr);
-       max_rw_depth = spdk_min(max_rw_depth, port->device->attr.max_qp_rd_atom);
+       max_read_depth = spdk_min(max_read_depth, port->device->attr.max_qp_init_rd_atom);
 
        /* Next check the remote NIC's hardware limitations */
        SPDK_DEBUGLOG(SPDK_LOG_RDMA,
                      "Host (Initiator) NIC Max Incoming RDMA R/W operations: %d Max Outgoing RDMA R/W operations: %d\n",
                      rdma_param->initiator_depth, rdma_param->responder_resources);
        if (rdma_param->initiator_depth > 0) {
-               max_rw_depth = spdk_min(max_rw_depth, rdma_param->initiator_depth);
+               max_read_depth = spdk_min(max_read_depth, rdma_param->initiator_depth);
        }
 
        /* Finally check for the host software requested values, which are
@@ -973,7 +1285,7 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
        }
 
        SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Final Negotiated Queue Depth: %d R/W Depth: %d\n",
-                     max_queue_depth, max_rw_depth);
+                     max_queue_depth, max_read_depth);
 
        rqpair = calloc(1, sizeof(struct spdk_nvmf_rdma_qpair));
        if (rqpair == NULL) {
@@ -984,12 +1296,11 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
 
        rqpair->port = port;
        rqpair->max_queue_depth = max_queue_depth;
-       rqpair->max_rw_depth = max_rw_depth;
+       rqpair->max_read_depth = max_read_depth;
        rqpair->cm_id = event->id;
        rqpair->listen_id = event->listen_id;
        rqpair->qpair.transport = transport;
-       rqpair->max_sge = spdk_min(port->device->attr.max_sge, SPDK_NVMF_MAX_SGL_ENTRIES);
-       TAILQ_INIT(&rqpair->incoming_queue);
+
        event->id->context = &rqpair->qpair;
 
        cb_fn(&rqpair->qpair);
@@ -997,204 +1308,103 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
        return 0;
 }
 
-static void
-_nvmf_rdma_disconnect(void *ctx)
-{
-       struct spdk_nvmf_qpair *qpair = ctx;
-       struct spdk_nvmf_rdma_qpair *rqpair;
-
-       rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
-
-       spdk_nvmf_rdma_qpair_dec_refcnt(rqpair);
-
-       spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
-}
-
-static void
-_nvmf_rdma_disconnect_retry(void *ctx)
+static int
+spdk_nvmf_rdma_mem_notify(void *cb_ctx, struct spdk_mem_map *map,
+                         enum spdk_mem_map_notify_action action,
+                         void *vaddr, size_t size)
 {
-       struct spdk_nvmf_qpair *qpair = ctx;
-       struct spdk_nvmf_poll_group *group;
-
-       /* Read the group out of the qpair. This is normally set and accessed only from
-        * the thread that created the group. Here, we're not on that thread necessarily.
-        * The data member qpair->group begins it's life as NULL and then is assigned to
-        * a pointer and never changes. So fortunately reading this and checking for
-        * non-NULL is thread safe in the x86_64 memory model. */
-       group = qpair->group;
+       struct ibv_pd *pd = cb_ctx;
+       struct ibv_mr *mr;
 
-       if (group == NULL) {
-               /* The qpair hasn't been assigned to a group yet, so we can't
-                * process a disconnect. Send a message to ourself and try again. */
-               spdk_thread_send_msg(spdk_get_thread(), _nvmf_rdma_disconnect_retry, qpair);
-               return;
+       switch (action) {
+       case SPDK_MEM_MAP_NOTIFY_REGISTER:
+               if (!g_nvmf_hooks.get_rkey) {
+                       mr = ibv_reg_mr(pd, vaddr, size,
+                                       IBV_ACCESS_LOCAL_WRITE |
+                                       IBV_ACCESS_REMOTE_READ |
+                                       IBV_ACCESS_REMOTE_WRITE);
+                       if (mr == NULL) {
+                               SPDK_ERRLOG("ibv_reg_mr() failed\n");
+                               return -1;
+                       } else {
+                               spdk_mem_map_set_translation(map, (uint64_t)vaddr, size, (uint64_t)mr);
+                       }
+               } else {
+                       spdk_mem_map_set_translation(map, (uint64_t)vaddr, size,
+                                                    g_nvmf_hooks.get_rkey(pd, vaddr, size));
+               }
+               break;
+       case SPDK_MEM_MAP_NOTIFY_UNREGISTER:
+               if (!g_nvmf_hooks.get_rkey) {
+                       mr = (struct ibv_mr *)spdk_mem_map_translate(map, (uint64_t)vaddr, NULL);
+                       spdk_mem_map_clear_translation(map, (uint64_t)vaddr, size);
+                       if (mr) {
+                               ibv_dereg_mr(mr);
+                       }
+               }
+               break;
        }
 
-       spdk_thread_send_msg(group->thread, _nvmf_rdma_disconnect, qpair);
+       return 0;
 }
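+
+/* Editor's note: a sketch of how the stored translation is consumed later (see
+ * nvmf_rdma_fill_buffers below); illustrative, with hypothetical variables:
+ *
+ *     uint64_t xlat = spdk_mem_map_translate(map, (uint64_t)buf, &len);
+ *     uint32_t lkey = g_nvmf_hooks.get_rkey
+ *                     ? (uint32_t)xlat                 // raw key stored by the hook
+ *                     : ((struct ibv_mr *)xlat)->lkey; // ibv_mr registered above
+ */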
 
 static int
-nvmf_rdma_disconnect(struct rdma_cm_event *evt)
+spdk_nvmf_rdma_check_contiguous_entries(uint64_t addr_1, uint64_t addr_2)
 {
-       struct spdk_nvmf_qpair          *qpair;
-       struct spdk_nvmf_rdma_qpair     *rqpair;
-
-       if (evt->id == NULL) {
-               SPDK_ERRLOG("disconnect request: missing cm_id\n");
-               return -1;
-       }
-
-       qpair = evt->id->context;
-       if (qpair == NULL) {
-               SPDK_ERRLOG("disconnect request: no active connection\n");
-               return -1;
-       }
-
-       rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
-
-       spdk_trace_record(TRACE_RDMA_QP_DISCONNECT, 0, 0, (uintptr_t)rqpair->cm_id, 0);
-
-       spdk_nvmf_rdma_update_ibv_state(rqpair);
-       spdk_nvmf_rdma_qpair_inc_refcnt(rqpair);
-
-       _nvmf_rdma_disconnect_retry(qpair);
-
-       return 0;
+       /* Two contiguous mappings will point to the same address, which is the start of the RDMA MR. */
+       return addr_1 == addr_2;
 }
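+
+/* Editor's note (illustration, assuming SPDK's 2 MiB memory-map granularity):
+ * a 4 MiB buffer registered by the callback above occupies two adjacent map
+ * entries, both storing the same ibv_mr pointer. Because those translations
+ * compare equal, spdk_mem_map_translate() can report a translation_len that
+ * spans the whole registration rather than a single entry. */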
 
-#ifdef DEBUG
-static const char *CM_EVENT_STR[] = {
-       "RDMA_CM_EVENT_ADDR_RESOLVED",
-       "RDMA_CM_EVENT_ADDR_ERROR",
-       "RDMA_CM_EVENT_ROUTE_RESOLVED",
-       "RDMA_CM_EVENT_ROUTE_ERROR",
-       "RDMA_CM_EVENT_CONNECT_REQUEST",
-       "RDMA_CM_EVENT_CONNECT_RESPONSE",
-       "RDMA_CM_EVENT_CONNECT_ERROR",
-       "RDMA_CM_EVENT_UNREACHABLE",
-       "RDMA_CM_EVENT_REJECTED",
-       "RDMA_CM_EVENT_ESTABLISHED",
-       "RDMA_CM_EVENT_DISCONNECTED",
-       "RDMA_CM_EVENT_DEVICE_REMOVAL",
-       "RDMA_CM_EVENT_MULTICAST_JOIN",
-       "RDMA_CM_EVENT_MULTICAST_ERROR",
-       "RDMA_CM_EVENT_ADDR_CHANGE",
-       "RDMA_CM_EVENT_TIMEWAIT_EXIT"
-};
-#endif /* DEBUG */
-
 static void
-spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
+spdk_nvmf_rdma_request_free_buffers(struct spdk_nvmf_rdma_request *rdma_req,
+                                   struct spdk_nvmf_transport_poll_group *group, struct spdk_nvmf_transport *transport,
+                                   uint32_t num_buffers)
 {
-       struct spdk_nvmf_rdma_transport *rtransport;
-       struct rdma_cm_event            *event;
-       int                             rc;
-
-       rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
-
-       if (rtransport->event_channel == NULL) {
-               return;
-       }
-
-       while (1) {
-               rc = rdma_get_cm_event(rtransport->event_channel, &event);
-               if (rc == 0) {
-                       SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Acceptor Event: %s\n", CM_EVENT_STR[event->event]);
-
-                       spdk_trace_record(TRACE_RDMA_CM_ASYNC_EVENT, 0, 0, 0, event->event);
+       uint32_t i;
 
-                       switch (event->event) {
-                       case RDMA_CM_EVENT_ADDR_RESOLVED:
-                       case RDMA_CM_EVENT_ADDR_ERROR:
-                       case RDMA_CM_EVENT_ROUTE_RESOLVED:
-                       case RDMA_CM_EVENT_ROUTE_ERROR:
-                               /* No action required. The target never attempts to resolve routes. */
-                               break;
-                       case RDMA_CM_EVENT_CONNECT_REQUEST:
-                               rc = nvmf_rdma_connect(transport, event, cb_fn);
-                               if (rc < 0) {
-                                       SPDK_ERRLOG("Unable to process connect event. rc: %d\n", rc);
-                                       break;
-                               }
-                               break;
-                       case RDMA_CM_EVENT_CONNECT_RESPONSE:
-                               /* The target never initiates a new connection. So this will not occur. */
-                               break;
-                       case RDMA_CM_EVENT_CONNECT_ERROR:
-                               /* Can this happen? The docs say it can, but not sure what causes it. */
-                               break;
-                       case RDMA_CM_EVENT_UNREACHABLE:
-                       case RDMA_CM_EVENT_REJECTED:
-                               /* These only occur on the client side. */
-                               break;
-                       case RDMA_CM_EVENT_ESTABLISHED:
-                               /* TODO: Should we be waiting for this event anywhere? */
-                               break;
-                       case RDMA_CM_EVENT_DISCONNECTED:
-                       case RDMA_CM_EVENT_DEVICE_REMOVAL:
-                               rc = nvmf_rdma_disconnect(event);
-                               if (rc < 0) {
-                                       SPDK_ERRLOG("Unable to process disconnect event. rc: %d\n", rc);
-                                       break;
-                               }
-                               break;
-                       case RDMA_CM_EVENT_MULTICAST_JOIN:
-                       case RDMA_CM_EVENT_MULTICAST_ERROR:
-                               /* Multicast is not used */
-                               break;
-                       case RDMA_CM_EVENT_ADDR_CHANGE:
-                               /* Not utilizing this event */
-                               break;
-                       case RDMA_CM_EVENT_TIMEWAIT_EXIT:
-                               /* For now, do nothing. The target never re-uses queue pairs. */
-                               break;
-                       default:
-                               SPDK_ERRLOG("Unexpected Acceptor Event [%d]\n", event->event);
-                               break;
-                       }
-
-                       rdma_ack_cm_event(event);
+       for (i = 0; i < num_buffers; i++) {
+               if (group->buf_cache_count < group->buf_cache_size) {
+                       STAILQ_INSERT_HEAD(&group->buf_cache,
+                                          (struct spdk_nvmf_transport_pg_cache_buf *)rdma_req->buffers[i], link);
+                       group->buf_cache_count++;
                } else {
-                       if (errno != EAGAIN && errno != EWOULDBLOCK) {
-                               SPDK_ERRLOG("Acceptor Event Error: %s\n", spdk_strerror(errno));
-                       }
-                       break;
+                       spdk_mempool_put(transport->data_buf_pool, rdma_req->buffers[i]);
                }
+               rdma_req->req.iov[i].iov_base = NULL;
+               rdma_req->buffers[i] = NULL;
+               rdma_req->req.iov[i].iov_len = 0;
        }
+       rdma_req->data_from_pool = false;
 }
 
 static int
-spdk_nvmf_rdma_mem_notify(void *cb_ctx, struct spdk_mem_map *map,
-                         enum spdk_mem_map_notify_action action,
-                         void *vaddr, size_t size)
+nvmf_rdma_request_get_buffers(struct spdk_nvmf_rdma_request *rdma_req,
+                             struct spdk_nvmf_transport_poll_group *group, struct spdk_nvmf_transport *transport,
+                             uint32_t num_buffers)
 {
-       struct spdk_nvmf_rdma_device *device = cb_ctx;
-       struct ibv_pd *pd = device->pd;
-       struct ibv_mr *mr;
-
-       switch (action) {
-       case SPDK_MEM_MAP_NOTIFY_REGISTER:
-               mr = ibv_reg_mr(pd, vaddr, size,
-                               IBV_ACCESS_LOCAL_WRITE |
-                               IBV_ACCESS_REMOTE_READ |
-                               IBV_ACCESS_REMOTE_WRITE);
-               if (mr == NULL) {
-                       SPDK_ERRLOG("ibv_reg_mr() failed\n");
-                       return -1;
+       uint32_t i = 0;
+
+       while (i < num_buffers) {
+               if (!(STAILQ_EMPTY(&group->buf_cache))) {
+                       group->buf_cache_count--;
+                       rdma_req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
+                       STAILQ_REMOVE_HEAD(&group->buf_cache, link);
+                       assert(rdma_req->buffers[i] != NULL);
+                       i++;
                } else {
-                       spdk_mem_map_set_translation(map, (uint64_t)vaddr, size, (uint64_t)mr);
-               }
-               break;
-       case SPDK_MEM_MAP_NOTIFY_UNREGISTER:
-               mr = (struct ibv_mr *)spdk_mem_map_translate(map, (uint64_t)vaddr, NULL);
-               spdk_mem_map_clear_translation(map, (uint64_t)vaddr, size);
-               if (mr) {
-                       ibv_dereg_mr(mr);
+                       if (spdk_mempool_get_bulk(transport->data_buf_pool, &rdma_req->buffers[i], num_buffers - i)) {
+                               goto err_exit;
+                       }
+                       i = num_buffers; /* the bulk get supplied all remaining buffers */
                }
-               break;
        }
 
        return 0;
+
+err_exit:
+       spdk_nvmf_rdma_request_free_buffers(rdma_req, group, transport, i);
+       return -ENOMEM;
 }
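+
+/* Editor's note, worked example (not in the source): spdk_mempool_get_bulk()
+ * is all-or-nothing. A request needing 4 buffers when the poll-group cache
+ * holds 1 takes the cached buffer first (i = 1), then asks the shared pool for
+ * the remaining 3 in one call; if the pool cannot supply all 3, nothing is
+ * taken from it, the cached buffer is returned via the err_exit path, and the
+ * caller sees -ENOMEM so the request waits in its pending queue. */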
 
 typedef enum spdk_nvme_data_transfer spdk_nvme_data_transfer_t;
@@ -1259,53 +1469,242 @@ spdk_nvmf_rdma_request_get_xfer(struct spdk_nvmf_rdma_request *rdma_req)
        return xfer;
 }
 
+static int
+nvmf_request_alloc_wrs(struct spdk_nvmf_rdma_transport *rtransport,
+                      struct spdk_nvmf_rdma_request *rdma_req,
+                      uint32_t num_sgl_descriptors)
+{
+       struct spdk_nvmf_rdma_request_data      *work_requests[SPDK_NVMF_MAX_SGL_ENTRIES];
+       struct spdk_nvmf_rdma_request_data      *current_data_wr;
+       uint32_t                                i;
+
+       if (spdk_mempool_get_bulk(rtransport->data_wr_pool, (void **)work_requests, num_sgl_descriptors)) {
+               return -ENOMEM;
+       }
+
+       current_data_wr = &rdma_req->data;
+
+       for (i = 0; i < num_sgl_descriptors; i++) {
+               if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
+                       current_data_wr->wr.opcode = IBV_WR_RDMA_WRITE;
+                       current_data_wr->wr.send_flags = 0;
+               } else if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
+                       current_data_wr->wr.opcode = IBV_WR_RDMA_READ;
+                       current_data_wr->wr.send_flags = IBV_SEND_SIGNALED;
+               } else {
+                       assert(false);
+               }
+               work_requests[i]->wr.sg_list = work_requests[i]->sgl;
+               work_requests[i]->wr.wr_id = rdma_req->data.wr.wr_id;
+               current_data_wr->wr.next = &work_requests[i]->wr;
+               current_data_wr = work_requests[i];
+       }
+
+       if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
+               current_data_wr->wr.opcode = IBV_WR_RDMA_WRITE;
+               current_data_wr->wr.next = &rdma_req->rsp.wr;
+               current_data_wr->wr.send_flags = 0;
+       } else if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
+               current_data_wr->wr.opcode = IBV_WR_RDMA_READ;
+               current_data_wr->wr.next = NULL;
+               current_data_wr->wr.send_flags = IBV_SEND_SIGNALED;
+       }
+       return 0;
+}
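+
+/* Editor's note: the resulting chains, drawn for N SGL descriptors (the caller
+ * passes num_sgl_descriptors - 1 extra WRs; the first WR is the embedded
+ * rdma_req->data). Illustration only:
+ *
+ *     C2H (WRITE): data.wr -> wr[0] -> ... -> wr[N-2] -> rsp.wr   (unsignaled)
+ *     H2C (READ):  data.wr -> wr[0] -> ... -> wr[N-2] -> NULL     (signaled)
+ */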
+
+static int
+nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
+                      struct spdk_nvmf_rdma_poll_group *rgroup,
+                      struct spdk_nvmf_rdma_device *device,
+                      struct spdk_nvmf_rdma_request *rdma_req,
+                      struct ibv_send_wr *wr,
+                      uint32_t length)
+{
+       uint64_t        translation_len;
+       uint32_t        remaining_length = length;
+       uint32_t        iovcnt;
+       uint32_t        i = 0;
+
+       while (remaining_length) {
+               iovcnt = rdma_req->req.iovcnt;
+               rdma_req->req.iov[iovcnt].iov_base = (void *)((uintptr_t)(rdma_req->buffers[iovcnt] +
+                                                    NVMF_DATA_BUFFER_MASK) &
+                                                    ~NVMF_DATA_BUFFER_MASK);
+               rdma_req->req.iov[iovcnt].iov_len  = spdk_min(remaining_length,
+                                                    rtransport->transport.opts.io_unit_size);
+               rdma_req->req.iovcnt++;
+               wr->sg_list[i].addr = (uintptr_t)(rdma_req->req.iov[iovcnt].iov_base);
+               wr->sg_list[i].length = rdma_req->req.iov[iovcnt].iov_len;
+               translation_len = rdma_req->req.iov[iovcnt].iov_len;
+
+               if (!g_nvmf_hooks.get_rkey) {
+                       wr->sg_list[i].lkey = ((struct ibv_mr *)spdk_mem_map_translate(device->map,
+                                              (uint64_t)rdma_req->buffers[iovcnt], &translation_len))->lkey;
+               } else {
+                       wr->sg_list[i].lkey = spdk_mem_map_translate(device->map,
+                                             (uint64_t)rdma_req->buffers[iovcnt], &translation_len);
+               }
+
+               remaining_length -= rdma_req->req.iov[iovcnt].iov_len;
+
+               if (translation_len < rdma_req->req.iov[iovcnt].iov_len) {
+                       SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
+                       return -EINVAL;
+               }
+               i++;
+       }
+       wr->num_sge = i;
+
+       return 0;
+}
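+
+/* Editor's note, worked example (not in the source): with 4 KiB alignment the
+ * mask is 0xFFF, so a pool buffer at 0x7f0012345610 yields
+ * (0x7f0012345610 + 0xFFF) & ~0xFFF = 0x7f0012346000 as iov_base. This
+ * round-up is why each pool element is allocated with extra alignment slack. */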
+
 static int
 spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
                                 struct spdk_nvmf_rdma_device *device,
                                 struct spdk_nvmf_rdma_request *rdma_req)
 {
-       void            *buf = NULL;
-       uint32_t        length = rdma_req->req.length;
-       uint32_t        i = 0;
+       struct spdk_nvmf_rdma_qpair             *rqpair;
+       struct spdk_nvmf_rdma_poll_group        *rgroup;
+       uint32_t                                num_buffers;
+       uint32_t                                i = 0;
+       int                                     rc = 0;
 
+       rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
+       rgroup = rqpair->poller->group;
        rdma_req->req.iovcnt = 0;
-       while (length) {
-               buf = spdk_mempool_get(rtransport->data_buf_pool);
-               if (!buf) {
-                       goto nomem;
-               }
 
-               rdma_req->req.iov[i].iov_base = (void *)((uintptr_t)(buf + NVMF_DATA_BUFFER_MASK) &
-                                               ~NVMF_DATA_BUFFER_MASK);
-               rdma_req->req.iov[i].iov_len  = spdk_min(length, rtransport->transport.opts.io_unit_size);
-               rdma_req->req.iovcnt++;
-               rdma_req->data.buffers[i] = buf;
-               rdma_req->data.wr.sg_list[i].addr = (uintptr_t)(rdma_req->req.iov[i].iov_base);
-               rdma_req->data.wr.sg_list[i].length = rdma_req->req.iov[i].iov_len;
-               rdma_req->data.wr.sg_list[i].lkey = ((struct ibv_mr *)spdk_mem_map_translate(device->map,
-                                                    (uint64_t)buf, NULL))->lkey;
+       num_buffers = rdma_req->req.length / rtransport->transport.opts.io_unit_size;
+       if (rdma_req->req.length % rtransport->transport.opts.io_unit_size) {
+               num_buffers++;
+       }
 
-               length -= rdma_req->req.iov[i].iov_len;
-               i++;
+       if (nvmf_rdma_request_get_buffers(rdma_req, &rgroup->group, &rtransport->transport, num_buffers)) {
+               return -ENOMEM;
+       }
+
+       rdma_req->req.iovcnt = 0;
+
+       rc = nvmf_rdma_fill_buffers(rtransport, rgroup, device, rdma_req, &rdma_req->data.wr,
+                                   rdma_req->req.length);
+       if (rc != 0) {
+               goto err_exit;
        }
 
+       assert(rdma_req->req.iovcnt <= rqpair->max_send_sge);
+
        rdma_req->data_from_pool = true;
 
-       return 0;
+       return rc;
 
-nomem:
+err_exit:
+       spdk_nvmf_rdma_request_free_buffers(rdma_req, &rgroup->group, &rtransport->transport, num_buffers);
        while (i) {
                i--;
-               spdk_mempool_put(rtransport->data_buf_pool, rdma_req->req.iov[i].iov_base);
-               rdma_req->req.iov[i].iov_base = NULL;
-               rdma_req->req.iov[i].iov_len = 0;
-
                rdma_req->data.wr.sg_list[i].addr = 0;
                rdma_req->data.wr.sg_list[i].length = 0;
                rdma_req->data.wr.sg_list[i].lkey = 0;
        }
        rdma_req->req.iovcnt = 0;
-       return -ENOMEM;
+       return rc;
+}
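+
+/* Editor's note, worked example: num_buffers above is the ceiling of
+ * length / io_unit_size. E.g. a 131072-byte request with an 8192-byte I/O unit
+ * needs exactly 16 buffers; a 13000-byte request needs 2 (one full, one
+ * partial). Equivalent arithmetic:
+ *
+ *     num_buffers = (length + io_unit_size - 1) / io_unit_size;
+ */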
+
+static int
+nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtransport,
+                                     struct spdk_nvmf_rdma_device *device,
+                                     struct spdk_nvmf_rdma_request *rdma_req)
+{
+       struct spdk_nvmf_rdma_qpair             *rqpair;
+       struct spdk_nvmf_rdma_poll_group        *rgroup;
+       struct ibv_send_wr                      *current_wr;
+       struct spdk_nvmf_request                *req = &rdma_req->req;
+       struct spdk_nvme_sgl_descriptor         *inline_segment, *desc;
+       uint32_t                                num_sgl_descriptors;
+       uint32_t                                num_buffers = 0;
+       uint32_t                                i;
+       int                                     rc;
+
+       rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
+       rgroup = rqpair->poller->group;
+
+       inline_segment = &req->cmd->nvme_cmd.dptr.sgl1;
+       assert(inline_segment->generic.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
+       assert(inline_segment->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+
+       num_sgl_descriptors = inline_segment->unkeyed.length / sizeof(struct spdk_nvme_sgl_descriptor);
+       assert(num_sgl_descriptors <= SPDK_NVMF_MAX_SGL_ENTRIES);
+       desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
+
+       for (i = 0; i < num_sgl_descriptors; i++) {
+               num_buffers += desc->keyed.length / rtransport->transport.opts.io_unit_size;
+               if (desc->keyed.length % rtransport->transport.opts.io_unit_size) {
+                       num_buffers++;
+               }
+               desc++;
+       }
+       /* If the number of buffers is too large, then we know the I/O is larger than allowed. Fail it. */
+       if (num_buffers > NVMF_REQ_MAX_BUFFERS) {
+               return -EINVAL;
+       }
+       if (nvmf_rdma_request_get_buffers(rdma_req, &rgroup->group, &rtransport->transport,
+                                         num_buffers) != 0) {
+               return -ENOMEM;
+       }
+
+       if (nvmf_request_alloc_wrs(rtransport, rdma_req, num_sgl_descriptors - 1) != 0) {
+               spdk_nvmf_rdma_request_free_buffers(rdma_req, &rgroup->group, &rtransport->transport, num_buffers);
+               return -ENOMEM;
+       }
+
+       /* The first WR must always be the embedded data WR. This is how we unwind them later. */
+       current_wr = &rdma_req->data.wr;
+
+       req->iovcnt = 0;
+       desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
+       for (i = 0; i < num_sgl_descriptors; i++) {
+               /* The descriptors must be keyed data block descriptors with an address, not an offset. */
+               if (spdk_unlikely(desc->generic.type != SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK ||
+                                 desc->keyed.subtype != SPDK_NVME_SGL_SUBTYPE_ADDRESS)) {
+                       rc = -EINVAL;
+                       goto err_exit;
+               }
+
+               current_wr->num_sge = 0;
+               req->length += desc->keyed.length;
+
+               rc = nvmf_rdma_fill_buffers(rtransport, rgroup, device, rdma_req, current_wr,
+                                           desc->keyed.length);
+               if (rc != 0) {
+                       rc = -ENOMEM;
+                       goto err_exit;
+               }
+
+               current_wr->wr.rdma.rkey = desc->keyed.key;
+               current_wr->wr.rdma.remote_addr = desc->address;
+               current_wr = current_wr->next;
+               desc++;
+       }
+
+#ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL
+       /* Go back to the last descriptor in the list. */
+       desc--;
+       if ((device->attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) != 0) {
+               if (desc->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY) {
+                       rdma_req->rsp.wr.opcode = IBV_WR_SEND_WITH_INV;
+                       rdma_req->rsp.wr.imm_data = desc->keyed.key;
+               }
+       }
+#endif
+
+       rdma_req->num_outstanding_data_wr = num_sgl_descriptors;
+       rdma_req->data_from_pool = true;
+
+       return 0;
+
+err_exit:
+       spdk_nvmf_rdma_request_free_buffers(rdma_req, &rgroup->group, &rtransport->transport, num_buffers);
+       nvmf_rdma_request_free_data(rdma_req, rtransport);
+       return rc;
 }
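+
+/* Editor's note, worked example (illustration): a last-segment descriptor with
+ * unkeyed.length = 64 carries 64 / sizeof(struct spdk_nvme_sgl_descriptor) =
+ * 64 / 16 = 4 keyed data-block descriptors in the capsule. Each contributes
+ * ceil(keyed.length / io_unit_size) buffers, and the sum is rejected above if
+ * it exceeds NVMF_REQ_MAX_BUFFERS (SPDK_NVMF_MAX_SGL_ENTRIES * 2). */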
 
 static int
@@ -1316,6 +1715,7 @@ spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
        struct spdk_nvme_cmd                    *cmd;
        struct spdk_nvme_cpl                    *rsp;
        struct spdk_nvme_sgl_descriptor         *sgl;
+       int                                     rc;
 
        cmd = &rdma_req->req.cmd->nvme_cmd;
        rsp = &rdma_req->req.rsp->nvme_cpl;
@@ -1355,6 +1755,18 @@ spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
                rdma_req->data.wr.num_sge = rdma_req->req.iovcnt;
                rdma_req->data.wr.wr.rdma.rkey = sgl->keyed.key;
                rdma_req->data.wr.wr.rdma.remote_addr = sgl->address;
+               if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
+                       rdma_req->data.wr.opcode = IBV_WR_RDMA_WRITE;
+                       rdma_req->data.wr.next = &rdma_req->rsp.wr;
+                       rdma_req->data.wr.send_flags &= ~IBV_SEND_SIGNALED;
+               } else if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
+                       rdma_req->data.wr.opcode = IBV_WR_RDMA_READ;
+                       rdma_req->data.wr.next = NULL;
+                       rdma_req->data.wr.send_flags |= IBV_SEND_SIGNALED;
+               }
+
+               /* set the number of outstanding data WRs for this request. */
+               rdma_req->num_outstanding_data_wr = 1;
 
                SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Request %p took %d buffer/s from central pool\n", rdma_req,
                              rdma_req->req.iovcnt);
@@ -1383,6 +1795,7 @@ spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
                        return -1;
                }
 
+               rdma_req->num_outstanding_data_wr = 0;
                rdma_req->req.data = rdma_req->recv->buf + offset;
                rdma_req->data_from_pool = false;
                rdma_req->req.length = sgl->unkeyed.length;
@@ -1391,6 +1804,25 @@ spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
                rdma_req->req.iov[0].iov_len = rdma_req->req.length;
                rdma_req->req.iovcnt = 1;
 
+               return 0;
+       } else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT &&
+                  sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
+
+               rc = nvmf_rdma_request_fill_iovs_multi_sgl(rtransport, device, rdma_req);
+               if (rc == -ENOMEM) {
+                       SPDK_DEBUGLOG(SPDK_LOG_RDMA, "No available large data buffers. Queueing request %p\n", rdma_req);
+                       return 0;
+               } else if (rc == -EINVAL) {
+                       SPDK_ERRLOG("Multi SGL element request length exceeds the max I/O size\n");
+                       return -1;
+               }
+
+               /* Set req.data to the first iov for backward compatibility */
+               rdma_req->req.data = rdma_req->req.iov[0].iov_base;
+
+               SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Request %p took %d buffer/s from central pool\n", rdma_req,
+                             rdma_req->req.iovcnt);
+
                return 0;
        }
 
@@ -1400,32 +1832,63 @@ spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
        return -1;
 }
 
+static void
+nvmf_rdma_request_free(struct spdk_nvmf_rdma_request *rdma_req,
+                      struct spdk_nvmf_rdma_transport  *rtransport)
+{
+       struct spdk_nvmf_rdma_qpair             *rqpair;
+       struct spdk_nvmf_rdma_poll_group        *rgroup;
+
+       rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
+       if (rdma_req->data_from_pool) {
+               rgroup = rqpair->poller->group;
+
+               spdk_nvmf_rdma_request_free_buffers(rdma_req, &rgroup->group, &rtransport->transport,
+                                                   rdma_req->req.iovcnt);
+       }
+       nvmf_rdma_request_free_data(rdma_req, rtransport);
+       rdma_req->req.length = 0;
+       rdma_req->req.iovcnt = 0;
+       rdma_req->req.data = NULL;
+       rdma_req->rsp.wr.next = NULL;
+       rdma_req->data.wr.next = NULL;
+       rqpair->qd--;
+
+       STAILQ_INSERT_HEAD(&rqpair->resources->free_queue, rdma_req, state_link);
+       rdma_req->state = RDMA_REQUEST_STATE_FREE;
+}
+
 static bool
 spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
                               struct spdk_nvmf_rdma_request *rdma_req)
 {
        struct spdk_nvmf_rdma_qpair     *rqpair;
        struct spdk_nvmf_rdma_device    *device;
+       struct spdk_nvmf_rdma_poll_group *rgroup;
        struct spdk_nvme_cpl            *rsp = &rdma_req->req.rsp->nvme_cpl;
        int                             rc;
        struct spdk_nvmf_rdma_recv      *rdma_recv;
        enum spdk_nvmf_rdma_request_state prev_state;
        bool                            progress = false;
        int                             data_posted;
-       int                             cur_rdma_rw_depth;
 
        rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
        device = rqpair->port->device;
+       rgroup = rqpair->poller->group;
 
        assert(rdma_req->state != RDMA_REQUEST_STATE_FREE);
 
        /* If the queue pair is in an error state, force the request to the completed state
         * to release resources. */
-       if (rqpair->ibv_attr.qp_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
+       if (rqpair->ibv_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
                if (rdma_req->state == RDMA_REQUEST_STATE_NEED_BUFFER) {
-                       TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
+                       STAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
+               } else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING) {
+                       STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
+               } else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING) {
+                       STAILQ_REMOVE(&rqpair->pending_rdma_write_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
                }
-               spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
+               rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
        }
 
        /* The loop here is to allow for several back-to-back state changes. */
@@ -1448,10 +1911,8 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
                        rdma_req->req.cmd = (union nvmf_h2c_msg *)rdma_recv->sgl[0].addr;
                        memset(rdma_req->req.rsp, 0, sizeof(*rdma_req->req.rsp));
 
-                       TAILQ_REMOVE(&rqpair->incoming_queue, rdma_recv, link);
-
-                       if (rqpair->ibv_attr.qp_state == IBV_QPS_ERR) {
-                               spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
+                       if (rqpair->ibv_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
+                               rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
                                break;
                        }
 
@@ -1460,12 +1921,12 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 
                        /* If no data to transfer, ready to execute. */
                        if (rdma_req->req.xfer == SPDK_NVME_DATA_NONE) {
-                               spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_EXECUTE);
+                               rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
                                break;
                        }
 
-                       spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_NEED_BUFFER);
-                       TAILQ_INSERT_TAIL(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
+                       rdma_req->state = RDMA_REQUEST_STATE_NEED_BUFFER;
+                       STAILQ_INSERT_TAIL(&rgroup->pending_data_buf_queue, rdma_req, state_link);
                        break;
                case RDMA_REQUEST_STATE_NEED_BUFFER:
                        spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEED_BUFFER, 0, 0,
@@ -1473,7 +1934,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 
                        assert(rdma_req->req.xfer != SPDK_NVME_DATA_NONE);
 
-                       if (rdma_req != TAILQ_FIRST(&rqpair->ch->pending_data_buf_queue)) {
+                       if (rdma_req != STAILQ_FIRST(&rgroup->pending_data_buf_queue)) {
                                /* This request needs to wait in line to obtain a buffer */
                                break;
                        }
@@ -1481,9 +1942,9 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
                        /* Try to get a data buffer */
                        rc = spdk_nvmf_rdma_request_parse_sgl(rtransport, device, rdma_req);
                        if (rc < 0) {
-                               TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
+                               STAILQ_REMOVE_HEAD(&rgroup->pending_data_buf_queue, state_link);
                                rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
-                               spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_COMPLETE);
+                               rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
                                break;
                        }
 
@@ -1492,53 +1953,42 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
                                break;
                        }
 
-                       TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
+                       STAILQ_REMOVE_HEAD(&rgroup->pending_data_buf_queue, state_link);
 
                        /* If data is transferring from host to controller and the data didn't
                         * arrive using in capsule data, we need to do a transfer from the host.
                         */
                        if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER && rdma_req->data_from_pool) {
-                               spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);
+                               STAILQ_INSERT_TAIL(&rqpair->pending_rdma_read_queue, rdma_req, state_link);
+                               rdma_req->state = RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING;
                                break;
                        }
 
-                       spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_EXECUTE);
+                       rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
                        break;
-               case RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING:
-                       spdk_trace_record(TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING, 0, 0,
+               case RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING:
+                       spdk_trace_record(TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING, 0, 0,
                                          (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
 
-                       if (rdma_req != TAILQ_FIRST(&rqpair->state_queue[RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING])) {
+                       if (rdma_req != STAILQ_FIRST(&rqpair->pending_rdma_read_queue)) {
                                /* This request needs to wait in line to perform RDMA */
                                break;
                        }
-                       cur_rdma_rw_depth = spdk_nvmf_rdma_cur_rw_depth(rqpair);
-
-                       if (cur_rdma_rw_depth >= rqpair->max_rw_depth) {
-                               /* R/W queue is full, need to wait */
+                       if (rqpair->current_send_depth + rdma_req->num_outstanding_data_wr > rqpair->max_send_depth
+                           || rqpair->current_read_depth + rdma_req->num_outstanding_data_wr > rqpair->max_read_depth) {
+                               /* We can only have so many WRs outstanding; we have to wait until some finish. */
                                break;
                        }
 
-                       if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
-                               rc = request_transfer_in(&rdma_req->req);
-                               if (!rc) {
-                                       spdk_nvmf_rdma_request_set_state(rdma_req,
-                                                                        RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
-                               } else {
-                                       rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
-                                       spdk_nvmf_rdma_request_set_state(rdma_req,
-                                                                        RDMA_REQUEST_STATE_READY_TO_COMPLETE);
-                               }
-                       } else if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
-                               /* The data transfer will be kicked off from
-                                * RDMA_REQUEST_STATE_READY_TO_COMPLETE state.
-                                */
-                               spdk_nvmf_rdma_request_set_state(rdma_req,
-                                                                RDMA_REQUEST_STATE_READY_TO_COMPLETE);
+                       /* We have already verified that this request is the head of the queue. */
+                       STAILQ_REMOVE_HEAD(&rqpair->pending_rdma_read_queue, state_link);
+
+                       rc = request_transfer_in(&rdma_req->req);
+                       if (!rc) {
+                               rdma_req->state = RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;
                        } else {
-                               SPDK_ERRLOG("Cannot perform data transfer, unknown state: %u\n",
-                                           rdma_req->req.xfer);
-                               assert(0);
+                               rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+                               rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
                        }
                        break;
                case RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
@@ -1550,7 +2000,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
                case RDMA_REQUEST_STATE_READY_TO_EXECUTE:
                        spdk_trace_record(TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE, 0, 0,
                                          (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
-                       spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_EXECUTING);
+                       rdma_req->state = RDMA_REQUEST_STATE_EXECUTING;
                        spdk_nvmf_request_exec(&rdma_req->req);
                        break;
                case RDMA_REQUEST_STATE_EXECUTING:
@@ -1563,10 +2013,34 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
                        spdk_trace_record(TRACE_RDMA_REQUEST_STATE_EXECUTED, 0, 0,
                                          (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
                        if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
-                               spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);
+                               STAILQ_INSERT_TAIL(&rqpair->pending_rdma_write_queue, rdma_req, state_link);
+                               rdma_req->state = RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING;
                        } else {
-                               spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_COMPLETE);
+                               rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
+                       }
+                       break;
+               case RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING:
+                       spdk_trace_record(TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING, 0, 0,
+                                         (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
+
+                       if (rdma_req != STAILQ_FIRST(&rqpair->pending_rdma_write_queue)) {
+                               /* This request needs to wait in line to perform RDMA */
+                               break;
                        }
+                       if ((rqpair->current_send_depth + rdma_req->num_outstanding_data_wr + 1) >
+                           rqpair->max_send_depth) {
+                               /* We can only have so many WRs outstanding; we have to wait until some finish.
+                                * The +1 accounts for the response WR posted with each request. */
+                               break;
+                       }
+
+                       /* We have already verified that this request is the head of the queue. */
+                       STAILQ_REMOVE_HEAD(&rqpair->pending_rdma_write_queue, state_link);
+
+                       /* The data transfer will be kicked off from
+                        * RDMA_REQUEST_STATE_READY_TO_COMPLETE state.
+                        */
+                       rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
                        break;
                case RDMA_REQUEST_STATE_READY_TO_COMPLETE:
                        spdk_trace_record(TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE, 0, 0,
@@ -1574,12 +2048,10 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
                        rc = request_transfer_out(&rdma_req->req, &data_posted);
                        assert(rc == 0); /* No good way to handle this currently */
                        if (rc) {
-                               spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
+                               rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
                        } else {
-                               spdk_nvmf_rdma_request_set_state(rdma_req,
-                                                                data_posted ?
-                                                                RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST :
-                                                                RDMA_REQUEST_STATE_COMPLETING);
+                               rdma_req->state = data_posted ? RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST :
+                                                 RDMA_REQUEST_STATE_COMPLETING;
                        }
                        break;
                case RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST:
@@ -1598,19 +2070,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
                        spdk_trace_record(TRACE_RDMA_REQUEST_STATE_COMPLETED, 0, 0,
                                          (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
 
-                       if (rdma_req->data_from_pool) {
-                               /* Put the buffer/s back in the pool */
-                               for (uint32_t i = 0; i < rdma_req->req.iovcnt; i++) {
-                                       spdk_mempool_put(rtransport->data_buf_pool, rdma_req->data.buffers[i]);
-                                       rdma_req->req.iov[i].iov_base = NULL;
-                                       rdma_req->data.buffers[i] = NULL;
-                               }
-                               rdma_req->data_from_pool = false;
-                       }
-                       rdma_req->req.length = 0;
-                       rdma_req->req.iovcnt = 0;
-                       rdma_req->req.data = NULL;
-                       spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_FREE);
+                       nvmf_rdma_request_free(rdma_req, rtransport);
                        break;
                case RDMA_REQUEST_NUM_STATES:
                default:
@@ -1630,22 +2090,35 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 
 #define SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH 128
 #define SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH 128
-#define SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR 64
+#define SPDK_NVMF_RDMA_DEFAULT_SRQ_DEPTH 4096
+#define SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR 128
 #define SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE 4096
 #define SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE 131072
-#define SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE 131072
+#define SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE (SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE / SPDK_NVMF_MAX_SGL_ENTRIES)
+#define SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS 4095
+#define SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE 32
+#define SPDK_NVMF_RDMA_DEFAULT_NO_SRQ false
 
 static void
 spdk_nvmf_rdma_opts_init(struct spdk_nvmf_transport_opts *opts)
 {
-       opts->max_queue_depth =      SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH;
-       opts->max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR;
-       opts->in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE;
-       opts->max_io_size =          SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE;
-       opts->io_unit_size =         SPDK_NVMF_RDMA_DEFAULT_IO_BUFFER_SIZE;
-       opts->max_aq_depth =         SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH;
+       opts->max_queue_depth =         SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH;
+       opts->max_qpairs_per_ctrlr =    SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR;
+       opts->in_capsule_data_size =    SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE;
+       opts->max_io_size =             SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE;
+       opts->io_unit_size =            SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE;
+       opts->max_aq_depth =            SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH;
+       opts->num_shared_buffers =      SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS;
+       opts->buf_cache_size =          SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE;
+       opts->max_srq_depth =           SPDK_NVMF_RDMA_DEFAULT_SRQ_DEPTH;
+       opts->no_srq =                  SPDK_NVMF_RDMA_DEFAULT_NO_SRQ;
 }
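+
+/* Editor's note: a hedged usage sketch. These symbols are internal to the
+ * transport; real deployments set the same fields externally (e.g. via an RPC
+ * such as nvmf_create_transport), but the effect is equivalent to:
+ *
+ *     struct spdk_nvmf_transport_opts opts = {};
+ *
+ *     spdk_nvmf_rdma_opts_init(&opts);
+ *     opts.no_srq = true;              // opt out of the shared receive queue
+ *     opts.max_srq_depth = 8192;       // or, instead, deepen the SRQ
+ */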
 
+const struct spdk_mem_map_ops g_nvmf_rdma_map_ops = {
+       .notify_cb = spdk_nvmf_rdma_mem_notify,
+       .are_contiguous = spdk_nvmf_rdma_check_contiguous_entries
+};
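+
+/* Editor's note: sketch of how these ops are expected to be wired up when a
+ * device is initialized (illustrative; the pd is passed as cb_ctx so the
+ * notify callback above can register memory against it):
+ *
+ *     device->map = spdk_mem_map_alloc(0, &g_nvmf_rdma_map_ops, device->pd);
+ *     if (device->map == NULL) {
+ *             SPDK_ERRLOG("Unable to allocate memory map\n");
+ *             // ...error handling...
+ *     }
+ */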
+
 static int spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport);
 
 static struct spdk_nvmf_transport *
@@ -1654,15 +2127,13 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
        int rc;
        struct spdk_nvmf_rdma_transport *rtransport;
        struct spdk_nvmf_rdma_device    *device, *tmp;
+       struct ibv_pd                   *pd;
        struct ibv_context              **contexts;
        uint32_t                        i;
        int                             flag;
        uint32_t                        sge_count;
-
-       const struct spdk_mem_map_ops nvmf_rdma_map_ops = {
-               .notify_cb = spdk_nvmf_rdma_mem_notify,
-               .are_contiguous = NULL
-       };
+       uint32_t                        min_shared_buffers;
+       int                             max_device_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
 
        rtransport = calloc(1, sizeof(*rtransport));
        if (!rtransport) {
@@ -1675,11 +2146,6 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
                return NULL;
        }
 
-       spdk_io_device_register(rtransport, spdk_nvmf_rdma_mgmt_channel_create,
-                               spdk_nvmf_rdma_mgmt_channel_destroy,
-                               sizeof(struct spdk_nvmf_rdma_mgmt_channel),
-                               "rdma_transport");
-
        TAILQ_INIT(&rtransport->devices);
        TAILQ_INIT(&rtransport->ports);
 
@@ -1688,21 +2154,43 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
        SPDK_INFOLOG(SPDK_LOG_RDMA, "*** RDMA Transport Init ***\n"
                     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
                     "  max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
-                    "  in_capsule_data_size=%d, max_aq_depth=%d\n",
+                    "  in_capsule_data_size=%d, max_aq_depth=%d,\n"
+                    "  num_shared_buffers=%d, max_srq_depth=%d, no_srq=%d\n",
                     opts->max_queue_depth,
                     opts->max_io_size,
                     opts->max_qpairs_per_ctrlr,
                     opts->io_unit_size,
                     opts->in_capsule_data_size,
-                    opts->max_aq_depth);
+                    opts->max_aq_depth,
+                    opts->num_shared_buffers,
+                    opts->max_srq_depth,
+                    opts->no_srq);
 
        /* I/O unit size cannot be larger than max I/O size */
        if (opts->io_unit_size > opts->max_io_size) {
                opts->io_unit_size = opts->max_io_size;
        }
 
+       if (opts->num_shared_buffers < (SPDK_NVMF_MAX_SGL_ENTRIES * 2)) {
+               SPDK_ERRLOG("The number of shared data buffers (%d) is less than"
+                           "the minimum number required to guarantee that forward progress can be made (%d)\n",
+                           opts->num_shared_buffers, (SPDK_NVMF_MAX_SGL_ENTRIES * 2));
+               spdk_nvmf_rdma_destroy(&rtransport->transport);
+               return NULL;
+       }
+
+       min_shared_buffers = spdk_thread_get_count() * opts->buf_cache_size;
+       if (min_shared_buffers > opts->num_shared_buffers) {
+               SPDK_ERRLOG("There are not enough buffers to satisfy"
+                           "per-poll group caches for each thread. (%" PRIu32 ")"
+                           "supplied. (%" PRIu32 ") required\n", opts->num_shared_buffers, min_shared_buffers);
+               SPDK_ERRLOG("Please specify a larger number of shared buffers\n");
+               spdk_nvmf_rdma_destroy(&rtransport->transport);
+               return NULL;
+       }
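+       /* Worked example (editorial): with SPDK_NVMF_MAX_SGL_ENTRIES = 16, the
+        * floor above is 32 buffers, and e.g. 8 poll threads at the default
+        * buf_cache_size of 32 need 8 * 32 = 256 shared buffers; the default of
+        * 4095 clears both checks. */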
+
        sge_count = opts->max_io_size / opts->io_unit_size;
-       if (sge_count > SPDK_NVMF_MAX_SGL_ENTRIES) {
+       if (sge_count > NVMF_DEFAULT_TX_SGE) {
                SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
                spdk_nvmf_rdma_destroy(&rtransport->transport);
                return NULL;
@@ -1723,13 +2211,13 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
                return NULL;
        }
 
-       rtransport->data_buf_pool = spdk_mempool_create("spdk_nvmf_rdma",
-                                   opts->max_queue_depth * 4, /* The 4 is arbitrarily chosen. Needs to be configurable. */
-                                   opts->max_io_size + NVMF_DATA_BUFFER_ALIGNMENT,
-                                   SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
-                                   SPDK_ENV_SOCKET_ID_ANY);
-       if (!rtransport->data_buf_pool) {
-               SPDK_ERRLOG("Unable to allocate buffer pool for poll group\n");
+       rtransport->data_wr_pool = spdk_mempool_create("spdk_nvmf_rdma_wr_data",
+                                  opts->max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES,
+                                  sizeof(struct spdk_nvmf_rdma_request_data),
+                                  SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+                                  SPDK_ENV_SOCKET_ID_ANY);
+       if (!rtransport->data_wr_pool) {
+               SPDK_ERRLOG("Unable to allocate work request pool for poll group\n");
                spdk_nvmf_rdma_destroy(&rtransport->transport);
                return NULL;
        }
@@ -1759,6 +2247,8 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
 
                }
 
+               max_device_sge = spdk_min(max_device_sge, device->attr.max_sge);
+
 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL
                if ((device->attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) == 0) {
                        SPDK_WARNLOG("The libibverbs on this system supports SEND_WITH_INVALIDATE,");
@@ -1788,28 +2278,51 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
                        break;
                }
 
-               device->pd = ibv_alloc_pd(device->context);
-               if (!device->pd) {
-                       SPDK_ERRLOG("Unable to allocate protection domain.\n");
-                       free(device);
-                       rc = -1;
-                       break;
+               TAILQ_INSERT_TAIL(&rtransport->devices, device, link);
+               i++;
+
+               pd = NULL;
+               if (g_nvmf_hooks.get_ibv_pd) {
+                       pd = g_nvmf_hooks.get_ibv_pd(NULL, device->context);
+               }
+
+               if (!g_nvmf_hooks.get_ibv_pd) {
+                       device->pd = ibv_alloc_pd(device->context);
+                       if (!device->pd) {
+                               SPDK_ERRLOG("Unable to allocate protection domain.\n");
+                               spdk_nvmf_rdma_destroy(&rtransport->transport);
+                               return NULL;
+                       }
+               } else {
+                       device->pd = pd;
                }
 
-               device->map = spdk_mem_map_alloc(0, &nvmf_rdma_map_ops, device);
+               assert(device->map == NULL);
+
+               device->map = spdk_mem_map_alloc(0, &g_nvmf_rdma_map_ops, device->pd);
                if (!device->map) {
-                       SPDK_ERRLOG("Unable to allocate memory map for new poll group\n");
-                       ibv_dealloc_pd(device->pd);
-                       free(device);
-                       rc = -1;
-                       break;
+                       SPDK_ERRLOG("Unable to allocate memory map for listen address\n");
+                       spdk_nvmf_rdma_destroy(&rtransport->transport);
+                       return NULL;
                }
 
-               TAILQ_INSERT_TAIL(&rtransport->devices, device, link);
-               i++;
+               assert(device->map != NULL);
+               assert(device->pd != NULL);
        }
        rdma_free_devices(contexts);
 
+       if (opts->io_unit_size * max_device_sge < opts->max_io_size) {
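+               /* Worked example (editorial): max_sge = 8 with max_io_size =
+                * 131072 yields (131072 + 7) / 8 = 16384, which is already 4KiB
+                * aligned; an awkward max_sge such as 30 yields 4370, which the
+                * alignment step below rounds up to 8192. */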
+               /* divide and round up. */
+               opts->io_unit_size = (opts->max_io_size + max_device_sge - 1) / max_device_sge;
+
+               /* round up to the nearest 4k. */
+               opts->io_unit_size = (opts->io_unit_size + NVMF_DATA_BUFFER_ALIGNMENT - 1) & ~NVMF_DATA_BUFFER_MASK;
+
+               opts->io_unit_size = spdk_max(opts->io_unit_size, SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE);
+               SPDK_NOTICELOG("Adjusting the io unit size to fit the device's maximum I/O size. New I/O unit size %u\n",
+                              opts->io_unit_size);
+       }
+
        if (rc < 0) {
                spdk_nvmf_rdma_destroy(&rtransport->transport);
                return NULL;
@@ -1867,28 +2380,34 @@ spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
                        spdk_mem_map_free(&device->map);
                }
                if (device->pd) {
-                       ibv_dealloc_pd(device->pd);
+                       if (!g_nvmf_hooks.get_ibv_pd) {
+                               ibv_dealloc_pd(device->pd);
+                       }
                }
                free(device);
        }
 
-       if (rtransport->data_buf_pool != NULL) {
-               if (spdk_mempool_count(rtransport->data_buf_pool) !=
-                   (transport->opts.max_queue_depth * 4)) {
-                       SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
-                                   spdk_mempool_count(rtransport->data_buf_pool),
-                                   transport->opts.max_queue_depth * 4);
+       if (rtransport->data_wr_pool != NULL) {
+               if (spdk_mempool_count(rtransport->data_wr_pool) !=
+                   (transport->opts.max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES)) {
+                       SPDK_ERRLOG("transport wr pool count is %zu but should be %u\n",
+                                   spdk_mempool_count(rtransport->data_wr_pool),
+                                   transport->opts.max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES);
                }
        }
 
-       spdk_mempool_free(rtransport->data_buf_pool);
-       spdk_io_device_unregister(rtransport, NULL);
+       spdk_mempool_free(rtransport->data_wr_pool);
        pthread_mutex_destroy(&rtransport->lock);
        free(rtransport);
 
        return 0;
 }
 
+static int
+spdk_nvmf_rdma_trid_from_cm_id(struct rdma_cm_id *id,
+                              struct spdk_nvme_transport_id *trid,
+                              bool peer);
+
 static int
 spdk_nvmf_rdma_listen(struct spdk_nvmf_transport *transport,
                      const struct spdk_nvme_transport_id *trid)
@@ -2054,66 +2573,50 @@ spdk_nvmf_rdma_stop_listen(struct spdk_nvmf_transport *transport,
        return 0;
 }
 
-static bool
-spdk_nvmf_rdma_qpair_is_idle(struct spdk_nvmf_qpair *qpair)
-{
-       int cur_queue_depth, cur_rdma_rw_depth;
-       struct spdk_nvmf_rdma_qpair *rqpair;
-
-       rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
-       cur_queue_depth = spdk_nvmf_rdma_cur_queue_depth(rqpair);
-       cur_rdma_rw_depth = spdk_nvmf_rdma_cur_rw_depth(rqpair);
-
-       if (cur_queue_depth == 0 && cur_rdma_rw_depth == 0) {
-               return true;
-       }
-       return false;
-}
-
 static void
 spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport,
-                                    struct spdk_nvmf_rdma_qpair *rqpair)
+                                    struct spdk_nvmf_rdma_qpair *rqpair, bool drain)
 {
-       struct spdk_nvmf_rdma_recv      *rdma_recv, *recv_tmp;
        struct spdk_nvmf_rdma_request   *rdma_req, *req_tmp;
+       struct spdk_nvmf_rdma_resources *resources;
 
-       /* We process I/O in the data transfer pending queue at the highest priority. */
-       TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING],
-                          state_link, req_tmp) {
-               if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false) {
+       /* We process I/O in the data transfer pending queue at the highest priority: RDMA reads first. */
+       STAILQ_FOREACH_SAFE(rdma_req, &rqpair->pending_rdma_read_queue, state_link, req_tmp) {
+               if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
                        break;
                }
        }
 
-       /* The second highest priority is I/O waiting on memory buffers. */
-       TAILQ_FOREACH_SAFE(rdma_req, &rqpair->ch->pending_data_buf_queue, link,
-                          req_tmp) {
-               if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false) {
+       /* Then RDMA writes, since reads have stronger restrictions than writes. */
+       STAILQ_FOREACH_SAFE(rdma_req, &rqpair->pending_rdma_write_queue, state_link, req_tmp) {
+               if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
                        break;
                }
        }
 
-       if (rqpair->qpair_disconnected) {
-               spdk_nvmf_rdma_qpair_destroy(rqpair);
-               return;
+       /* The second highest priority is I/O waiting on memory buffers. */
+       STAILQ_FOREACH_SAFE(rdma_req, &rqpair->poller->group->pending_data_buf_queue, state_link,
+                           req_tmp) {
+               if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
+                       break;
+               }
        }
 
-       /* Do not process newly received commands if qp is in ERROR state,
-        * wait till the recovery is complete.
-        */
-       if (rqpair->ibv_attr.qp_state == IBV_QPS_ERR) {
-               return;
-       }
+       resources = rqpair->resources;
+       while (!STAILQ_EMPTY(&resources->free_queue) && !STAILQ_EMPTY(&resources->incoming_queue)) {
+               rdma_req = STAILQ_FIRST(&resources->free_queue);
+               STAILQ_REMOVE_HEAD(&resources->free_queue, state_link);
+               rdma_req->recv = STAILQ_FIRST(&resources->incoming_queue);
+               STAILQ_REMOVE_HEAD(&resources->incoming_queue, link);
 
-       /* The lowest priority is processing newly received commands */
-       TAILQ_FOREACH_SAFE(rdma_recv, &rqpair->incoming_queue, link, recv_tmp) {
-               if (TAILQ_EMPTY(&rqpair->state_queue[RDMA_REQUEST_STATE_FREE])) {
-                       break;
+               if (rqpair->srq != NULL) {
+                       rdma_req->req.qpair = &rdma_req->recv->qpair->qpair;
+                       rdma_req->recv->qpair->qd++;
+               } else {
+                       rqpair->qd++;
                }
 
-               rdma_req = TAILQ_FIRST(&rqpair->state_queue[RDMA_REQUEST_STATE_FREE]);
-               rdma_req->recv = rdma_recv;
-               spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_NEW);
+               rdma_req->state = RDMA_REQUEST_STATE_NEW;
                if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false) {
                        break;
                }
@@ -2121,184 +2624,202 @@ spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport
 }
 
 static void
-spdk_nvmf_rdma_drain_state_queue(struct spdk_nvmf_rdma_qpair *rqpair,
-                                enum spdk_nvmf_rdma_request_state state)
+_nvmf_rdma_qpair_disconnect(void *ctx)
 {
-       struct spdk_nvmf_rdma_request *rdma_req, *req_tmp;
-       struct spdk_nvmf_rdma_transport *rtransport;
+       struct spdk_nvmf_qpair *qpair = ctx;
 
-       TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[state], state_link, req_tmp) {
-               rtransport = SPDK_CONTAINEROF(rdma_req->req.qpair->transport,
-                                             struct spdk_nvmf_rdma_transport, transport);
-               spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
-               spdk_nvmf_rdma_request_process(rtransport, rdma_req);
-       }
+       spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
 }
 
 static void
-spdk_nvmf_rdma_qpair_recover(struct spdk_nvmf_rdma_qpair *rqpair)
+_nvmf_rdma_try_disconnect(void *ctx)
 {
-       enum ibv_qp_state state, next_state;
-       int recovered;
-       struct spdk_nvmf_rdma_transport *rtransport;
+       struct spdk_nvmf_qpair *qpair = ctx;
+       struct spdk_nvmf_poll_group *group;
 
-       if (!spdk_nvmf_rdma_qpair_is_idle(&rqpair->qpair)) {
-               /* There must be outstanding requests down to media.
-                * If so, wait till they're complete.
-                */
-               assert(!TAILQ_EMPTY(&rqpair->qpair.outstanding));
+       /* Read the group out of the qpair. This is normally set and accessed only from
+        * the thread that created the group. Here, we're not on that thread necessarily.
+        * The data member qpair->group begins its life as NULL and then is assigned to
+        * a pointer and never changes. So fortunately reading this and checking for
+        * non-NULL is thread safe in the x86_64 memory model. */
+       group = qpair->group;
+
+       if (group == NULL) {
+               /* The qpair hasn't been assigned to a group yet, so we can't
+                * process a disconnect. Send a message to ourself and try again. */
+               spdk_thread_send_msg(spdk_get_thread(), _nvmf_rdma_try_disconnect, qpair);
                return;
        }
 
-       state = rqpair->ibv_attr.qp_state;
-       next_state = state;
+       spdk_thread_send_msg(group->thread, _nvmf_rdma_qpair_disconnect, qpair);
+}
 
-       SPDK_NOTICELOG("RDMA qpair %u is in state: %s\n",
-                      rqpair->qpair.qid,
-                      str_ibv_qp_state[state]);
+static inline void
+spdk_nvmf_rdma_start_disconnect(struct spdk_nvmf_rdma_qpair *rqpair)
+{
+       if (__sync_bool_compare_and_swap(&rqpair->disconnect_started, false, true)) {
+               _nvmf_rdma_try_disconnect(&rqpair->qpair);
+       }
+}
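+
+/* Editorial note: the compare-and-swap above makes disconnect initiation
+ * idempotent. Several paths can observe a dead qpair (CM disconnect events,
+ * IBV async events, failed work requests), but only the first caller flips
+ * disconnect_started and schedules _nvmf_rdma_try_disconnect(); later callers
+ * return immediately. */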
 
-       if (!(state == IBV_QPS_ERR || state == IBV_QPS_RESET)) {
-               SPDK_ERRLOG("Can't recover RDMA qpair %u from the state: %s\n",
-                           rqpair->qpair.qid,
-                           str_ibv_qp_state[state]);
-               spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
+static void
+nvmf_rdma_destroy_drained_qpair(void *ctx)
+{
+       struct spdk_nvmf_rdma_qpair *rqpair = ctx;
+       struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
+                       struct spdk_nvmf_rdma_transport, transport);
+
+       /* In the non-SRQ path, we will reach rqpair->max_queue_depth. In the SRQ path, we will get the last_wqe event. */
+       if (rqpair->current_send_depth != 0) {
                return;
        }
 
-       recovered = 0;
-       while (!recovered) {
-               switch (state) {
-               case IBV_QPS_ERR:
-                       next_state = IBV_QPS_RESET;
-                       break;
-               case IBV_QPS_RESET:
-                       next_state = IBV_QPS_INIT;
-                       break;
-               case IBV_QPS_INIT:
-                       next_state = IBV_QPS_RTR;
-                       break;
-               case IBV_QPS_RTR:
-                       next_state = IBV_QPS_RTS;
-                       break;
-               case IBV_QPS_RTS:
-                       recovered = 1;
-                       break;
-               default:
-                       SPDK_ERRLOG("RDMA qpair %u unexpected state for recovery: %u\n",
-                                   rqpair->qpair.qid, state);
-                       goto error;
-               }
-               /* Do not transition into same state */
-               if (next_state == state) {
-                       break;
-               }
+       if (rqpair->srq == NULL && rqpair->current_recv_depth != rqpair->max_queue_depth) {
+               return;
+       }
+
+       if (rqpair->srq != NULL && rqpair->last_wqe_reached == false) {
+               return;
+       }
+
+       spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);
+       spdk_nvmf_rdma_qpair_destroy(rqpair);
+}
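+
+/* Editorial note: destruction above is gated three ways. All sends must have
+ * completed, and either every receive slot has drained back (per-qpair RQ:
+ * current_recv_depth == max_queue_depth) or the LAST_WQE_REACHED async event
+ * has arrived (shared RQ). Until then the callback simply returns. */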
+
+static int
+nvmf_rdma_disconnect(struct rdma_cm_event *evt)
+{
+       struct spdk_nvmf_qpair          *qpair;
+       struct spdk_nvmf_rdma_qpair     *rqpair;
+
+       if (evt->id == NULL) {
+               SPDK_ERRLOG("disconnect request: missing cm_id\n");
+               return -1;
+       }
+
+       qpair = evt->id->context;
+       if (qpair == NULL) {
+               SPDK_ERRLOG("disconnect request: no active connection\n");
+               return -1;
+       }
+
+       rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
+
+       spdk_trace_record(TRACE_RDMA_QP_DISCONNECT, 0, 0, (uintptr_t)rqpair->cm_id, 0);
+
+       spdk_nvmf_rdma_update_ibv_state(rqpair);
+
+       spdk_nvmf_rdma_start_disconnect(rqpair);
+
+       return 0;
+}
+
+#ifdef DEBUG
+static const char *CM_EVENT_STR[] = {
+       "RDMA_CM_EVENT_ADDR_RESOLVED",
+       "RDMA_CM_EVENT_ADDR_ERROR",
+       "RDMA_CM_EVENT_ROUTE_RESOLVED",
+       "RDMA_CM_EVENT_ROUTE_ERROR",
+       "RDMA_CM_EVENT_CONNECT_REQUEST",
+       "RDMA_CM_EVENT_CONNECT_RESPONSE",
+       "RDMA_CM_EVENT_CONNECT_ERROR",
+       "RDMA_CM_EVENT_UNREACHABLE",
+       "RDMA_CM_EVENT_REJECTED",
+       "RDMA_CM_EVENT_ESTABLISHED",
+       "RDMA_CM_EVENT_DISCONNECTED",
+       "RDMA_CM_EVENT_DEVICE_REMOVAL",
+       "RDMA_CM_EVENT_MULTICAST_JOIN",
+       "RDMA_CM_EVENT_MULTICAST_ERROR",
+       "RDMA_CM_EVENT_ADDR_CHANGE",
+       "RDMA_CM_EVENT_TIMEWAIT_EXIT"
+};
+#endif /* DEBUG */
+
+static void
+spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
+{
+       struct spdk_nvmf_rdma_transport *rtransport;
+       struct rdma_cm_event            *event;
+       int                             rc;
+
+       rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
+
+       if (rtransport->event_channel == NULL) {
+               return;
+       }
+
+       while (1) {
+               rc = rdma_get_cm_event(rtransport->event_channel, &event);
+               if (rc == 0) {
+                       SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Acceptor Event: %s\n", CM_EVENT_STR[event->event]);
+
+                       spdk_trace_record(TRACE_RDMA_CM_ASYNC_EVENT, 0, 0, 0, event->event);
+
+                       switch (event->event) {
+                       case RDMA_CM_EVENT_ADDR_RESOLVED:
+                       case RDMA_CM_EVENT_ADDR_ERROR:
+                       case RDMA_CM_EVENT_ROUTE_RESOLVED:
+                       case RDMA_CM_EVENT_ROUTE_ERROR:
+                               /* No action required. The target never attempts to resolve routes. */
+                               break;
+                       case RDMA_CM_EVENT_CONNECT_REQUEST:
+                               rc = nvmf_rdma_connect(transport, event, cb_fn);
+                               if (rc < 0) {
+                                       SPDK_ERRLOG("Unable to process connect event. rc: %d\n", rc);
+                                       break;
+                               }
+                               break;
+                       case RDMA_CM_EVENT_CONNECT_RESPONSE:
+                               /* The target never initiates a new connection. So this will not occur. */
+                               break;
+                       case RDMA_CM_EVENT_CONNECT_ERROR:
+                               /* Can this happen? The docs say it can, but not sure what causes it. */
+                               break;
+                       case RDMA_CM_EVENT_UNREACHABLE:
+                       case RDMA_CM_EVENT_REJECTED:
+                               /* These only occur on the client side. */
+                               break;
+                       case RDMA_CM_EVENT_ESTABLISHED:
+                               /* TODO: Should we be waiting for this event anywhere? */
+                               break;
+                       case RDMA_CM_EVENT_DISCONNECTED:
+                       case RDMA_CM_EVENT_DEVICE_REMOVAL:
+                               rc = nvmf_rdma_disconnect(event);
+                               if (rc < 0) {
+                                       SPDK_ERRLOG("Unable to process disconnect event. rc: %d\n", rc);
+                                       break;
+                               }
+                               break;
+                       case RDMA_CM_EVENT_MULTICAST_JOIN:
+                       case RDMA_CM_EVENT_MULTICAST_ERROR:
+                               /* Multicast is not used */
+                               break;
+                       case RDMA_CM_EVENT_ADDR_CHANGE:
+                               /* Not utilizing this event */
+                               break;
+                       case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+                               /* For now, do nothing. The target never re-uses queue pairs. */
+                               break;
+                       default:
+                               SPDK_ERRLOG("Unexpected Acceptor Event [%d]\n", event->event);
+                               break;
+                       }
 
-               if (spdk_nvmf_rdma_set_ibv_state(rqpair, next_state)) {
-                       goto error;
+                       rdma_ack_cm_event(event);
+               } else {
+                       if (errno != EAGAIN && errno != EWOULDBLOCK) {
+                               SPDK_ERRLOG("Acceptor Event Error: %s\n", spdk_strerror(errno));
+                       }
+                       break;
                }
-
-               state = next_state;
-       }
-
-       rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
-                                     struct spdk_nvmf_rdma_transport,
-                                     transport);
-
-       spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
-
-       return;
-error:
-       SPDK_NOTICELOG("RDMA qpair %u: recovery failed, disconnecting...\n",
-                      rqpair->qpair.qid);
-       spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
-}
-
-/* Clean up only the states that can be aborted at any time */
-static void
-_spdk_nvmf_rdma_qp_cleanup_safe_states(struct spdk_nvmf_rdma_qpair *rqpair)
-{
-       struct spdk_nvmf_rdma_request   *rdma_req, *req_tmp;
-
-       spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEW);
-       TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_NEED_BUFFER], link, req_tmp) {
-               TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
-       }
-       spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEED_BUFFER);
-       spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);
-       spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_READY_TO_EXECUTE);
-       spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_EXECUTED);
-       spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_READY_TO_COMPLETE);
-       spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETED);
-}
-
-/* This cleans up all memory. It is only safe to use if the rest of the software stack
- * has been shut down */
-static void
-_spdk_nvmf_rdma_qp_cleanup_all_states(struct spdk_nvmf_rdma_qpair *rqpair)
-{
-       _spdk_nvmf_rdma_qp_cleanup_safe_states(rqpair);
-
-       spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_EXECUTING);
-       spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
-       spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
-       spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETING);
-}
-
-static void
-_spdk_nvmf_rdma_qp_error(void *arg)
-{
-       struct spdk_nvmf_rdma_qpair     *rqpair = arg;
-       enum ibv_qp_state               state;
-
-       spdk_nvmf_rdma_qpair_dec_refcnt(rqpair);
-
-       state = rqpair->ibv_attr.qp_state;
-       if (state != IBV_QPS_ERR) {
-               /* Error was already recovered */
-               return;
-       }
-
-       if (spdk_nvmf_qpair_is_admin_queue(&rqpair->qpair)) {
-               spdk_nvmf_ctrlr_abort_aer(rqpair->qpair.ctrlr);
-       }
-
-       _spdk_nvmf_rdma_qp_cleanup_safe_states(rqpair);
-
-       /* Attempt recovery. This will exit without recovering if I/O requests
-        * are still outstanding */
-       spdk_nvmf_rdma_qpair_recover(rqpair);
-}
-
-static void
-_spdk_nvmf_rdma_qp_last_wqe(void *arg)
-{
-       struct spdk_nvmf_rdma_qpair     *rqpair = arg;
-       enum ibv_qp_state               state;
-
-       spdk_nvmf_rdma_qpair_dec_refcnt(rqpair);
-
-       state = rqpair->ibv_attr.qp_state;
-       if (state != IBV_QPS_ERR) {
-               /* Error was already recovered */
-               return;
        }
-
-       /* Clear out the states that are safe to clear any time, plus the
-        * RDMA data transfer states. */
-       _spdk_nvmf_rdma_qp_cleanup_safe_states(rqpair);
-
-       spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
-       spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
-       spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETING);
-
-       spdk_nvmf_rdma_qpair_recover(rqpair);
 }
 
 static void
 spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 {
        int                             rc;
-       struct spdk_nvmf_rdma_qpair     *rqpair;
+       struct spdk_nvmf_rdma_qpair     *rqpair = NULL;
        struct ibv_async_event          event;
        enum ibv_qp_state               state;
 
@@ -2310,25 +2831,28 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
                return;
        }
 
-       SPDK_NOTICELOG("Async event: %s\n",
-                      ibv_event_type_str(event.event_type));
-
        switch (event.event_type) {
        case IBV_EVENT_QP_FATAL:
                rqpair = event.element.qp->qp_context;
+               SPDK_ERRLOG("Fatal event received for rqpair %p\n", rqpair);
                spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
                                  (uintptr_t)rqpair->cm_id, event.event_type);
                spdk_nvmf_rdma_update_ibv_state(rqpair);
-               spdk_nvmf_rdma_qpair_inc_refcnt(rqpair);
-               spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_error, rqpair);
+               spdk_nvmf_rdma_start_disconnect(rqpair);
                break;
        case IBV_EVENT_QP_LAST_WQE_REACHED:
+               /* This event only occurs for shared receive queues. */
                rqpair = event.element.qp->qp_context;
-               spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
-                                 (uintptr_t)rqpair->cm_id, event.event_type);
-               spdk_nvmf_rdma_update_ibv_state(rqpair);
-               spdk_nvmf_rdma_qpair_inc_refcnt(rqpair);
-               spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_last_wqe, rqpair);
+               rqpair->last_wqe_reached = true;
+
+               SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Last WQE reached event received for rqpair %p\n", rqpair);
+               /* This must be handled on the polling thread if it exists. Otherwise the timeout will catch it. */
+               if (rqpair->qpair.group) {
+                       spdk_thread_send_msg(rqpair->qpair.group->thread, nvmf_rdma_destroy_drained_qpair, rqpair);
+               } else {
+                       SPDK_ERRLOG("Unable to destroy the qpair %p since it does not have a poll group.\n", rqpair);
+               }
+
                break;
        case IBV_EVENT_SQ_DRAINED:
                /* This event occurs frequently in both error and non-error states.
@@ -2337,12 +2861,12 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
                 * the operations that the below calls make all happen to be thread
                 * safe. */
                rqpair = event.element.qp->qp_context;
+               SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Last sq drained event received for rqpair %p\n", rqpair);
                spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
                                  (uintptr_t)rqpair->cm_id, event.event_type);
                state = spdk_nvmf_rdma_update_ibv_state(rqpair);
                if (state == IBV_QPS_ERR) {
-                       spdk_nvmf_rdma_qpair_inc_refcnt(rqpair);
-                       spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_error, rqpair);
+                       spdk_nvmf_rdma_start_disconnect(rqpair);
                }
                break;
        case IBV_EVENT_QP_REQ_ERR:
@@ -2350,6 +2874,8 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
        case IBV_EVENT_COMM_EST:
        case IBV_EVENT_PATH_MIG:
        case IBV_EVENT_PATH_MIG_ERR:
+               SPDK_NOTICELOG("Async event: %s\n",
+                              ibv_event_type_str(event.event_type));
                rqpair = event.element.qp->qp_context;
                spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
                                  (uintptr_t)rqpair->cm_id, event.event_type);
@@ -2367,6 +2893,8 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
        case IBV_EVENT_CLIENT_REREGISTER:
        case IBV_EVENT_GID_CHANGE:
        default:
+               SPDK_NOTICELOG("Async event: %s\n",
+                              ibv_event_type_str(event.event_type));
                spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0, 0, event.event_type);
                break;
        }
@@ -2425,6 +2953,9 @@ spdk_nvmf_rdma_discover(struct spdk_nvmf_transport *transport,
        entry->tsas.rdma.rdma_cms = SPDK_NVMF_RDMA_CMS_RDMA_CM;
 }
 
+static void
+spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group);
+
 static struct spdk_nvmf_transport_poll_group *
 spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
 {
@@ -2432,6 +2963,9 @@ spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
        struct spdk_nvmf_rdma_poll_group        *rgroup;
        struct spdk_nvmf_rdma_poller            *poller;
        struct spdk_nvmf_rdma_device            *device;
+       struct ibv_srq_init_attr                srq_init_attr;
+       struct spdk_nvmf_rdma_resource_opts     opts;
+       int                                     num_cqe;
 
        rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
 
@@ -2441,13 +2975,14 @@ spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
        }
 
        TAILQ_INIT(&rgroup->pollers);
+       STAILQ_INIT(&rgroup->pending_data_buf_queue);
 
        pthread_mutex_lock(&rtransport->lock);
        TAILQ_FOREACH(device, &rtransport->devices, link) {
                poller = calloc(1, sizeof(*poller));
                if (!poller) {
                        SPDK_ERRLOG("Unable to allocate memory for new RDMA poller\n");
-                       free(rgroup);
+                       spdk_nvmf_rdma_poll_group_destroy(&rgroup->group);
                        pthread_mutex_unlock(&rtransport->lock);
                        return NULL;
                }
@@ -2456,17 +2991,60 @@ spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
                poller->group = rgroup;
 
                TAILQ_INIT(&poller->qpairs);
+               STAILQ_INIT(&poller->qpairs_pending_send);
+               STAILQ_INIT(&poller->qpairs_pending_recv);
 
-               poller->cq = ibv_create_cq(device->context, NVMF_RDMA_CQ_SIZE, poller, NULL, 0);
+               TAILQ_INSERT_TAIL(&rgroup->pollers, poller, link);
+               if (transport->opts.no_srq == false && device->num_srq < device->attr.max_srq) {
+                       poller->max_srq_depth = transport->opts.max_srq_depth;
+
+                       device->num_srq++;
+                       memset(&srq_init_attr, 0, sizeof(struct ibv_srq_init_attr));
+                       srq_init_attr.attr.max_wr = poller->max_srq_depth;
+                       srq_init_attr.attr.max_sge = spdk_min(device->attr.max_sge, NVMF_DEFAULT_RX_SGE);
+                       poller->srq = ibv_create_srq(device->pd, &srq_init_attr);
+                       if (!poller->srq) {
+                               SPDK_ERRLOG("Unable to create shared receive queue, errno %d\n", errno);
+                               spdk_nvmf_rdma_poll_group_destroy(&rgroup->group);
+                               pthread_mutex_unlock(&rtransport->lock);
+                               return NULL;
+                       }
+
+                       opts.qp = poller->srq;
+                       opts.pd = device->pd;
+                       opts.qpair = NULL;
+                       opts.shared = true;
+                       opts.max_queue_depth = poller->max_srq_depth;
+                       opts.in_capsule_data_size = transport->opts.in_capsule_data_size;
+
+                       poller->resources = nvmf_rdma_resources_create(&opts);
+                       if (!poller->resources) {
+                               SPDK_ERRLOG("Unable to allocate resources for shared receive queue.\n");
+                               spdk_nvmf_rdma_poll_group_destroy(&rgroup->group);
+                               pthread_mutex_unlock(&rtransport->lock);
+                               return NULL;
+                       }
+               }
+
+               /*
+                * When using an SRQ, we can limit the completion queue at startup.
+                * The following formula represents the calculation:
+                * num_cqe = num_recv + num_data_wr + num_send_wr,
+                * where num_recv = num_data_wr = num_send_wr = poller->max_srq_depth.
+                */
+               if (poller->srq) {
+                       num_cqe = poller->max_srq_depth * 3;
+               } else {
+                       num_cqe = DEFAULT_NVMF_RDMA_CQ_SIZE;
+               }
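+               /* Editorial note: with the default max_srq_depth of 4096 this
+                * sizes the CQ at 12288 entries, versus a fixed 4096 when no
+                * SRQ is in use. */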
+
+               poller->cq = ibv_create_cq(device->context, num_cqe, poller, NULL, 0);
                if (!poller->cq) {
                        SPDK_ERRLOG("Unable to create completion queue\n");
-                       free(poller);
-                       free(rgroup);
+                       spdk_nvmf_rdma_poll_group_destroy(&rgroup->group);
                        pthread_mutex_unlock(&rtransport->lock);
                        return NULL;
                }
-
-               TAILQ_INSERT_TAIL(&rgroup->pollers, poller, link);
+               poller->num_cqe = num_cqe;
        }
 
        pthread_mutex_unlock(&rtransport->lock);
@@ -2489,32 +3067,49 @@ spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
        TAILQ_FOREACH_SAFE(poller, &rgroup->pollers, link, tmp) {
                TAILQ_REMOVE(&rgroup->pollers, poller, link);
 
-               if (poller->cq) {
-                       ibv_destroy_cq(poller->cq);
-               }
                TAILQ_FOREACH_SAFE(qpair, &poller->qpairs, link, tmp_qpair) {
-                       _spdk_nvmf_rdma_qp_cleanup_all_states(qpair);
                        spdk_nvmf_rdma_qpair_destroy(qpair);
                }
 
+               if (poller->srq) {
+                       nvmf_rdma_resources_destroy(poller->resources);
+                       ibv_destroy_srq(poller->srq);
+                       SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Destroyed RDMA shared queue %p\n", poller->srq);
+               }
+
+               if (poller->cq) {
+                       ibv_destroy_cq(poller->cq);
+               }
+
                free(poller);
        }
 
+       if (!STAILQ_EMPTY(&rgroup->pending_data_buf_queue)) {
+               SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
+       }
+
        free(rgroup);
 }
 
+static void
+spdk_nvmf_rdma_qpair_reject_connection(struct spdk_nvmf_rdma_qpair *rqpair)
+{
+       if (rqpair->cm_id != NULL) {
+               spdk_nvmf_rdma_event_reject(rqpair->cm_id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
+       }
+       spdk_nvmf_rdma_qpair_destroy(rqpair);
+}
+
 static int
 spdk_nvmf_rdma_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
                              struct spdk_nvmf_qpair *qpair)
 {
-       struct spdk_nvmf_rdma_transport         *rtransport;
        struct spdk_nvmf_rdma_poll_group        *rgroup;
        struct spdk_nvmf_rdma_qpair             *rqpair;
        struct spdk_nvmf_rdma_device            *device;
        struct spdk_nvmf_rdma_poller            *poller;
        int                                     rc;
 
-       rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
        rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
        rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
 
@@ -2533,6 +3128,7 @@ spdk_nvmf_rdma_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
 
        TAILQ_INSERT_TAIL(&poller->qpairs, rqpair, link);
        rqpair->poller = poller;
+       rqpair->srq = rqpair->poller->srq;
 
        rc = spdk_nvmf_rdma_qpair_initialize(qpair);
        if (rc < 0) {
@@ -2540,21 +3136,10 @@ spdk_nvmf_rdma_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
                return -1;
        }
 
-       rqpair->mgmt_channel = spdk_get_io_channel(rtransport);
-       if (!rqpair->mgmt_channel) {
-               spdk_nvmf_rdma_event_reject(rqpair->cm_id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
-               spdk_nvmf_rdma_qpair_destroy(rqpair);
-               return -1;
-       }
-
-       rqpair->ch = spdk_io_channel_get_ctx(rqpair->mgmt_channel);
-       assert(rqpair->ch != NULL);
-
        rc = spdk_nvmf_rdma_event_accept(rqpair->cm_id, rqpair);
        if (rc) {
                /* Try to reject, but we probably can't */
-               spdk_nvmf_rdma_event_reject(rqpair->cm_id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
-               spdk_nvmf_rdma_qpair_destroy(rqpair);
+               spdk_nvmf_rdma_qpair_reject_connection(rqpair);
                return -1;
        }
 
@@ -2570,19 +3155,7 @@ spdk_nvmf_rdma_request_free(struct spdk_nvmf_request *req)
        struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(req->qpair->transport,
                        struct spdk_nvmf_rdma_transport, transport);
 
-       if (rdma_req->data_from_pool) {
-               /* Put the buffer/s back in the pool */
-               for (uint32_t i = 0; i < rdma_req->req.iovcnt; i++) {
-                       spdk_mempool_put(rtransport->data_buf_pool, rdma_req->data.buffers[i]);
-                       rdma_req->req.iov[i].iov_base = NULL;
-                       rdma_req->data.buffers[i] = NULL;
-               }
-               rdma_req->data_from_pool = false;
-       }
-       rdma_req->req.length = 0;
-       rdma_req->req.iovcnt = 0;
-       rdma_req->req.data = NULL;
-       spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_FREE);
+       nvmf_rdma_request_free(rdma_req, rtransport);
        return 0;
 }
 
@@ -2596,21 +3169,28 @@ spdk_nvmf_rdma_request_complete(struct spdk_nvmf_request *req)
        struct spdk_nvmf_rdma_qpair     *rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair,
                        struct spdk_nvmf_rdma_qpair, qpair);
 
-       if (rqpair->ibv_attr.qp_state != IBV_QPS_ERR) {
+       if (rqpair->ibv_state != IBV_QPS_ERR) {
                /* The connection is alive, so process the request as normal */
-               spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_EXECUTED);
+               rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
        } else {
                /* The connection is dead. Move the request directly to the completed state. */
-               spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
+               rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
        }
 
        spdk_nvmf_rdma_request_process(rtransport, rdma_req);
 
-       if (rqpair->qpair.state == SPDK_NVMF_QPAIR_ACTIVE && rqpair->ibv_attr.qp_state == IBV_QPS_ERR) {
-               /* If the NVMe-oF layer thinks the connection is active, but the RDMA layer thinks
-                * the connection is dead, perform error recovery. */
-               spdk_nvmf_rdma_qpair_recover(rqpair);
-       }
+       return 0;
+}
+
+static int
+spdk_nvmf_rdma_destroy_defunct_qpair(void *ctx)
+{
+       struct spdk_nvmf_rdma_qpair     *rqpair = ctx;
+       struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
+                       struct spdk_nvmf_rdma_transport, transport);
+
+       spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);
+       spdk_nvmf_rdma_qpair_destroy(rqpair);
 
        return 0;
 }
@@ -2620,62 +3200,205 @@ spdk_nvmf_rdma_close_qpair(struct spdk_nvmf_qpair *qpair)
 {
        struct spdk_nvmf_rdma_qpair *rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
 
-       spdk_nvmf_rdma_qpair_destroy(rqpair);
-}
+       if (rqpair->disconnect_flags & RDMA_QP_DISCONNECTING) {
+               return;
+       }
 
-static struct spdk_nvmf_rdma_request *
-get_rdma_req_from_wc(struct ibv_wc *wc)
-{
-       struct spdk_nvmf_rdma_request *rdma_req;
+       rqpair->disconnect_flags |= RDMA_QP_DISCONNECTING;
 
-       rdma_req = (struct spdk_nvmf_rdma_request *)wc->wr_id;
-       assert(rdma_req != NULL);
+       /* This happens only when the qpair is disconnected before
+        * it is added to the poll group. Since there is no poll group,
+        * the RDMA qp has not been initialized yet and the RDMA CM
+        * event has not yet been acknowledged, so we need to reject it.
+        */
+       if (rqpair->qpair.state == SPDK_NVMF_QPAIR_UNINITIALIZED) {
+               spdk_nvmf_rdma_qpair_reject_connection(rqpair);
+               return;
+       }
 
-#ifdef DEBUG
+       if (rqpair->ibv_state != IBV_QPS_ERR) {
+               spdk_nvmf_rdma_set_ibv_state(rqpair, IBV_QPS_ERR);
+       }
+
+       rqpair->destruct_poller = spdk_poller_register(spdk_nvmf_rdma_destroy_defunct_qpair, (void *)rqpair,
+                                 NVMF_RDMA_QPAIR_DESTROY_TIMEOUT_US);
+}
+
+static struct spdk_nvmf_rdma_qpair *
+get_rdma_qpair_from_wc(struct spdk_nvmf_rdma_poller *rpoller, struct ibv_wc *wc)
+{
        struct spdk_nvmf_rdma_qpair *rqpair;
-       rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
+       /* @todo: improve QP search */
+       TAILQ_FOREACH(rqpair, &rpoller->qpairs, link) {
+               if (wc->qp_num == rqpair->cm_id->qp->qp_num) {
+                       return rqpair;
+               }
+       }
+       SPDK_ERRLOG("Didn't find QP with qp_num %u\n", wc->qp_num);
+       return NULL;
+}
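+
+/* Editorial sketch for the @todo above, not part of this patch: one possible
+ * O(1) fast path caches qpairs in a table keyed by qp_num and falls back to
+ * the linear scan on a miss. The guard macro, table size, and names are
+ * illustrative assumptions; slots would be filled when a qpair joins a poller
+ * and cleared when it is destroyed. */
+#ifdef NVMF_RDMA_QP_LOOKUP_SKETCH
+#define NVMF_RDMA_QP_MAP_SIZE 256      /* assumed power of two */
+
+static struct spdk_nvmf_rdma_qpair *g_qpair_map[NVMF_RDMA_QP_MAP_SIZE];
+
+static struct spdk_nvmf_rdma_qpair *
+get_rdma_qpair_from_wc_fast(struct spdk_nvmf_rdma_poller *rpoller, struct ibv_wc *wc)
+{
+       struct spdk_nvmf_rdma_qpair *rqpair;
+
+       rqpair = g_qpair_map[wc->qp_num & (NVMF_RDMA_QP_MAP_SIZE - 1)];
+       if (rqpair != NULL && rqpair->cm_id->qp->qp_num == wc->qp_num) {
+               return rqpair;
+       }
+       /* Collision or stale slot: fall back to the linear scan. */
+       return get_rdma_qpair_from_wc(rpoller, wc);
+}
+#endif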
 
-       assert(rdma_req - rqpair->reqs >= 0);
-       assert(rdma_req - rqpair->reqs < (ptrdiff_t)rqpair->max_queue_depth);
+#ifdef DEBUG
+static int
+spdk_nvmf_rdma_req_is_completing(struct spdk_nvmf_rdma_request *rdma_req)
+{
+       return rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST ||
+              rdma_req->state == RDMA_REQUEST_STATE_COMPLETING;
+}
 #endif
 
-       return rdma_req;
+static void
+_poller_reset_failed_recvs(struct spdk_nvmf_rdma_poller *rpoller, struct ibv_recv_wr *bad_recv_wr,
+                          int rc)
+{
+       struct spdk_nvmf_rdma_recv      *rdma_recv;
+       struct spdk_nvmf_rdma_wr        *bad_rdma_wr;
+
+       SPDK_ERRLOG("Failed to post a recv for the poller %p with errno %d\n", rpoller, -rc);
+       while (bad_recv_wr != NULL) {
+               bad_rdma_wr = (struct spdk_nvmf_rdma_wr *)bad_recv_wr->wr_id;
+               rdma_recv = SPDK_CONTAINEROF(bad_rdma_wr, struct spdk_nvmf_rdma_recv, rdma_wr);
+
+               rdma_recv->qpair->current_recv_depth++;
+               bad_recv_wr = bad_recv_wr->next;
+               SPDK_ERRLOG("Failed to post a recv for the qpair %p with errno %d\n", rdma_recv->qpair, -rc);
+               spdk_nvmf_rdma_start_disconnect(rdma_recv->qpair);
+       }
+}
+
+static void
+_qp_reset_failed_recvs(struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_recv_wr *bad_recv_wr, int rc)
+{
+       SPDK_ERRLOG("Failed to post a recv for the qpair %p with errno %d\n", rqpair, -rc);
+       while (bad_recv_wr != NULL) {
+               bad_recv_wr = bad_recv_wr->next;
+               rqpair->current_recv_depth++;
+       }
+       spdk_nvmf_rdma_start_disconnect(rqpair);
+}
+
+static void
+_poller_submit_recvs(struct spdk_nvmf_rdma_transport *rtransport,
+                    struct spdk_nvmf_rdma_poller *rpoller)
+{
+       struct spdk_nvmf_rdma_qpair     *rqpair;
+       struct ibv_recv_wr              *bad_recv_wr;
+       int                             rc;
+
+       if (rpoller->srq) {
+               if (rpoller->resources->recvs_to_post.first != NULL) {
+                       rc = ibv_post_srq_recv(rpoller->srq, rpoller->resources->recvs_to_post.first, &bad_recv_wr);
+                       if (rc) {
+                               _poller_reset_failed_recvs(rpoller, bad_recv_wr, rc);
+                       }
+                       rpoller->resources->recvs_to_post.first = NULL;
+                       rpoller->resources->recvs_to_post.last = NULL;
+               }
+       } else {
+               while (!STAILQ_EMPTY(&rpoller->qpairs_pending_recv)) {
+                       rqpair = STAILQ_FIRST(&rpoller->qpairs_pending_recv);
+                       assert(rqpair->resources->recvs_to_post.first != NULL);
+                       rc = ibv_post_recv(rqpair->cm_id->qp, rqpair->resources->recvs_to_post.first, &bad_recv_wr);
+                       if (rc) {
+                               _qp_reset_failed_recvs(rqpair, bad_recv_wr, rc);
+                       }
+                       rqpair->resources->recvs_to_post.first = NULL;
+                       rqpair->resources->recvs_to_post.last = NULL;
+                       STAILQ_REMOVE_HEAD(&rpoller->qpairs_pending_recv, recv_link);
+               }
+       }
 }
 
-static struct spdk_nvmf_rdma_recv *
-get_rdma_recv_from_wc(struct ibv_wc *wc)
+static void
+_qp_reset_failed_sends(struct spdk_nvmf_rdma_transport *rtransport,
+                      struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_send_wr *bad_wr, int rc)
 {
-       struct spdk_nvmf_rdma_recv *rdma_recv;
+       struct spdk_nvmf_rdma_wr        *bad_rdma_wr;
+       struct spdk_nvmf_rdma_request   *prev_rdma_req = NULL, *cur_rdma_req = NULL;
+
+       SPDK_ERRLOG("Failed to post a send for the qpair %p with errno %d\n", rqpair, -rc);
+       for (; bad_wr != NULL; bad_wr = bad_wr->next) {
+               bad_rdma_wr = (struct spdk_nvmf_rdma_wr *)bad_wr->wr_id;
+               assert(rqpair->current_send_depth > 0);
+               rqpair->current_send_depth--;
+               switch (bad_rdma_wr->type) {
+               case RDMA_WR_TYPE_DATA:
+                       cur_rdma_req = SPDK_CONTAINEROF(bad_rdma_wr, struct spdk_nvmf_rdma_request, data.rdma_wr);
+                       if (bad_wr->opcode == IBV_WR_RDMA_READ) {
+                               assert(rqpair->current_read_depth > 0);
+                               rqpair->current_read_depth--;
+                       }
+                       break;
+               case RDMA_WR_TYPE_SEND:
+                       cur_rdma_req = SPDK_CONTAINEROF(bad_rdma_wr, struct spdk_nvmf_rdma_request, rsp.rdma_wr);
+                       break;
+               default:
+                       SPDK_ERRLOG("Found a RECV in the list of pending SEND requests for qpair %p\n", rqpair);
+                       prev_rdma_req = cur_rdma_req;
+                       continue;
+               }
 
-       assert(wc->byte_len >= sizeof(struct spdk_nvmf_capsule_cmd));
+               if (prev_rdma_req == cur_rdma_req) {
+                       /* This request was handled by an earlier wr, i.e. we were performing an NVMe read. */
+                       /* We only have to check against prev_rdma_req since each request's wrs are contiguous in this list. */
+                       continue;
+               }
 
-       rdma_recv = (struct spdk_nvmf_rdma_recv *)wc->wr_id;
-       assert(rdma_recv != NULL);
+               switch (cur_rdma_req->state) {
+               case RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
+                       cur_rdma_req->req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+                       cur_rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
+                       break;
+               case RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST:
+               case RDMA_REQUEST_STATE_COMPLETING:
+                       cur_rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
+                       break;
+               default:
+                       SPDK_ERRLOG("Found a request in a bad state %d when draining pending SEND requests for qpair %p\n",
+                                   cur_rdma_req->state, rqpair);
+                       continue;
+               }
 
-#ifdef DEBUG
-       struct spdk_nvmf_rdma_qpair *rqpair = rdma_recv->qpair;
+               spdk_nvmf_rdma_request_process(rtransport, cur_rdma_req);
+               prev_rdma_req = cur_rdma_req;
+       }
 
-       assert(rdma_recv - rqpair->recvs >= 0);
-       assert(rdma_recv - rqpair->recvs < (ptrdiff_t)rqpair->max_queue_depth);
-#endif
+       if (rqpair->qpair.state == SPDK_NVMF_QPAIR_ACTIVE) {
+               /* Disconnect the connection. */
+               spdk_nvmf_rdma_start_disconnect(rqpair);
+       }
 
-       return rdma_recv;
 }
 
-#ifdef DEBUG
-static int
-spdk_nvmf_rdma_req_is_completing(struct spdk_nvmf_rdma_request *rdma_req)
+static void
+_poller_submit_sends(struct spdk_nvmf_rdma_transport *rtransport,
+                    struct spdk_nvmf_rdma_poller *rpoller)
 {
-       return rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST ||
-              rdma_req->state == RDMA_REQUEST_STATE_COMPLETING;
+       struct spdk_nvmf_rdma_qpair     *rqpair;
+       struct ibv_send_wr              *bad_wr = NULL;
+       int                             rc;
+
+       while (!STAILQ_EMPTY(&rpoller->qpairs_pending_send)) {
+               rqpair = STAILQ_FIRST(&rpoller->qpairs_pending_send);
+               assert(rqpair->sends_to_post.first != NULL);
+               rc = ibv_post_send(rqpair->cm_id->qp, rqpair->sends_to_post.first, &bad_wr);
+
+               /* bad wr always points to the first wr that failed. */
+               if (rc) {
+                       _qp_reset_failed_sends(rtransport, rqpair, bad_wr, rc);
+               }
+               rqpair->sends_to_post.first = NULL;
+               rqpair->sends_to_post.last = NULL;
+               STAILQ_REMOVE_HEAD(&rpoller->qpairs_pending_send, send_link);
+       }
 }
-#endif
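+
+/* Editorial note: on failure ibv_post_send() leaves bad_wr pointing at the
+ * first WR that was not posted; earlier WRs in the chain were accepted and
+ * will complete through the CQ, which is why _qp_reset_failed_sends() only
+ * unwinds the chain starting from bad_wr. */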
 
 static int
 spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
                           struct spdk_nvmf_rdma_poller *rpoller)
 {
        struct ibv_wc wc[32];
+       struct spdk_nvmf_rdma_wr        *rdma_wr;
        struct spdk_nvmf_rdma_request   *rdma_req;
        struct spdk_nvmf_rdma_recv      *rdma_recv;
        struct spdk_nvmf_rdma_qpair     *rqpair;
@@ -2692,103 +3415,116 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
        }
 
        for (i = 0; i < reaped; i++) {
-               /* Handle error conditions */
-               if (wc[i].status) {
-                       SPDK_WARNLOG("CQ error on CQ %p, Request 0x%lu (%d): %s\n",
-                                    rpoller->cq, wc[i].wr_id, wc[i].status, ibv_wc_status_str(wc[i].status));
-                       error = true;
 
-                       switch (wc[i].opcode) {
-                       case IBV_WC_SEND:
-                               rdma_req = get_rdma_req_from_wc(&wc[i]);
-                               rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
+               rdma_wr = (struct spdk_nvmf_rdma_wr *)wc[i].wr_id;
 
-                               /* We're going to attempt an error recovery, so force the request into
-                                * the completed state. */
-                               spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
-                               spdk_nvmf_rdma_request_process(rtransport, rdma_req);
-                               break;
-                       case IBV_WC_RECV:
-                               rdma_recv = get_rdma_recv_from_wc(&wc[i]);
-                               rqpair = rdma_recv->qpair;
+               switch (rdma_wr->type) {
+               case RDMA_WR_TYPE_SEND:
+                       rdma_req = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_request, rsp.rdma_wr);
+                       rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
 
-                               /* Dump this into the incoming queue. This gets cleaned up when
-                                * the queue pair disconnects or recovers. */
-                               TAILQ_INSERT_TAIL(&rqpair->incoming_queue, rdma_recv, link);
-                               break;
-                       case IBV_WC_RDMA_WRITE:
-                       case IBV_WC_RDMA_READ:
-                               /* If the data transfer fails still force the queue into the error state,
-                                * but the rdma_req objects should only be manipulated in response to
-                                * SEND and RECV operations. */
-                               rdma_req = get_rdma_req_from_wc(&wc[i]);
-                               rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
-                               break;
-                       default:
-                               SPDK_ERRLOG("Received an unknown opcode on the CQ: %d\n", wc[i].opcode);
-                               continue;
+                       if (!wc[i].status) {
+                               count++;
+                               assert(wc[i].opcode == IBV_WC_SEND);
+                               assert(spdk_nvmf_rdma_req_is_completing(rdma_req));
+                       } else {
+                               SPDK_ERRLOG("send completion error: data=%p length=%u\n",
+                                           rdma_req->req.data, rdma_req->req.length);
                        }
 
-                       /* Set the qpair to the error state. This will initiate a recovery. */
-                       spdk_nvmf_rdma_set_ibv_state(rqpair, IBV_QPS_ERR);
-                       continue;
-               }
-
-               switch (wc[i].opcode) {
-               case IBV_WC_SEND:
-                       rdma_req = get_rdma_req_from_wc(&wc[i]);
-                       rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
-
-                       assert(spdk_nvmf_rdma_req_is_completing(rdma_req));
+                       rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
+                       /* Release this request's send-queue slots: its remaining
+                        * (unsignaled) data wrs plus one for the response wr. */
+                       rqpair->current_send_depth -= rdma_req->num_outstanding_data_wr + 1;
+                       rdma_req->num_outstanding_data_wr = 0;
 
-                       spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
                        spdk_nvmf_rdma_request_process(rtransport, rdma_req);
-
-                       count++;
-
-                       /* Try to process other queued requests */
-                       spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
                        break;
+               case RDMA_WR_TYPE_RECV:
+                       /* rdma_recv->qpair will be invalid if using an SRQ. In that
+                        * case we have to get the qpair from the wc. */
+                       rdma_recv = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_recv, rdma_wr);
+                       if (rpoller->srq != NULL) {
+                               rdma_recv->qpair = get_rdma_qpair_from_wc(rpoller, &wc[i]);
+                       }
+                       rqpair = rdma_recv->qpair;
 
-               case IBV_WC_RDMA_WRITE:
-                       rdma_req = get_rdma_req_from_wc(&wc[i]);
-                       rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
+                       assert(rqpair != NULL);
+                       if (!wc[i].status) {
+                               assert(wc[i].opcode == IBV_WC_RECV);
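+                               /* More recvs than the negotiated queue depth means
+                                * the host exceeded its command limit; disconnect. */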
+                               if (rqpair->current_recv_depth >= rqpair->max_queue_depth) {
+                                       spdk_nvmf_rdma_start_disconnect(rqpair);
+                                       break;
+                               }
+                       }
 
-                       /* Try to process other queued requests */
-                       spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
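+                       /* Queue the received command capsule for request processing. */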
+                       rdma_recv->wr.next = NULL;
+                       rqpair->current_recv_depth++;
+                       STAILQ_INSERT_TAIL(&rqpair->resources->incoming_queue, rdma_recv, link);
                        break;
-
-               case IBV_WC_RDMA_READ:
-                       rdma_req = get_rdma_req_from_wc(&wc[i]);
+               case RDMA_WR_TYPE_DATA:
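+                       /* A completion for one of the request's data wrs. Successful
+                        * completions are RDMA READs; WRITE wrs are unsignaled and
+                        * only surface here on error. */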
+                       rdma_req = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_request, data.rdma_wr);
                        rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
 
-                       assert(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
-                       spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_EXECUTE);
-                       spdk_nvmf_rdma_request_process(rtransport, rdma_req);
-
-                       /* Try to process other queued requests */
-                       spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
+                       assert(rdma_req->num_outstanding_data_wr > 0);
+
+                       rqpair->current_send_depth--;
+                       rdma_req->num_outstanding_data_wr--;
+                       if (!wc[i].status) {
+                               assert(wc[i].opcode == IBV_WC_RDMA_READ);
+                               rqpair->current_read_depth--;
+                               /* Wait for all outstanding reads associated with the
+                                * same rdma_req to complete before proceeding. */
+                               if (rdma_req->num_outstanding_data_wr == 0) {
+                                       rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
+                                       spdk_nvmf_rdma_request_process(rtransport, rdma_req);
+                               }
+                       } else {
+                               /* The qpair will be forced into the error state below.
+                                * If this was an RDMA_READ, also force the request into
+                                * the completed state, since a read is not linked to a
+                                * send. For an RDMA_WRITE, wait for the corresponding
+                                * SEND completion instead. */
+                               SPDK_ERRLOG("data transfer error: data=%p length=%u\n",
+                                           rdma_req->req.data, rdma_req->req.length);
+                               if (rdma_req->data.wr.opcode == IBV_WR_RDMA_READ) {
+                                       rqpair->current_read_depth--;
+                                       if (rdma_req->num_outstanding_data_wr == 0) {
+                                               rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
+                                       }
+                               }
+                       }
                        break;
+               default:
+                       SPDK_ERRLOG("Received an unexpected wr type on the CQ: %d\n", rdma_wr->type);
+                       continue;
+               }
 
-               case IBV_WC_RECV:
-                       rdma_recv = get_rdma_recv_from_wc(&wc[i]);
-                       rqpair = rdma_recv->qpair;
+               /* Handle error conditions */
+               if (wc[i].status) {
+                       SPDK_DEBUGLOG(SPDK_LOG_RDMA, "CQ error on CQ %p, Request 0x%lu (%d): %s\n",
+                                     rpoller->cq, wc[i].wr_id, wc[i].status, ibv_wc_status_str(wc[i].status));
 
-                       TAILQ_INSERT_TAIL(&rqpair->incoming_queue, rdma_recv, link);
-                       /* Try to process other queued requests */
-                       spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
-                       break;
+                       error = true;
 
-               default:
-                       SPDK_ERRLOG("Received an unknown opcode on the CQ: %d\n", wc[i].opcode);
+                       if (rqpair->qpair.state == SPDK_NVMF_QPAIR_ACTIVE) {
+                               /* Disconnect the connection. */
+                               spdk_nvmf_rdma_start_disconnect(rqpair);
+                       } else {
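+                               /* The qpair is already draining; it may now be fully
+                                * drained and safe to destroy. */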
+                               nvmf_rdma_destroy_drained_qpair(rqpair);
+                       }
                        continue;
                }
+
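+               /* This completion may have freed send or recv resources; retry
+                * requests that were queued waiting for them. */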
+               spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, false);
+
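+               /* A qpair that is no longer active is destroyed once drained. */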
+               if (rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
+                       nvmf_rdma_destroy_drained_qpair(rqpair);
+               }
        }
 
        if (error == true) {
                return -1;
        }
 
+       /* Post the recv and send work requests batched during this poll;
+        * batching them coalesces doorbell writes across the poller's qpairs. */
+       _poller_submit_recvs(rtransport, rpoller);
+       _poller_submit_sends(rtransport, rpoller);
+
        return count;
 }
 
@@ -2899,6 +3635,12 @@ spdk_nvmf_rdma_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
        return spdk_nvmf_rdma_trid_from_cm_id(rqpair->listen_id, trid, false);
 }
 
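+/* Register application-provided RDMA hooks (e.g. for custom memory
+ * registration). These should be set before the transport is created. */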
+void
+spdk_nvmf_rdma_init_hooks(struct spdk_nvme_rdma_hooks *hooks)
+{
+       g_nvmf_hooks = *hooks;
+}
+
 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
        .type = SPDK_NVME_TRANSPORT_RDMA,
        .opts_init = spdk_nvmf_rdma_opts_init,
@@ -2920,7 +3662,6 @@ const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
        .req_complete = spdk_nvmf_rdma_request_complete,
 
        .qpair_fini = spdk_nvmf_rdma_close_qpair,
-       .qpair_is_idle = spdk_nvmf_rdma_qpair_is_idle,
        .qpair_get_peer_trid = spdk_nvmf_rdma_qpair_get_peer_trid,
        .qpair_get_local_trid = spdk_nvmf_rdma_qpair_get_local_trid,
        .qpair_get_listen_trid = spdk_nvmf_rdma_qpair_get_listen_trid,