diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 4fc7aa86fe3a14a0a5aa6a5c98897f32e4e49c12..bc20a2442a04256dcdfeffd882bda9043b314bfc 100644
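
(The first hunk below drops the RDMA CM status string table and the
nvme_rdma_cm_msg() helper from this file; in mainline the same table
and helper live on in include/linux/nvme-rdma.h, where host and target
RDMA code can share them.)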
 
 #define NVME_RDMA_MAX_INLINE_SEGMENTS  1
 
-static const char *const nvme_rdma_cm_status_strs[] = {
-       [NVME_RDMA_CM_INVALID_LEN]      = "invalid length",
-       [NVME_RDMA_CM_INVALID_RECFMT]   = "invalid record format",
-       [NVME_RDMA_CM_INVALID_QID]      = "invalid queue ID",
-       [NVME_RDMA_CM_INVALID_HSQSIZE]  = "invalid host SQ size",
-       [NVME_RDMA_CM_INVALID_HRQSIZE]  = "invalid host RQ size",
-       [NVME_RDMA_CM_NO_RSC]           = "resource not found",
-       [NVME_RDMA_CM_INVALID_IRD]      = "invalid IRD",
-       [NVME_RDMA_CM_INVALID_ORD]      = "Invalid ORD",
-};
-
-static const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
-{
-       size_t index = status;
-
-       if (index < ARRAY_SIZE(nvme_rdma_cm_status_strs) &&
-           nvme_rdma_cm_status_strs[index])
-               return nvme_rdma_cm_status_strs[index];
-       else
-               return "unrecognized reason";
-};
-
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.
@@ -155,6 +133,10 @@ struct nvme_rdma_ctrl {
                struct sockaddr addr;
                struct sockaddr_in addr_in;
        };
+       union {
+               struct sockaddr src_addr;
+               struct sockaddr_in src_addr_in;
+       };
 
        struct nvme_ctrl        ctrl;
 };
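
The anonymous union added above mirrors the existing one for the
destination address: nvme_rdma_parse_ipaddr() fills in the IPv4 view
(src_addr_in), and the CM code passes the generic view (src_addr) to
rdma_resolve_addr(). A minimal userspace sketch of the same aliasing
pattern, with illustrative names that are not from the driver:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int main(void)
    {
            /* IPv4 view for parsing, generic view for API calls;
             * both alias the same storage, as in the driver's union.
             */
            union {
                    struct sockaddr    sa;
                    struct sockaddr_in sin;
            } src;

            memset(&src, 0, sizeof(src));
            src.sin.sin_family = AF_INET;
            inet_pton(AF_INET, "192.168.1.20", &src.sin.sin_addr);

            printf("family via generic view: %d\n", src.sa.sa_family);
            return 0;
    }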
@@ -567,6 +549,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
                int idx, size_t queue_size)
 {
        struct nvme_rdma_queue *queue;
+       struct sockaddr *src_addr = NULL;
        int ret;
 
        queue = &ctrl->queues[idx];
@@ -589,7 +572,10 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
        }
 
        queue->cm_error = -ETIMEDOUT;
-       ret = rdma_resolve_addr(queue->cm_id, NULL, &ctrl->addr,
+       if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
+               src_addr = &ctrl->src_addr;
+
+       ret = rdma_resolve_addr(queue->cm_id, src_addr, &ctrl->addr,
                        NVME_RDMA_CONNECT_TIMEOUT_MS);
        if (ret) {
                dev_info(ctrl->ctrl.device,
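
rdma_resolve_addr() has always taken an optional source address; the
4.10-era prototype (from rdma/rdma_cm.h) is roughly:

    int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
                          struct sockaddr *dst_addr, int timeout_ms);

Passing NULL, the old behaviour here, lets the RDMA CM pick the source
address by routing; a non-NULL src_addr pins the connection to the
user-supplied local address, which is what makes host_traddr useful on
multi-homed hosts.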
@@ -981,8 +967,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
 }
 
 static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
-               struct request *rq, unsigned int map_len,
-               struct nvme_command *c)
+               struct request *rq, struct nvme_command *c)
 {
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_rdma_device *dev = queue->device;
@@ -1014,9 +999,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
        }
 
        if (count == 1) {
-               if (rq_data_dir(rq) == WRITE &&
-                   map_len <= nvme_rdma_inline_data_size(queue) &&
-                   nvme_rdma_queue_idx(queue))
+               if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
+                   blk_rq_payload_bytes(rq) <=
+                               nvme_rdma_inline_data_size(queue))
                        return nvme_rdma_map_sg_inline(queue, req, c);
 
                if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
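
The reordered test also swaps the old map_len value, previously
computed once via nvme_map_len() and threaded through as a parameter,
for blk_rq_payload_bytes(rq), which the block layer now provides and
which yields the same byte count on demand. A compact restatement of
the post-change inline-eligibility test, using names visible in this
diff (the helper name itself is illustrative):

    /* In-capsule (inline) data is only worth it for small writes,
     * and never on the admin queue (nvme_rdma_queue_idx() == 0).
     */
    static bool nvme_rdma_use_inline(struct nvme_rdma_queue *queue,
                    struct request *rq)
    {
            return rq_data_dir(rq) == WRITE &&
                   nvme_rdma_queue_idx(queue) &&
                   blk_rq_payload_bytes(rq) <=
                                nvme_rdma_inline_data_size(queue);
    }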
@@ -1422,9 +1407,9 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
                struct request *rq)
 {
        if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
-               struct nvme_command *cmd = (struct nvme_command *)rq->cmd;
+               struct nvme_command *cmd = nvme_req(rq)->cmd;
 
-               if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
+               if (!blk_rq_is_passthrough(rq) ||
                    cmd->common.opcode != nvme_fabrics_command ||
                    cmd->fabrics.fctype != nvme_fabrics_type_connect)
                        return false;
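
While a queue is not yet marked NVME_RDMA_Q_LIVE, the only command let
through is the fabrics Connect, which always arrives as a block-layer
passthrough request; everything else is rejected. The rewrite tracks
the 4.11 block-layer rework: blk_rq_is_passthrough() replaces the
rq->cmd_type == REQ_TYPE_DRV_PRIV test, and the NVMe command now sits
in the driver PDU reached via nvme_req(rq), essentially a
blk_mq_rq_to_pdu() wrapper, rather than in the removed rq->cmd field.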
@@ -1444,7 +1429,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_command *c = sqe->data;
        bool flush = false;
        struct ib_device *dev;
-       unsigned int map_len;
        int ret;
 
        WARN_ON_ONCE(rq->tag < 0);
@@ -1462,8 +1446,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        blk_mq_start_request(rq);
 
-       map_len = nvme_map_len(rq);
-       ret = nvme_rdma_map_data(queue, rq, map_len, c);
+       ret = nvme_rdma_map_data(queue, rq, c);
        if (ret < 0) {
                dev_err(queue->ctrl->ctrl.device,
                             "Failed to map data (%d)\n", ret);
@@ -1474,7 +1457,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        ib_dma_sync_single_for_device(dev, sqe->dma,
                        sizeof(struct nvme_command), DMA_TO_DEVICE);
 
-       if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
+       if (req_op(rq) == REQ_OP_FLUSH)
                flush = true;
        ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
                        req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
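
The REQ_TYPE_FS half of the old flush test is gone for the same
reason: once request types are expressed through req_op(), a
passthrough request can never report REQ_OP_FLUSH, so checking the op
alone is sufficient.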
@@ -1525,7 +1508,7 @@ static void nvme_rdma_complete_rq(struct request *rq)
                        return;
                }
 
-               if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+               if (blk_rq_is_passthrough(rq))
                        error = rq->errors;
                else
                        error = nvme_error_status(rq->errors);
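
The same passthrough conversion applies on the completion side:
passthrough (ioctl/fabrics) requests hand back the raw rq->errors
value so callers see the NVMe status unmodified, while filesystem
requests are translated by nvme_error_status() into a generic errno.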
@@ -1908,6 +1891,16 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                goto out_free_ctrl;
        }
 
+       if (opts->mask & NVMF_OPT_HOST_TRADDR) {
+               ret = nvme_rdma_parse_ipaddr(&ctrl->src_addr_in,
+                               opts->host_traddr);
+               if (ret) {
+                       pr_err("malformed src IP address passed: %s\n",
+                              opts->host_traddr);
+                       goto out_free_ctrl;
+               }
+       }
+
        if (opts->mask & NVMF_OPT_TRSVCID) {
                u16 port;
 
@@ -2019,7 +2012,8 @@ out_free_ctrl:
 static struct nvmf_transport_ops nvme_rdma_transport = {
        .name           = "rdma",
        .required_opts  = NVMF_OPT_TRADDR,
-       .allowed_opts   = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY,
+       .allowed_opts   = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
+                         NVMF_OPT_HOST_TRADDR,
        .create_ctrl    = nvme_rdma_create_ctrl,
 };
 
@@ -2066,8 +2060,7 @@ static int __init nvme_rdma_init_module(void)
                return ret;
        }
 
-       nvmf_register_transport(&nvme_rdma_transport);
-       return 0;
+       return nvmf_register_transport(&nvme_rdma_transport);
 }
 
 static void __exit nvme_rdma_cleanup_module(void)
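
The init-path change above makes nvmf_register_transport()'s return
value count: if transport registration fails, module load now fails
with that error instead of completing with no usable transport.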