nvme: use driver pdu command for passthrough
author    Keith Busch <kbusch@kernel.org>
          Wed, 17 Mar 2021 20:37:03 +0000 (13:37 -0700)
committer Christoph Hellwig <hch@lst.de>
          Fri, 2 Apr 2021 16:48:27 +0000 (18:48 +0200)
All nvme transport drivers preallocate an nvme command for each request.
Have nvme_setup_cmd() use that preallocated command instead of requiring
drivers to pass a pointer to it. All nvme drivers must initialize the
generic nvme_request 'cmd' to point to the transport's preallocated
nvme_command.
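
For illustration, the per-driver side of that contract is a single
assignment in the transport's blk-mq .init_request callback. A minimal
sketch, assuming a hypothetical "foo" transport whose per-request pdu
embeds the command (the real hooks are the init_request changes in the
diff below):

    /* Sketch only: per-request pdu of a hypothetical foo transport. */
    struct foo_request {
            struct nvme_request     req;    /* must be first so nvme_req() works */
            struct nvme_command     cmd;    /* preallocated per-request SQE */
    };

    static int foo_init_request(struct blk_mq_tag_set *set, struct request *rq,
                    unsigned int hctx_idx, unsigned int numa_node)
    {
            struct foo_request *iod = blk_mq_rq_to_pdu(rq);

            /* Point the generic request at the transport's preallocated command. */
            nvme_req(rq)->cmd = &iod->cmd;
            return 0;
    }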

The generic nvme_request cmd pointer had previously been used only as a
temporary copy for passthrough commands. Since it now points to the
command that gets dispatched, passthrough commands must set it up
directly before the request is executed.
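
A minimal sketch of what that setup amounts to, condensed from the
nvme_init_request() hunk in core.c below (nvme_stage_passthrough_cmd is a
hypothetical helper name, not part of the patch):

    /* Stage a user-built passthrough SQE in the preallocated command. */
    static void nvme_stage_passthrough_cmd(struct request *req,
                    struct nvme_command *cmd)
    {
            /* passthru commands should let the driver set the SGL flags */
            cmd->common.flags &= ~NVME_CMD_SGL_ALL;
            memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
    }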

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/loop.c

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 17c4ca5918172b57aea3670360cc93ae8255daef..c3f94eb9066913be46bedb0fabc73515aee74474 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -575,6 +575,9 @@ EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
 
 static inline void nvme_clear_nvme_request(struct request *req)
 {
+       struct nvme_command *cmd = nvme_req(req)->cmd;
+
+       memset(cmd, 0, sizeof(*cmd));
        nvme_req(req)->retries = 0;
        nvme_req(req)->flags = 0;
        req->rq_flags |= RQF_DONTPREP;
@@ -593,9 +596,12 @@ static inline void nvme_init_request(struct request *req,
        else /* no queuedata implies admin queue */
                req->timeout = NVME_ADMIN_TIMEOUT;
 
+       /* passthru commands should let the driver set the SGL flags */
+       cmd->common.flags &= ~NVME_CMD_SGL_ALL;
+
        req->cmd_flags |= REQ_FAILFAST_DRIVER;
        nvme_clear_nvme_request(req);
-       nvme_req(req)->cmd = cmd;
+       memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
 }
 
 struct request *nvme_alloc_request(struct request_queue *q,
@@ -724,14 +730,6 @@ static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
                req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
 }
 
-static inline void nvme_setup_passthrough(struct request *req,
-               struct nvme_command *cmd)
-{
-       memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
-       /* passthru commands should let the driver set the SGL flags */
-       cmd->common.flags &= ~NVME_CMD_SGL_ALL;
-}
-
 static inline void nvme_setup_flush(struct nvme_ns *ns,
                struct nvme_command *cmnd)
 {
@@ -886,19 +884,18 @@ void nvme_cleanup_cmd(struct request *req)
 }
 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
 
-blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
-               struct nvme_command *cmd)
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
 {
+       struct nvme_command *cmd = nvme_req(req)->cmd;
        blk_status_t ret = BLK_STS_OK;
 
        if (!(req->rq_flags & RQF_DONTPREP))
                nvme_clear_nvme_request(req);
 
-       memset(cmd, 0, sizeof(*cmd));
        switch (req_op(req)) {
        case REQ_OP_DRV_IN:
        case REQ_OP_DRV_OUT:
-               nvme_setup_passthrough(req, cmd);
+               /* these are setup prior to execution in nvme_init_request() */
                break;
        case REQ_OP_FLUSH:
                nvme_setup_flush(ns, cmd);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index fcf6fd83d08ddd90e676525f647f1c13c5c6b94c..f54ffb792acc9b68ad17de5ef4a4393a3b17f4d3 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2128,6 +2128,7 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
        op->op.fcp_req.first_sgl = op->sgl;
        op->op.fcp_req.private = &op->priv[0];
        nvme_req(rq)->ctrl = &ctrl->ctrl;
+       nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;
        return res;
 }
 
@@ -2759,8 +2760,6 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_fc_ctrl *ctrl = queue->ctrl;
        struct request *rq = bd->rq;
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-       struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
-       struct nvme_command *sqe = &cmdiu->sqe;
        enum nvmefc_fcp_datadir io_dir;
        bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
        u32 data_len;
@@ -2770,7 +2769,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
            !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
                return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
-       ret = nvme_setup_cmd(ns, rq, sqe);
+       ret = nvme_setup_cmd(ns, rq);
        if (ret)
                return ret;
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 76de7ed55d90a80148583282b88b02205487be42..b0863c59fac46fa6541550816a96e4d35da65ea0 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -623,8 +623,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, blk_mq_req_flags_t flags);
 void nvme_cleanup_cmd(struct request *req);
-blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
-               struct nvme_command *cmd);
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 1a0912146c7498671ba975b8e12208d8b1b14134..d47bb18b976ad72c4c23c3db8621294947f0c473 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -430,6 +430,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
        iod->nvmeq = nvmeq;
 
        nvme_req(req)->ctrl = &dev->ctrl;
+       nvme_req(req)->cmd = &iod->cmd;
        return 0;
 }
 
@@ -932,7 +933,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
                return BLK_STS_IOERR;
 
-       ret = nvme_setup_cmd(ns, req, cmnd);
+       ret = nvme_setup_cmd(ns, req);
        if (ret)
                return ret;
 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 9c710839b03a4965fa22e16f3f06e527c01b670f..d6bc43e6c8a649bbd56b43870e4e63e2fb7ec136 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -314,6 +314,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
                        NVME_RDMA_DATA_SGL_SIZE;
 
        req->queue = queue;
+       nvme_req(rq)->cmd = req->sqe.data;
 
        return 0;
 }
@@ -2038,7 +2039,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct request *rq = bd->rq;
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_rdma_qe *sqe = &req->sqe;
-       struct nvme_command *c = sqe->data;
+       struct nvme_command *c = nvme_req(rq)->cmd;
        struct ib_device *dev;
        bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
        blk_status_t ret;
@@ -2061,7 +2062,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        ib_dma_sync_single_for_cpu(dev, sqe->dma,
                        sizeof(struct nvme_command), DMA_TO_DEVICE);
 
-       ret = nvme_setup_cmd(ns, rq, c);
+       ret = nvme_setup_cmd(ns, rq);
        if (ret)
                goto unmap_qe;
 
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 735e768f9f43645f17557f8b26a3212a75032efb..7de9bee1e5e968cea4d656919399e157843a7be6 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -417,6 +417,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
 {
        struct nvme_tcp_ctrl *ctrl = set->driver_data;
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+       struct nvme_tcp_cmd_pdu *pdu;
        int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
        struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
        u8 hdgst = nvme_tcp_hdgst_len(queue);
@@ -427,8 +428,10 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
        if (!req->pdu)
                return -ENOMEM;
 
+       pdu = req->pdu;
        req->queue = queue;
        nvme_req(rq)->ctrl = &ctrl->ctrl;
+       nvme_req(rq)->cmd = &pdu->cmd;
 
        return 0;
 }
@@ -2259,7 +2262,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
        u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
        blk_status_t ret;
 
-       ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
+       ret = nvme_setup_cmd(ns, rq);
        if (ret)
                return ret;
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index a7f97c8b2f771241ffaa366f819fc3ee1f535016..b741854fc957a6f48adeb49d169305b721b8fd6c 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -141,7 +141,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
                return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
 
-       ret = nvme_setup_cmd(ns, req, &iod->cmd);
+       ret = nvme_setup_cmd(ns, req);
        if (ret)
                return ret;
 
@@ -205,8 +205,10 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
                unsigned int numa_node)
 {
        struct nvme_loop_ctrl *ctrl = set->driver_data;
+       struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 
        nvme_req(req)->ctrl = &ctrl->ctrl;
+       nvme_req(req)->cmd = &iod->cmd;
        return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
                        (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
 }