ceph/src/spdk/lib/nvmf/request.c
/*-
 * BSD LICENSE
 *
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"
#include "transport.h"

#include "spdk/thread.h"
#include "spdk/likely.h"
#include "spdk/nvme.h"
#include "spdk/nvmf_spec.h"
#include "spdk/trace.h"

#include "spdk_internal/assert.h"
#include "spdk_internal/log.h"

/*
 * If the qpair is being deactivated and its last outstanding request has
 * just been released, fire the state-change callback so deactivation can
 * finish. Otherwise the qpair must still be active.
 */
static void
spdk_nvmf_qpair_request_cleanup(struct spdk_nvmf_qpair *qpair)
{
	if (qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING) {
		assert(qpair->state_cb != NULL);

		if (TAILQ_EMPTY(&qpair->outstanding)) {
			qpair->state_cb(qpair->state_cb_arg, 0);
		}
	} else {
		assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE);
	}
}

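/*
 * Illustrative sketch, not part of the original file: the kind of
 * state-change callback that spdk_nvmf_qpair_request_cleanup() invokes
 * once the outstanding list drains during deactivation.  The name
 * example_qpair_deactivated() is hypothetical; which teardown call a
 * real callback makes depends on the caller that started deactivation.
 */
static void
example_qpair_deactivated(void *cb_arg, int status)
{
	struct spdk_nvmf_qpair *qpair = cb_arg;

	(void)status;
	/* Every outstanding request has been freed or completed, so it is
	 * now safe to release the transport-level queue pair resources. */
	spdk_nvmf_transport_qpair_fini(qpair);
}
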
/*
 * Release a request back to its transport without sending a completion
 * to the host, then let the qpair finish deactivating if this was its
 * last outstanding request.
 */
int
spdk_nvmf_request_free(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;

	TAILQ_REMOVE(&qpair->outstanding, req, link);
	if (spdk_nvmf_transport_req_free(req)) {
		SPDK_ERRLOG("Unable to free transport level request resources.\n");
	}

	spdk_nvmf_qpair_request_cleanup(qpair);

	return 0;
}

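/*
 * Illustrative sketch, not part of the original file: dropping a request
 * that will never be completed back to the host, e.g. while tearing down
 * a queue pair.  example_abort_request() is a hypothetical helper.
 */
static void
example_abort_request(struct spdk_nvmf_request *req)
{
	/* Unlike spdk_nvmf_request_complete(), no response capsule is
	 * generated; the request's resources are simply released. */
	spdk_nvmf_request_free(req);
}
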
/*
 * Fill in the common completion fields (clear sqid and the phase bit,
 * echo the command's cid), log the completion, and hand the response
 * capsule back to the transport.
 */
int
spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_qpair *qpair;

	rsp->sqid = 0;
	rsp->status.p = 0;
	rsp->cid = req->cmd->nvme_cmd.cid;

	qpair = req->qpair;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF,
		      "cpl: cid=%u cdw0=0x%08x rsvd1=%u status=0x%04x\n",
		      rsp->cid, rsp->cdw0, rsp->rsvd1,
		      *(uint16_t *)&rsp->status);

	TAILQ_REMOVE(&qpair->outstanding, req, link);
	if (spdk_nvmf_transport_req_complete(req)) {
		SPDK_ERRLOG("Transport request completion error!\n");
	}

	spdk_nvmf_qpair_request_cleanup(qpair);

	return 0;
}

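/*
 * Illustrative sketch, not part of the original file: how a backend I/O
 * callback typically finishes a request that spdk_nvmf_request_exec()
 * placed on the outstanding list.  example_backend_done() is a
 * hypothetical helper, not an SPDK API.
 */
static void
example_backend_done(struct spdk_nvmf_request *req, bool success)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	/* Only the status fields need to be filled in here;
	 * spdk_nvmf_request_complete() resets sqid and the phase bit and
	 * copies the cid from the command. */
	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
	rsp->status.sc = success ? SPDK_NVME_SC_SUCCESS : SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	spdk_nvmf_request_complete(req);
}
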
/*
 * Debug-log the interesting fields of an incoming capsule: the fabrics
 * command type or NVMe opcode and, for commands that transfer data, the
 * SGL descriptor that describes the payload.
 */
static void
nvmf_trace_command(union nvmf_h2c_msg *h2c_msg, bool is_admin_queue)
{
	struct spdk_nvmf_capsule_cmd *cap_hdr = &h2c_msg->nvmf_cmd;
	struct spdk_nvme_cmd *cmd = &h2c_msg->nvme_cmd;
	struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1;
	uint8_t opc;

	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
		opc = cap_hdr->fctype;
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "%s Fabrics cmd: fctype 0x%02x cid %u\n",
			      is_admin_queue ? "Admin" : "I/O",
			      cap_hdr->fctype, cap_hdr->cid);
	} else {
		opc = cmd->opc;
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "%s cmd: opc 0x%02x fuse %u cid %u nsid %u cdw10 0x%08x\n",
			      is_admin_queue ? "Admin" : "I/O",
			      cmd->opc, cmd->fuse, cmd->cid, cmd->nsid, cmd->cdw10);
		if (cmd->mptr) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "mptr 0x%" PRIx64 "\n", cmd->mptr);
		}
		if (cmd->psdt != SPDK_NVME_PSDT_SGL_MPTR_CONTIG &&
		    cmd->psdt != SPDK_NVME_PSDT_SGL_MPTR_SGL) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "psdt %u\n", cmd->psdt);
		}
	}

	if (spdk_nvme_opc_get_data_transfer(opc) != SPDK_NVME_DATA_NONE) {
		if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF,
				      "SGL: Keyed%s: addr 0x%" PRIx64 " key 0x%x len 0x%x\n",
				      sgl->generic.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY ? " (Inv)" : "",
				      sgl->address, sgl->keyed.key, sgl->keyed.length);
		} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "SGL: Data block: %s 0x%" PRIx64 " len 0x%x\n",
				      sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET ? "offs" : "addr",
				      sgl->address, sgl->unkeyed.length);
		} else {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "SGL type 0x%x subtype 0x%x\n",
				      sgl->generic.type, sgl->generic.subtype);
		}
	}
}

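/*
 * Illustrative sketch, not part of the original file: filling in the
 * keyed data block SGL that the tracing above prints for RDMA-style
 * transports.  example_fill_keyed_sgl() is a hypothetical helper; real
 * initiators build this descriptor when they post a command capsule.
 */
static void
example_fill_keyed_sgl(struct spdk_nvme_cmd *cmd, uint64_t addr,
		       uint32_t rkey, uint32_t length)
{
	struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1;

	/* Keyed SGLs carry a remote memory key alongside address/length. */
	cmd->psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.key = rkey;
	sgl->keyed.length = length;
	sgl->address = addr;
}
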
/*
 * Execute a new request: fail it immediately if the qpair is no longer
 * active, queue it if the owning subsystem is paused, and otherwise
 * track it on the outstanding list and dispatch it to the fabrics,
 * admin, or I/O command handler.
 */
void
spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	spdk_nvmf_request_exec_status status;

	nvmf_trace_command(req->cmd, spdk_nvmf_qpair_is_admin_queue(qpair));

	if (qpair->state != SPDK_NVMF_QPAIR_ACTIVE) {
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		/* Place the request on the outstanding list so we can keep track of it */
		TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
		spdk_nvmf_request_complete(req);
		return;
	}

	/* Check if the subsystem is paused (if there is a subsystem) */
	if (qpair->ctrlr) {
		struct spdk_nvmf_subsystem_poll_group *sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];

		if (sgroup->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
			/* The subsystem is not currently active. Queue this request. */
			TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
			return;
		}
	}

	/* Place the request on the outstanding list so we can keep track of it */
	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);

	if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC)) {
		status = spdk_nvmf_ctrlr_process_fabrics_cmd(req);
	} else if (spdk_unlikely(spdk_nvmf_qpair_is_admin_queue(qpair))) {
		status = spdk_nvmf_ctrlr_process_admin_cmd(req);
	} else {
		status = spdk_nvmf_ctrlr_process_io_cmd(req);
	}

	if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		spdk_nvmf_request_complete(req);
	}
}
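
/*
 * Illustrative sketch, not part of the original file: the call a
 * transport poller makes once it has parsed an incoming capsule into
 * req->cmd and bound req to its qpair.  example_handle_capsule() is
 * hypothetical; real transports (e.g. RDMA) also stage any host data
 * transfer before executing the request.
 */
static void
example_handle_capsule(struct spdk_nvmf_request *req)
{
	/* Dispatch, pause queueing, and error completion for inactive
	 * qpairs are all handled inside spdk_nvmf_request_exec(). */
	spdk_nvmf_request_exec(req);
}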