/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>

#include "nvmf_internal.h"
#include "request.h"
#include "session.h"
#include "subsystem.h"
#include "transport.h"

#include "spdk/nvme.h"
#include "spdk/nvmf_spec.h"
#include "spdk/trace.h"

#include "spdk_internal/log.h"

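/*
 * Fill in the generic completion fields (SQ ID, phase bit, command ID) and
 * hand the finished request back to the transport so the completion can be
 * delivered to the host.
 */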
int
spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	response->sqid = 0;
	response->status.p = 0;
	response->cid = req->cmd->nvme_cmd.cid;

	SPDK_TRACELOG(SPDK_TRACE_NVMF,
		      "cpl: cid=%u cdw0=0x%08x rsvd1=%u status=0x%04x\n",
		      response->cid, response->cdw0, response->rsvd1,
		      *(uint16_t *)&response->status);

	if (req->conn->transport->req_complete(req)) {
		SPDK_ERRLOG("Transport request completion error!\n");
		return -1;
	}

	return 0;
}

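/*
 * Fabrics Property Get/Set handlers: forward the property capsule to the
 * session layer, which performs the actual read or write of the virtual
 * controller properties and fills in the response.
 */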
static spdk_nvmf_request_exec_status
nvmf_process_property_get(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_prop_get_rsp *response;
	struct spdk_nvmf_fabric_prop_get_cmd *cmd;

	cmd = &req->cmd->prop_get_cmd;
	response = &req->rsp->prop_get_rsp;

	spdk_nvmf_property_get(req->conn->sess, cmd, response);

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static spdk_nvmf_request_exec_status
nvmf_process_property_set(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_prop_set_cmd *cmd;

	cmd = &req->cmd->prop_set_cmd;

	spdk_nvmf_property_set(req->conn->sess, cmd, &req->rsp->nvme_cpl);

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

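/*
 * Finish a Connect request that has been accepted: attach the connection to
 * a session and send the Connect response capsule, which carries the
 * assigned controller ID, back to the host.
 */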
void
spdk_nvmf_handle_connect(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_cmd *connect = &req->cmd->connect_cmd;
	struct spdk_nvmf_fabric_connect_data *connect_data = (struct spdk_nvmf_fabric_connect_data *)
			req->data;
	struct spdk_nvmf_fabric_connect_rsp *response = &req->rsp->connect_rsp;
	struct spdk_nvmf_conn *conn = req->conn;

	spdk_nvmf_session_connect(conn, connect, connect_data, response);

	SPDK_TRACELOG(SPDK_TRACE_NVMF, "connect capsule response: cntlid = 0x%04x\n",
		      response->status_code_specific.success.cntlid);

	spdk_nvmf_request_complete(req);
	return;
}

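/*
 * Build an "invalid parameter" Connect response, pointing the host at the
 * offending field: iattr selects command vs. data, ipo is the byte offset of
 * the parameter in error.
 */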
static void
invalid_connect_response(struct spdk_nvmf_fabric_connect_rsp *rsp, uint8_t iattr, uint16_t ipo)
{
	rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
	rsp->status_code_specific.invalid.iattr = iattr;
	rsp->status_code_specific.invalid.ipo = ipo;
}

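/*
 * Validate a Connect command: check the record format and capsule data
 * length, require NUL-terminated SUBNQN/HOSTNQN, look up the target
 * subsystem, and verify the host is allowed to connect.  Valid requests are
 * handed to the subsystem's connect callback and completed asynchronously.
 */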
static spdk_nvmf_request_exec_status
nvmf_process_connect(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_fabric_connect_data *data = (struct spdk_nvmf_fabric_connect_data *)
			req->data;
	struct spdk_nvmf_fabric_connect_cmd *cmd = &req->cmd->connect_cmd;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	void *end;

#define INVALID_CONNECT_DATA(field) invalid_connect_response(rsp, 1, offsetof(struct spdk_nvmf_fabric_connect_data, field))

	if (cmd->recfmt != 0) {
		SPDK_ERRLOG("Connect command unsupported RECFMT %u\n", cmd->recfmt);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->length < sizeof(struct spdk_nvmf_fabric_connect_data)) {
		SPDK_ERRLOG("Connect command data length 0x%x too small\n", req->length);
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* Ensure that subnqn and hostnqn are null terminated */
	end = memchr(data->subnqn, '\0', SPDK_NVMF_NQN_MAX_LEN);
	if (!end) {
		SPDK_ERRLOG("Connect SUBNQN is not null terminated\n");
		INVALID_CONNECT_DATA(subnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	end = memchr(data->hostnqn, '\0', SPDK_NVMF_NQN_MAX_LEN);
	if (!end) {
		SPDK_ERRLOG("Connect HOSTNQN is not null terminated\n");
		INVALID_CONNECT_DATA(hostnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	subsystem = nvmf_find_subsystem(data->subnqn);
	if (subsystem == NULL) {
		SPDK_ERRLOG("Could not find subsystem '%s'\n", data->subnqn);
		INVALID_CONNECT_DATA(subnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (!spdk_nvmf_subsystem_host_allowed(subsystem, data->hostnqn)) {
		SPDK_ERRLOG("Subsystem '%s' does not allow host '%s'\n", data->subnqn, data->hostnqn);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_HOST;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	subsystem->connect_cb(subsystem->cb_ctx, req);

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

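/*
 * Dispatch a Fabrics command based on connection state: with no session yet,
 * only Connect is accepted; on an established admin queue, Property Get/Set
 * are handled; on I/O queues, no further Fabrics commands are supported.
 */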
static spdk_nvmf_request_exec_status
nvmf_process_fabrics_command(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_conn *conn = req->conn;
	struct spdk_nvmf_capsule_cmd *cap_hdr;

	cap_hdr = &req->cmd->nvmf_cmd;

	if (conn->sess == NULL) {
		/* No session established yet; the only valid command is Connect */
		if (cap_hdr->fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT) {
			return nvmf_process_connect(req);
		} else {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "Got fctype 0x%x, expected Connect\n",
				      cap_hdr->fctype);
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	} else if (conn->type == CONN_TYPE_AQ) {
		/*
		 * Session is established, and this is an admin queue.
		 * Disallow Connect and allow other fabrics commands.
		 */
		switch (cap_hdr->fctype) {
		case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET:
			return nvmf_process_property_set(req);
		case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET:
			return nvmf_process_property_get(req);
		default:
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "recv capsule header type invalid [%x]!\n",
				      cap_hdr->fctype);
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	} else {
		/* Session is established, and this is an I/O queue */
		/* For now, no I/O-specific Fabrics commands are implemented (other than Connect) */
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "Unexpected I/O fctype 0x%x\n", cap_hdr->fctype);
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}

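/*
 * Debug-log the received capsule: the Fabrics command type or NVMe opcode,
 * selected command fields, and a decoded view of the SGL descriptor for
 * commands that transfer data.
 */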
static void
nvmf_trace_command(union nvmf_h2c_msg *h2c_msg, enum conn_type conn_type)
{
	struct spdk_nvmf_capsule_cmd *cap_hdr = &h2c_msg->nvmf_cmd;
	struct spdk_nvme_cmd *cmd = &h2c_msg->nvme_cmd;
	struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1;
	uint8_t opc;

	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
		opc = cap_hdr->fctype;
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "%s Fabrics cmd: fctype 0x%02x cid %u\n",
			      conn_type == CONN_TYPE_AQ ? "Admin" : "I/O",
			      cap_hdr->fctype, cap_hdr->cid);
	} else {
		opc = cmd->opc;
		SPDK_TRACELOG(SPDK_TRACE_NVMF, "%s cmd: opc 0x%02x fuse %u cid %u nsid %u cdw10 0x%08x\n",
			      conn_type == CONN_TYPE_AQ ? "Admin" : "I/O",
			      cmd->opc, cmd->fuse, cmd->cid, cmd->nsid, cmd->cdw10);
		if (cmd->mptr) {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "mptr 0x%" PRIx64 "\n", cmd->mptr);
		}
		if (cmd->psdt != SPDK_NVME_PSDT_SGL_MPTR_CONTIG &&
		    cmd->psdt != SPDK_NVME_PSDT_SGL_MPTR_SGL) {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "psdt %u\n", cmd->psdt);
		}
	}

	if (spdk_nvme_opc_get_data_transfer(opc) != SPDK_NVME_DATA_NONE) {
		if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
			SPDK_TRACELOG(SPDK_TRACE_NVMF,
				      "SGL: Keyed%s: addr 0x%" PRIx64 " key 0x%x len 0x%x\n",
				      sgl->generic.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY ? " (Inv)" : "",
				      sgl->address, sgl->keyed.key, sgl->keyed.length);
		} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK) {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "SGL: Data block: %s 0x%" PRIx64 " len 0x%x\n",
				      sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET ? "offs" : "addr",
				      sgl->address, sgl->unkeyed.length);
		} else {
			SPDK_TRACELOG(SPDK_TRACE_NVMF, "SGL type 0x%x subtype 0x%x\n",
				      sgl->generic.type, sgl->generic.subtype);
		}
	}
}

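/*
 * Execute a received request capsule.  Fabrics commands are dispatched above;
 * all other commands require an enabled controller and are routed to the
 * subsystem's admin or I/O command handler.  Requests that finish inline are
 * completed here; asynchronous requests are completed later via
 * spdk_nvmf_request_complete().
 */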
int
spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_session *session = req->conn->sess;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	spdk_nvmf_request_exec_status status;

	nvmf_trace_command(req->cmd, req->conn->type);

	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
		status = nvmf_process_fabrics_command(req);
	} else if (session == NULL || !session->vcprop.cc.bits.en) {
		/* Only Fabric commands are allowed when the controller is disabled */
		SPDK_ERRLOG("Non-Fabric command sent to disabled controller\n");
		rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	} else {
		struct spdk_nvmf_subsystem *subsystem;

		subsystem = session->subsys;
		assert(subsystem != NULL);

		if (subsystem->is_removed) {
			rsp->status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
			status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		} else if (req->conn->type == CONN_TYPE_AQ) {
			status = subsystem->ops->process_admin_cmd(req);
		} else {
			status = subsystem->ops->process_io_cmd(req);
		}
	}

	switch (status) {
	case SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE:
		return spdk_nvmf_request_complete(req);
	case SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS:
		return 0;
	default:
		SPDK_ERRLOG("Unknown request exec status: 0x%x\n", status);
		return -1;
	}

	return 0;
}