 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "spdk/stdinc.h"

#include "nvmf_internal.h"
#include "transport.h"

#include "spdk/bit_array.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/trace.h"
#include "spdk/nvme_spec.h"
#include "spdk/nvmf_cmd.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/version.h"

#include "spdk_internal/log.h"

#define MIN_KEEP_ALIVE_TIMEOUT_IN_MS 10000
#define NVMF_DISC_KATO_IN_MS 120000
#define KAS_TIME_UNIT_IN_MS 100
#define KAS_DEFAULT_VALUE (MIN_KEEP_ALIVE_TIMEOUT_IN_MS / KAS_TIME_UNIT_IN_MS)
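
/*
 * With the values above, KAS_DEFAULT_VALUE works out to 100 (10000 ms / 100 ms),
 * so the KATO granularity used below (KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS) is
 * 10000 ms. For example, a host-requested KATO of 15000 ms rounds up to 20000 ms.
 */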
/*
 * Report the SPDK version as the firmware revision.
 * SPDK_VERSION_STRING won't fit into FR (only 8 bytes), so try to fit the most important parts.
 */
#define FW_VERSION SPDK_VERSION_MAJOR_STRING SPDK_VERSION_MINOR_STRING SPDK_VERSION_PATCH_STRING
/*
 * Support for custom admin command handlers
 */
struct spdk_nvmf_custom_admin_cmd {
	spdk_nvmf_custom_cmd_hdlr hdlr;
	uint32_t nsid; /* nsid to forward */
};

static struct spdk_nvmf_custom_admin_cmd g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_MAX_OPC + 1];

static void _nvmf_request_complete(void *ctx);
static inline void
nvmf_invalid_connect_response(struct spdk_nvmf_fabric_connect_rsp *rsp,
			      uint8_t iattr, uint16_t ipo)
{
	rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
	rsp->status_code_specific.invalid.iattr = iattr;
	rsp->status_code_specific.invalid.ipo = ipo;
}

#define SPDK_NVMF_INVALID_CONNECT_CMD(rsp, field)	\
	nvmf_invalid_connect_response(rsp, 0, offsetof(struct spdk_nvmf_fabric_connect_cmd, field))
#define SPDK_NVMF_INVALID_CONNECT_DATA(rsp, field)	\
	nvmf_invalid_connect_response(rsp, 1, offsetof(struct spdk_nvmf_fabric_connect_data, field))
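
/*
 * In the NVMe-oF CONNECT response, IATTR selects which structure the offending
 * field lives in (0 = connect command, 1 = connect data) and IPO carries the
 * byte offset of that field, which is why the macros above pass offsetof().
 */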
static void
nvmf_ctrlr_stop_keep_alive_timer(struct spdk_nvmf_ctrlr *ctrlr)
{
	if (!ctrlr) {
		SPDK_ERRLOG("Controller is NULL\n");
		return;
	}

	if (ctrlr->keep_alive_poller == NULL) {
		return;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Stop keep alive poller\n");
	spdk_poller_unregister(&ctrlr->keep_alive_poller);
}
static void
nvmf_ctrlr_disconnect_qpairs_done(struct spdk_io_channel_iter *i, int status)
{
	if (status == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "ctrlr disconnect qpairs complete successfully\n");
	} else {
		SPDK_ERRLOG("Fail to disconnect ctrlr qpairs\n");
	}
}
static void
_nvmf_ctrlr_disconnect_qpairs_on_pg(struct spdk_io_channel_iter *i, bool include_admin)
{
	int rc = 0;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_qpair *qpair, *temp_qpair;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_poll_group *group;

	ctrlr = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, temp_qpair) {
		if (qpair->ctrlr == ctrlr && (include_admin || !nvmf_qpair_is_admin_queue(qpair))) {
			rc = spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
			if (rc) {
				SPDK_ERRLOG("Qpair disconnect failed\n");
			}
		}
	}
}
static void
nvmf_ctrlr_disconnect_qpairs_on_pg(struct spdk_io_channel_iter *i)
{
	/* Disconnect all qpairs (admin included) on this poll group, then move
	 * the for_each_channel iteration on to the next channel.
	 */
	_nvmf_ctrlr_disconnect_qpairs_on_pg(i, true);
	spdk_for_each_channel_continue(i, 0);
}

static void
nvmf_ctrlr_disconnect_io_qpairs_on_pg(struct spdk_io_channel_iter *i)
{
	_nvmf_ctrlr_disconnect_qpairs_on_pg(i, false);
	spdk_for_each_channel_continue(i, 0);
}
static int
nvmf_ctrlr_keep_alive_poll(void *ctx)
{
	uint64_t keep_alive_timeout_tick;
	uint64_t now = spdk_get_ticks();
	struct spdk_nvmf_ctrlr *ctrlr = ctx;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Polling ctrlr keep alive timeout\n");

	/* If the Keep alive feature is in use and the timer expires */
	keep_alive_timeout_tick = ctrlr->last_keep_alive_tick +
				  ctrlr->feat.keep_alive_timer.bits.kato * spdk_get_ticks_hz() / UINT64_C(1000);
	if (now > keep_alive_timeout_tick) {
		SPDK_NOTICELOG("Disconnecting host from subsystem %s due to keep alive timeout.\n",
			       ctrlr->subsys->subnqn);
		/* set the Controller Fatal Status bit to '1' */
		if (ctrlr->vcprop.csts.bits.cfs == 0) {
			ctrlr->vcprop.csts.bits.cfs = 1;

			/*
			 * disconnect qpairs, terminate Transport connection
			 * destroy ctrlr, break the host to controller association
			 * disconnect qpairs with qpair->ctrlr == ctrlr
			 */
			spdk_for_each_channel(ctrlr->subsys->tgt,
					      nvmf_ctrlr_disconnect_qpairs_on_pg,
					      ctrlr,
					      nvmf_ctrlr_disconnect_qpairs_done);
		}
	}

	return SPDK_POLLER_BUSY;
}
static void
nvmf_ctrlr_start_keep_alive_timer(struct spdk_nvmf_ctrlr *ctrlr)
{
	if (!ctrlr) {
		SPDK_ERRLOG("Controller is NULL\n");
		return;
	}

	/* if cleared to 0 then the Keep Alive Timer is disabled */
	if (ctrlr->feat.keep_alive_timer.bits.kato != 0) {

		ctrlr->last_keep_alive_tick = spdk_get_ticks();

		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Ctrlr add keep alive poller\n");
		ctrlr->keep_alive_poller = SPDK_POLLER_REGISTER(nvmf_ctrlr_keep_alive_poll, ctrlr,
					   ctrlr->feat.keep_alive_timer.bits.kato * 1000);
	}
}
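
/*
 * SPDK_POLLER_REGISTER takes its period in microseconds, so kato * 1000 fires the
 * poller once per KATO interval (kato is in milliseconds). The deadline check in
 * nvmf_ctrlr_keep_alive_poll() then works in ticks:
 *
 *   deadline_tick = last_keep_alive_tick + kato_ms * spdk_get_ticks_hz() / 1000
 */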
static void
ctrlr_add_qpair_and_update_rsp(struct spdk_nvmf_qpair *qpair,
			       struct spdk_nvmf_ctrlr *ctrlr,
			       struct spdk_nvmf_fabric_connect_rsp *rsp)
{
	assert(ctrlr->admin_qpair->group->thread == spdk_get_thread());

	/* check if we would exceed ctrlr connection limit */
	if (qpair->qid >= spdk_bit_array_capacity(ctrlr->qpair_mask)) {
		SPDK_ERRLOG("Requested QID %u but Max QID is %u\n",
			    qpair->qid, spdk_bit_array_capacity(ctrlr->qpair_mask) - 1);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER;
		return;
	}

	if (spdk_bit_array_get(ctrlr->qpair_mask, qpair->qid)) {
		SPDK_ERRLOG("Got I/O connect with duplicate QID %u\n", qpair->qid);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER;
		return;
	}

	qpair->ctrlr = ctrlr;
	spdk_bit_array_set(ctrlr->qpair_mask, qpair->qid);

	rsp->status.sc = SPDK_NVME_SC_SUCCESS;
	rsp->status_code_specific.success.cntlid = ctrlr->cntlid;
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "connect capsule response: cntlid = 0x%04x\n",
		      rsp->status_code_specific.success.cntlid);
}
static void
_nvmf_ctrlr_add_admin_qpair(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	ctrlr->admin_qpair = qpair;
	nvmf_ctrlr_start_keep_alive_timer(ctrlr);
	ctrlr_add_qpair_and_update_rsp(qpair, ctrlr, rsp);
	_nvmf_request_complete(req);
}
static void
_nvmf_subsystem_add_ctrlr(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	if (nvmf_subsystem_add_ctrlr(ctrlr->subsys, ctrlr)) {
		SPDK_ERRLOG("Unable to add controller to subsystem\n");
		spdk_bit_array_free(&ctrlr->qpair_mask);
		free(ctrlr);
		qpair->ctrlr = NULL;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		spdk_nvmf_request_complete(req);
		return;
	}

	spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_add_admin_qpair, req);
}
static void
nvmf_ctrlr_cdata_init(struct spdk_nvmf_transport *transport, struct spdk_nvmf_subsystem *subsystem,
		      struct spdk_nvmf_ctrlr_data *cdata)
{
	cdata->kas = KAS_DEFAULT_VALUE;
	cdata->sgls.supported = 1;
	cdata->sgls.keyed_sgl = 1;
	cdata->sgls.sgl_offset = 1;
	cdata->nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
	cdata->nvmf_specific.ioccsz += transport->opts.in_capsule_data_size / 16;
	cdata->nvmf_specific.iorcsz = sizeof(struct spdk_nvme_cpl) / 16;
	cdata->nvmf_specific.icdoff = 0; /* offset starts directly after SQE */
	cdata->nvmf_specific.ctrattr.ctrlr_model = SPDK_NVMF_CTRLR_MODEL_DYNAMIC;
	cdata->nvmf_specific.msdbd = 1;

	if (transport->ops->cdata_init) {
		transport->ops->cdata_init(transport, subsystem, cdata);
	}
}
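
/*
 * IOCCSZ/IORCSZ are reported in 16-byte units. For example, with the 64-byte SQE
 * and a 4096-byte in-capsule data limit, IOCCSZ = 64/16 + 4096/16 = 260, while
 * IORCSZ is always 16/16 = 1 (one 16-byte CQE).
 */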
static struct spdk_nvmf_ctrlr *
nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
		  struct spdk_nvmf_request *req,
		  struct spdk_nvmf_fabric_connect_cmd *connect_cmd,
		  struct spdk_nvmf_fabric_connect_data *connect_data)
{
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_transport *transport;

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		SPDK_ERRLOG("Memory allocation failed\n");
		return NULL;
	}

	TAILQ_INIT(&ctrlr->log_head);
	ctrlr->subsys = subsystem;
	ctrlr->thread = req->qpair->group->thread;

	transport = req->qpair->transport;
	ctrlr->qpair_mask = spdk_bit_array_create(transport->opts.max_qpairs_per_ctrlr);
	if (!ctrlr->qpair_mask) {
		SPDK_ERRLOG("Failed to allocate controller qpair mask\n");
		free(ctrlr);
		return NULL;
	}

	nvmf_ctrlr_cdata_init(transport, subsystem, &ctrlr->cdata);

	/*
	 * KAS: This field indicates the granularity of the Keep Alive Timer in 100ms units.
	 * If this field is cleared to 0h, then Keep Alive is not supported.
	 */
	if (ctrlr->cdata.kas) {
		ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up(connect_cmd->kato,
				KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS) *
				KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS;
	}

	ctrlr->feat.async_event_configuration.bits.ns_attr_notice = 1;
	ctrlr->feat.volatile_write_cache.bits.wce = 1;

	if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
		/*
		 * If keep-alive timeout is not set, discovery controllers use some
		 * arbitrary high value in order to cleanup stale discovery sessions
		 *
		 * From the 1.0a nvme-of spec:
		 * "The Keep Alive command is reserved for
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 *
		 * kato is in milliseconds.
		 */
		if (ctrlr->feat.keep_alive_timer.bits.kato == 0) {
			ctrlr->feat.keep_alive_timer.bits.kato = NVMF_DISC_KATO_IN_MS;
		}
	}

	/* Subtract 1 for admin queue, 1 for 0's based */
	ctrlr->feat.number_of_queues.bits.ncqr = transport->opts.max_qpairs_per_ctrlr - 1 - 1;
	ctrlr->feat.number_of_queues.bits.nsqr = transport->opts.max_qpairs_per_ctrlr - 1 - 1;

	spdk_uuid_copy(&ctrlr->hostid, (struct spdk_uuid *)connect_data->hostid);
	memcpy(ctrlr->hostnqn, connect_data->hostnqn, sizeof(ctrlr->hostnqn));

	ctrlr->vcprop.cap.raw = 0;
	ctrlr->vcprop.cap.bits.cqr = 1; /* NVMe-oF specification required */
	ctrlr->vcprop.cap.bits.mqes = transport->opts.max_queue_depth - 1; /* max queue depth */
	ctrlr->vcprop.cap.bits.ams = 0; /* optional arb mechanisms */
	ctrlr->vcprop.cap.bits.to = 1; /* ready timeout - 500 msec units */
	ctrlr->vcprop.cap.bits.dstrd = 0; /* fixed to 0 for NVMe-oF */
	ctrlr->vcprop.cap.bits.css = SPDK_NVME_CAP_CSS_NVM; /* NVM command set */
	ctrlr->vcprop.cap.bits.mpsmin = 0; /* 2 ^ (12 + mpsmin) == 4k */
	ctrlr->vcprop.cap.bits.mpsmax = 0; /* 2 ^ (12 + mpsmax) == 4k */

	/* Version Supported: 1.3 */
	ctrlr->vcprop.vs.bits.mjr = 1;
	ctrlr->vcprop.vs.bits.mnr = 3;
	ctrlr->vcprop.vs.bits.ter = 0;

	ctrlr->vcprop.cc.raw = 0;
	ctrlr->vcprop.cc.bits.en = 0; /* Init controller disabled */

	ctrlr->vcprop.csts.raw = 0;
	ctrlr->vcprop.csts.bits.rdy = 0; /* Init controller as not ready */

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "cap 0x%" PRIx64 "\n", ctrlr->vcprop.cap.raw);
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "vs 0x%x\n", ctrlr->vcprop.vs.raw);
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "cc 0x%x\n", ctrlr->vcprop.cc.raw);
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "csts 0x%x\n", ctrlr->vcprop.csts.raw);

	ctrlr->dif_insert_or_strip = transport->opts.dif_insert_or_strip;

	req->qpair->ctrlr = ctrlr;
	spdk_thread_send_msg(subsystem->thread, _nvmf_subsystem_add_ctrlr, req);

	return ctrlr;
}
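
/*
 * Note that controller creation finishes asynchronously: registration with the
 * subsystem happens on the subsystem thread (_nvmf_subsystem_add_ctrlr), which
 * then bounces back to the controller thread to attach the admin qpair and
 * complete the CONNECT request (_nvmf_ctrlr_add_admin_qpair).
 */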
static void
_nvmf_ctrlr_destruct(void *ctx)
{
	struct spdk_nvmf_ctrlr *ctrlr = ctx;
	struct spdk_nvmf_reservation_log *log, *log_tmp;

	nvmf_ctrlr_stop_keep_alive_timer(ctrlr);

	TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) {
		TAILQ_REMOVE(&ctrlr->log_head, log, link);
		free(log);
	}
	free(ctrlr);
}

void
nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
{
	nvmf_subsystem_remove_ctrlr(ctrlr->subsys, ctrlr);

	spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_destruct, ctrlr);
}
static void
nvmf_ctrlr_add_io_qpair(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	/* Unit test will check qpair->ctrlr after calling spdk_nvmf_ctrlr_connect.
	 * For error case, the value should be NULL. So set it to NULL at first.
	 */
	qpair->ctrlr = NULL;

	if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
		SPDK_ERRLOG("I/O connect not allowed on discovery controller\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	if (!ctrlr->vcprop.cc.bits.en) {
		SPDK_ERRLOG("Got I/O connect before ctrlr was enabled\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	if (1u << ctrlr->vcprop.cc.bits.iosqes != sizeof(struct spdk_nvme_cmd)) {
		SPDK_ERRLOG("Got I/O connect with invalid IOSQES %u\n",
			    ctrlr->vcprop.cc.bits.iosqes);
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	if (1u << ctrlr->vcprop.cc.bits.iocqes != sizeof(struct spdk_nvme_cpl)) {
		SPDK_ERRLOG("Got I/O connect with invalid IOCQES %u\n",
			    ctrlr->vcprop.cc.bits.iocqes);
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, qid);
		goto end;
	}

	ctrlr_add_qpair_and_update_rsp(qpair, ctrlr, rsp);
end:
	spdk_nvmf_request_complete(req);
}
static void
_nvmf_ctrlr_add_io_qpair(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_fabric_connect_data *data = req->data;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_qpair *admin_qpair;
	struct spdk_nvmf_tgt *tgt = qpair->transport->tgt;
	struct spdk_nvmf_subsystem *subsystem;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Connect I/O Queue for controller id 0x%x\n", data->cntlid);

	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, data->subnqn);
	/* We already checked this in spdk_nvmf_ctrlr_connect */
	assert(subsystem != NULL);

	ctrlr = nvmf_subsystem_get_ctrlr(subsystem, data->cntlid);
	if (ctrlr == NULL) {
		SPDK_ERRLOG("Unknown controller ID 0x%x\n", data->cntlid);
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, cntlid);
		spdk_nvmf_request_complete(req);
		return;
	}

	admin_qpair = ctrlr->admin_qpair;
	qpair->ctrlr = ctrlr;
	spdk_thread_send_msg(admin_qpair->group->thread, nvmf_ctrlr_add_io_qpair, req);
}
static bool
nvmf_qpair_access_allowed(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_subsystem *subsystem,
			  const char *hostnqn)
{
	struct spdk_nvme_transport_id listen_trid = {};

	if (!spdk_nvmf_subsystem_host_allowed(subsystem, hostnqn)) {
		SPDK_ERRLOG("Subsystem '%s' does not allow host '%s'\n", subsystem->subnqn, hostnqn);
		return false;
	}

	if (spdk_nvmf_qpair_get_listen_trid(qpair, &listen_trid)) {
		SPDK_ERRLOG("Subsystem '%s' is unable to enforce access control due to an internal error.\n",
			    subsystem->subnqn);
		return false;
	}

	if (!spdk_nvmf_subsystem_listener_allowed(subsystem, &listen_trid)) {
		SPDK_ERRLOG("Subsystem '%s' does not allow host '%s' to connect at this address.\n",
			    subsystem->subnqn, hostnqn);
		return false;
	}

	return true;
}
static int
_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_data *data = req->data;
	struct spdk_nvmf_fabric_connect_cmd *cmd = &req->cmd->connect_cmd;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_transport *transport = qpair->transport;
	struct spdk_nvmf_ctrlr *ctrlr;
	struct spdk_nvmf_subsystem *subsystem;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "recfmt 0x%x qid %u sqsize %u\n",
		      cmd->recfmt, cmd->qid, cmd->sqsize);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Connect data:\n");
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "  cntlid:  0x%04x\n", data->cntlid);
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "  hostid: %08x-%04x-%04x-%02x%02x-%04x%08x ***\n",
		      ntohl(*(uint32_t *)&data->hostid[0]),
		      ntohs(*(uint16_t *)&data->hostid[4]),
		      ntohs(*(uint16_t *)&data->hostid[6]),
		      data->hostid[8],
		      data->hostid[9],
		      ntohs(*(uint16_t *)&data->hostid[10]),
		      ntohl(*(uint32_t *)&data->hostid[12]));
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "  subnqn: \"%s\"\n", data->subnqn);
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "  hostnqn: \"%s\"\n", data->hostnqn);

	subsystem = spdk_nvmf_tgt_find_subsystem(transport->tgt, data->subnqn);
	if (!subsystem) {
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (cmd->recfmt != 0) {
		SPDK_ERRLOG("Connect command unsupported RECFMT %u\n", cmd->recfmt);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/*
	 * SQSIZE is a 0-based value, so it must be at least 1 (minimum queue depth is 2) and
	 * strictly less than max_aq_depth (admin queues) or max_queue_depth (io queues).
	 */
	if (cmd->sqsize == 0) {
		SPDK_ERRLOG("Invalid SQSIZE = 0\n");
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (cmd->qid == 0) {
		if (cmd->sqsize >= transport->opts.max_aq_depth) {
			SPDK_ERRLOG("Invalid SQSIZE for admin queue %u (min 1, max %u)\n",
				    cmd->sqsize, transport->opts.max_aq_depth - 1);
			SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	} else if (cmd->sqsize >= transport->opts.max_queue_depth) {
		SPDK_ERRLOG("Invalid SQSIZE %u (min 1, max %u)\n",
			    cmd->sqsize, transport->opts.max_queue_depth - 1);
		SPDK_NVMF_INVALID_CONNECT_CMD(rsp, sqsize);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	qpair->sq_head_max = cmd->sqsize;
	qpair->qid = cmd->qid;

	if (0 == qpair->qid) {
		qpair->group->stat.admin_qpairs++;
	} else {
		qpair->group->stat.io_qpairs++;
	}

	if (cmd->qid == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Connect Admin Queue for controller ID 0x%x\n", data->cntlid);

		if (data->cntlid != 0xFFFF) {
			/* This NVMf target only supports dynamic mode. */
			SPDK_ERRLOG("The NVMf target only supports dynamic mode (CNTLID = 0x%x).\n", data->cntlid);
			SPDK_NVMF_INVALID_CONNECT_DATA(rsp, cntlid);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		/* Establish a new ctrlr */
		ctrlr = nvmf_ctrlr_create(subsystem, req, cmd, data);
		if (!ctrlr) {
			SPDK_ERRLOG("nvmf_ctrlr_create() failed\n");
			rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	} else {
		spdk_thread_send_msg(subsystem->thread, _nvmf_ctrlr_add_io_qpair, req);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	}
}
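
/*
 * SQSIZE is 0's based, so with e.g. max_queue_depth = 128 the valid I/O queue
 * range is 1 (two entries) through 127 (128 entries); SQSIZE == 128 is rejected
 * by the checks above.
 */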
static inline bool
nvmf_request_is_fabric_connect(struct spdk_nvmf_request *req)
{
	return req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC &&
	       req->cmd->nvmf_cmd.fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT;
}
static struct spdk_nvmf_subsystem_poll_group *
nvmf_subsystem_pg_from_connect_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_data *data;
	struct spdk_nvmf_subsystem *subsystem;
	struct spdk_nvmf_tgt *tgt;

	assert(nvmf_request_is_fabric_connect(req));
	assert(req->qpair->ctrlr == NULL);

	data = req->data;
	tgt = req->qpair->transport->tgt;

	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, data->subnqn);
	if (subsystem == NULL) {
		return NULL;
	}

	return &req->qpair->group->sgroups[subsystem->id];
}
int
spdk_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_subsystem_poll_group *sgroup;
	enum spdk_nvmf_request_exec_status status;

	sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
	if (!sgroup) {
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
		status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		goto out;
	}

	sgroup->io_outstanding++;
	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);

	status = _nvmf_ctrlr_connect(req);

out:
	if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		_nvmf_request_complete(req);
	}

	return status;
}
static int
nvmf_ctrlr_cmd_connect(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fabric_connect_data *data = req->data;
	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
	struct spdk_nvmf_transport *transport = req->qpair->transport;
	struct spdk_nvmf_subsystem *subsystem;

	if (req->length < sizeof(struct spdk_nvmf_fabric_connect_data)) {
		SPDK_ERRLOG("Connect command data length 0x%x too small\n", req->length);
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	subsystem = spdk_nvmf_tgt_find_subsystem(transport->tgt, data->subnqn);
	if (!subsystem) {
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if ((subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE) ||
	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSING) ||
	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED) ||
	    (subsystem->state == SPDK_NVMF_SUBSYSTEM_DEACTIVATING)) {
		SPDK_ERRLOG("Subsystem '%s' is not ready\n", subsystem->subnqn);
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_CONTROLLER_BUSY;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* Ensure that hostnqn is null terminated */
	if (!memchr(data->hostnqn, '\0', SPDK_NVMF_NQN_MAX_LEN + 1)) {
		SPDK_ERRLOG("Connect HOSTNQN is not null terminated\n");
		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, hostnqn);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (!nvmf_qpair_access_allowed(req->qpair, subsystem, data->hostnqn)) {
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_HOST;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return _nvmf_ctrlr_connect(req);
}
static void
nvmf_ctrlr_cc_reset_done(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_nvmf_ctrlr *ctrlr = spdk_io_channel_iter_get_ctx(i);

	if (status < 0) {
		SPDK_ERRLOG("Fail to disconnect io ctrlr qpairs\n");
		assert(false);
	}

	/* Only a subset of the registers are cleared out on a reset */
	ctrlr->vcprop.cc.raw = 0;
	ctrlr->vcprop.csts.raw = 0;
}
const struct spdk_nvmf_registers *
spdk_nvmf_ctrlr_get_regs(struct spdk_nvmf_ctrlr *ctrlr)
{
	return &ctrlr->vcprop;
}

static uint64_t
nvmf_prop_get_cap(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.cap.raw;
}

static uint64_t
nvmf_prop_get_vs(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.vs.raw;
}

static uint64_t
nvmf_prop_get_cc(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.cc.raw;
}
static bool
nvmf_prop_set_cc(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	union spdk_nvme_cc_register cc, diff;

	cc.raw = value;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "cur CC: 0x%08x\n", ctrlr->vcprop.cc.raw);
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "new CC: 0x%08x\n", cc.raw);

	/*
	 * Calculate which bits changed between the current and new CC.
	 * Mark each bit as 0 once it is handled to determine if any unhandled bits were changed.
	 */
	diff.raw = cc.raw ^ ctrlr->vcprop.cc.raw;

	if (diff.bits.en) {
		if (cc.bits.en) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Property Set CC Enable!\n");
			ctrlr->vcprop.cc.bits.en = 1;
			ctrlr->vcprop.csts.bits.rdy = 1;
		} else {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Property Set CC Disable!\n");
			ctrlr->vcprop.cc.bits.en = 0;
			spdk_for_each_channel(ctrlr->subsys->tgt,
					      nvmf_ctrlr_disconnect_io_qpairs_on_pg,
					      ctrlr,
					      nvmf_ctrlr_cc_reset_done);
		}
		diff.bits.en = 0;
	}

	if (diff.bits.shn) {
		if (cc.bits.shn == SPDK_NVME_SHN_NORMAL ||
		    cc.bits.shn == SPDK_NVME_SHN_ABRUPT) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Property Set CC Shutdown %u%ub!\n",
				      cc.bits.shn >> 1, cc.bits.shn & 1);
			ctrlr->vcprop.cc.bits.shn = cc.bits.shn;
			ctrlr->vcprop.cc.bits.en = 0;
			ctrlr->vcprop.csts.bits.rdy = 0;
			ctrlr->vcprop.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
		} else if (cc.bits.shn == 0) {
			ctrlr->vcprop.cc.bits.shn = 0;
		} else {
			SPDK_ERRLOG("Prop Set CC: Invalid SHN value %u%ub\n",
				    cc.bits.shn >> 1, cc.bits.shn & 1);
			return false;
		}
		diff.bits.shn = 0;
	}

	if (diff.bits.iosqes) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Prop Set IOSQES = %u (%u bytes)\n",
			      cc.bits.iosqes, 1u << cc.bits.iosqes);
		ctrlr->vcprop.cc.bits.iosqes = cc.bits.iosqes;
		diff.bits.iosqes = 0;
	}

	if (diff.bits.iocqes) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Prop Set IOCQES = %u (%u bytes)\n",
			      cc.bits.iocqes, 1u << cc.bits.iocqes);
		ctrlr->vcprop.cc.bits.iocqes = cc.bits.iocqes;
		diff.bits.iocqes = 0;
	}

	if (diff.bits.ams) {
		SPDK_ERRLOG("Arbitration Mechanism Selected (AMS) 0x%x not supported!\n", cc.bits.ams);
		return false;
	}

	if (diff.bits.mps) {
		SPDK_ERRLOG("Memory Page Size (MPS) %u KiB not supported!\n", (1 << (2 + cc.bits.mps)));
		return false;
	}

	if (diff.bits.css) {
		SPDK_ERRLOG("I/O Command Set Selected (CSS) 0x%x not supported!\n", cc.bits.css);
		return false;
	}

	if (diff.raw != 0) {
		SPDK_ERRLOG("Prop Set CC toggled reserved bits 0x%x!\n", diff.raw);
		return false;
	}

	return true;
}
static uint64_t
nvmf_prop_get_csts(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.csts.raw;
}

static uint64_t
nvmf_prop_get_aqa(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.aqa.raw;
}

static bool
nvmf_prop_set_aqa(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	union spdk_nvme_aqa_register aqa;

	aqa.raw = value;

	if (aqa.bits.asqs > ctrlr->vcprop.cap.bits.mqes ||
	    aqa.bits.acqs > ctrlr->vcprop.cap.bits.mqes) {
		return false;
	}

	ctrlr->vcprop.aqa.raw = value;

	return true;
}

static uint64_t
nvmf_prop_get_asq(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.asq;
}

static bool
nvmf_prop_set_asq_lower(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	ctrlr->vcprop.asq = (ctrlr->vcprop.asq & (0xFFFFFFFFULL << 32ULL)) | value;

	return true;
}

static bool
nvmf_prop_set_asq_upper(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	ctrlr->vcprop.asq = (ctrlr->vcprop.asq & 0xFFFFFFFFULL) | ((uint64_t)value << 32ULL);

	return true;
}

static uint64_t
nvmf_prop_get_acq(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->vcprop.acq;
}

static bool
nvmf_prop_set_acq_lower(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	ctrlr->vcprop.acq = (ctrlr->vcprop.acq & (0xFFFFFFFFULL << 32ULL)) | value;

	return true;
}

static bool
nvmf_prop_set_acq_upper(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
{
	ctrlr->vcprop.acq = (ctrlr->vcprop.acq & 0xFFFFFFFFULL) | ((uint64_t)value << 32ULL);

	return true;
}
struct nvmf_prop {
	uint32_t ofst;
	uint8_t size;
	char name[11];
	uint64_t (*get_cb)(struct spdk_nvmf_ctrlr *ctrlr);
	bool (*set_cb)(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value);
	bool (*set_upper_cb)(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value);
};

#define PROP(field, size, get_cb, set_cb, set_upper_cb) \
	{ \
		offsetof(struct spdk_nvme_registers, field), \
		size, \
		#field, \
		get_cb, set_cb, set_upper_cb \
	}

static const struct nvmf_prop nvmf_props[] = {
	PROP(cap,  8, nvmf_prop_get_cap,  NULL,                    NULL),
	PROP(vs,   4, nvmf_prop_get_vs,   NULL,                    NULL),
	PROP(cc,   4, nvmf_prop_get_cc,   nvmf_prop_set_cc,        NULL),
	PROP(csts, 4, nvmf_prop_get_csts, NULL,                    NULL),
	PROP(aqa,  4, nvmf_prop_get_aqa,  nvmf_prop_set_aqa,       NULL),
	PROP(asq,  8, nvmf_prop_get_asq,  nvmf_prop_set_asq_lower, nvmf_prop_set_asq_upper),
	PROP(acq,  8, nvmf_prop_get_acq,  nvmf_prop_set_acq_lower, nvmf_prop_set_acq_upper),
};
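
/*
 * Property access is table-driven: each entry maps a register offset/size to
 * callbacks. The 8-byte properties (asq, acq) provide separate lower/upper
 * setters so that 4-byte Property Set commands can target either half, while
 * cap has no setter at all because it is read-only.
 */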
static const struct nvmf_prop *
find_prop(uint32_t ofst, uint8_t size)
{
	size_t i;

	for (i = 0; i < SPDK_COUNTOF(nvmf_props); i++) {
		const struct nvmf_prop *prop = &nvmf_props[i];

		if ((ofst >= prop->ofst) && (ofst + size <= prop->ofst + prop->size)) {
			return prop;
		}
	}

	return NULL;
}
static int
nvmf_property_get(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_fabric_prop_get_cmd *cmd = &req->cmd->prop_get_cmd;
	struct spdk_nvmf_fabric_prop_get_rsp *response = &req->rsp->prop_get_rsp;
	const struct nvmf_prop *prop;
	uint8_t size;

	response->status.sc = 0;
	response->value.u64 = 0;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "size %d, offset 0x%x\n",
		      cmd->attrib.size, cmd->ofst);

	switch (cmd->attrib.size) {
	case SPDK_NVMF_PROP_SIZE_4:
		size = 4;
		break;
	case SPDK_NVMF_PROP_SIZE_8:
		size = 8;
		break;
	default:
		SPDK_ERRLOG("Invalid size value %d\n", cmd->attrib.size);
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	prop = find_prop(cmd->ofst, size);
	if (prop == NULL || prop->get_cb == NULL) {
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "name: %s\n", prop->name);

	response->value.u64 = prop->get_cb(ctrlr);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "response value: 0x%" PRIx64 "\n", response->value.u64);

	if (size != prop->size) {
		/* The size must be 4 and the prop->size is 8. Figure out which part of the property to read. */
		assert(size == 4);
		assert(prop->size == 8);

		if (cmd->ofst == prop->ofst) {
			/* Keep bottom 4 bytes only */
			response->value.u64 &= 0xFFFFFFFF;
		} else {
			/* Keep top 4 bytes only */
			response->value.u64 >>= 32;
		}
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
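
/*
 * Example: a 4-byte Property Get at offset 0x2C (the upper half of the 8-byte
 * ASQ register at 0x28) matches the asq entry in find_prop(), takes the
 * size != prop->size path above, and returns value.u64 >> 32.
 */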
static int
nvmf_property_set(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_fabric_prop_set_cmd *cmd = &req->cmd->prop_set_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	const struct nvmf_prop *prop;
	uint64_t value;
	uint8_t size;
	bool ret;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "size %d, offset 0x%x, value 0x%" PRIx64 "\n",
		      cmd->attrib.size, cmd->ofst, cmd->value.u64);

	switch (cmd->attrib.size) {
	case SPDK_NVMF_PROP_SIZE_4:
		size = 4;
		break;
	case SPDK_NVMF_PROP_SIZE_8:
		size = 8;
		break;
	default:
		SPDK_ERRLOG("Invalid size value %d\n", cmd->attrib.size);
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	prop = find_prop(cmd->ofst, size);
	if (prop == NULL || prop->set_cb == NULL) {
		SPDK_ERRLOG("Invalid offset 0x%x\n", cmd->ofst);
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "name: %s\n", prop->name);

	value = cmd->value.u64;

	if (prop->size == 4) {
		ret = prop->set_cb(ctrlr, (uint32_t)value);
	} else if (size != prop->size) {
		/* The size must be 4 and the prop->size is 8. Figure out which part of the property to write. */
		assert(size == 4);
		assert(prop->size == 8);

		if (cmd->ofst == prop->ofst) {
			ret = prop->set_cb(ctrlr, (uint32_t)value);
		} else {
			ret = prop->set_upper_cb(ctrlr, (uint32_t)value);
		}
	} else {
		ret = prop->set_cb(ctrlr, (uint32_t)value);
		if (ret) {
			ret = prop->set_upper_cb(ctrlr, (uint32_t)(value >> 32));
		}
	}

	if (!ret) {
		SPDK_ERRLOG("prop set_cb failed\n");
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		response->status.sc = SPDK_NVMF_FABRIC_SC_INVALID_PARAM;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
static int
nvmf_ctrlr_set_features_arbitration(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Arbitration (cdw11 = 0x%0x)\n", cmd->cdw11);

	ctrlr->feat.arbitration.raw = cmd->cdw11;
	ctrlr->feat.arbitration.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
static int
nvmf_ctrlr_set_features_power_management(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Power Management (cdw11 = 0x%0x)\n", cmd->cdw11);

	/* Only PS = 0 is allowed, since we report NPSS = 0 */
	if (cmd->cdw11_bits.feat_power_management.bits.ps != 0) {
		SPDK_ERRLOG("Invalid power state %u\n", cmd->cdw11_bits.feat_power_management.bits.ps);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ctrlr->feat.power_management.raw = cmd->cdw11;
	ctrlr->feat.power_management.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
static bool
temp_threshold_opts_valid(const union spdk_nvme_feat_temperature_threshold *opts)
{
	/*
	 * Valid TMPSEL values:
	 *  0000b - 1000b: temperature sensors
	 *  1111b: set all implemented temperature sensors
	 */
	if (opts->bits.tmpsel >= 9 && opts->bits.tmpsel != 15) {
		/* 1001b - 1110b: reserved */
		SPDK_ERRLOG("Invalid TMPSEL %u\n", opts->bits.tmpsel);
		return false;
	}

	/*
	 * Valid THSEL values:
	 *  00b: over temperature threshold
	 *  01b: under temperature threshold
	 */
	if (opts->bits.thsel > 1) {
		/* 10b - 11b: reserved */
		SPDK_ERRLOG("Invalid THSEL %u\n", opts->bits.thsel);
		return false;
	}

	return true;
}
static int
nvmf_ctrlr_set_features_temperature_threshold(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Temperature Threshold (cdw11 = 0x%0x)\n", cmd->cdw11);

	if (!temp_threshold_opts_valid(&cmd->cdw11_bits.feat_temp_threshold)) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* TODO: no sensors implemented - ignore new values */
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_get_features_temperature_threshold(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Get Features - Temperature Threshold (cdw11 = 0x%0x)\n", cmd->cdw11);

	if (!temp_threshold_opts_valid(&cmd->cdw11_bits.feat_temp_threshold)) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* TODO: no sensors implemented - return 0 for all thresholds */
	rsp->cdw0 = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
static int
nvmf_ctrlr_set_features_error_recovery(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Error Recovery (cdw11 = 0x%0x)\n", cmd->cdw11);

	if (cmd->cdw11_bits.feat_error_recovery.bits.dulbe) {
		/*
		 * Host is not allowed to set this bit, since we don't advertise it in
		 * Identify Namespace.
		 */
		SPDK_ERRLOG("Host set unsupported DULBE bit\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ctrlr->feat.error_recovery.raw = cmd->cdw11;
	ctrlr->feat.error_recovery.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
static int
nvmf_ctrlr_set_features_volatile_write_cache(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Volatile Write Cache (cdw11 = 0x%0x)\n", cmd->cdw11);

	ctrlr->feat.volatile_write_cache.raw = cmd->cdw11;
	ctrlr->feat.volatile_write_cache.bits.reserved = 0;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Volatile Write Cache %s\n",
		      ctrlr->feat.volatile_write_cache.bits.wce ? "Enabled" : "Disabled");
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
static int
nvmf_ctrlr_set_features_write_atomicity(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Write Atomicity (cdw11 = 0x%0x)\n", cmd->cdw11);

	ctrlr->feat.write_atomicity.raw = cmd->cdw11;
	ctrlr->feat.write_atomicity.bits.reserved = 0;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
static int
nvmf_ctrlr_set_features_host_identifier(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	SPDK_ERRLOG("Set Features - Host Identifier not allowed\n");
	response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_get_features_host_identifier(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Get Features - Host Identifier\n");

	if (!cmd->cdw11_bits.feat_host_identifier.bits.exhid) {
		/* NVMe over Fabrics requires EXHID=1 (128-bit/16-byte host ID) */
		SPDK_ERRLOG("Get Features - Host Identifier with EXHID=0 not allowed\n");
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->data == NULL || req->length < sizeof(ctrlr->hostid)) {
		SPDK_ERRLOG("Invalid data buffer for Get Features - Host Identifier\n");
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	spdk_uuid_copy((struct spdk_uuid *)req->data, &ctrlr->hostid);
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
static int
nvmf_ctrlr_get_features_reservation_notification_mask(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_ns *ns;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Get Features - Reservation Notification Mask\n");

	if (cmd->nsid == 0xffffffffu) {
		SPDK_ERRLOG("Get Features - Invalid Namespace ID\n");
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	if (ns == NULL) {
		SPDK_ERRLOG("Get Features - Invalid Namespace ID\n");
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	rsp->cdw0 = ns->mask;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_reservation_notification_mask(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_ns *ns;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Reservation Notification Mask\n");

	if (cmd->nsid == 0xffffffffu) {
		for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
		     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
			ns->mask = cmd->cdw11;
		}
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	if (ns == NULL) {
		SPDK_ERRLOG("Set Features - Invalid Namespace ID\n");
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	ns->mask = cmd->cdw11;

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
static int
nvmf_ctrlr_get_features_reservation_persistence(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvmf_ns *ns;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Get Features - Reservation Persistence\n");

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	/* NSID with 0xffffffffu also included */
	if (ns == NULL) {
		SPDK_ERRLOG("Get Features - Invalid Namespace ID\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	response->cdw0 = ns->ptpl_activated;

	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_set_features_reservation_persistence(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvmf_ns *ns;
	bool ptpl;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Reservation Persistence\n");

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	ptpl = cmd->cdw11_bits.feat_rsv_persistence.bits.ptpl;

	if (cmd->nsid != 0xffffffffu && ns && ns->ptpl_file) {
		ns->ptpl_activated = ptpl;
	} else if (cmd->nsid == 0xffffffffu) {
		for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns && ns->ptpl_file;
		     ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
			ns->ptpl_activated = ptpl;
		}
	} else {
		SPDK_ERRLOG("Set Features - Invalid Namespace ID or Reservation Configuration\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* TODO: Feature not changeable for now */
	response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	response->status.sc = SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
static int
nvmf_ctrlr_set_features_keep_alive_timer(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Keep Alive Timer (%u ms)\n", cmd->cdw11);

	/*
	 * If the host attempts to disable keep alive by setting KATO to 0h,
	 * a status value of Keep Alive Invalid shall be returned.
	 */
	if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato == 0) {
		rsp->status.sc = SPDK_NVME_SC_KEEP_ALIVE_INVALID;
	} else if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato < MIN_KEEP_ALIVE_TIMEOUT_IN_MS) {
		ctrlr->feat.keep_alive_timer.bits.kato = MIN_KEEP_ALIVE_TIMEOUT_IN_MS;
	} else {
		/* round up to milliseconds */
		ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up(
					cmd->cdw11_bits.feat_keep_alive_timer.bits.kato,
					KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS) *
				KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS;
	}

	/*
	 * If the keep alive timeout value was changed successfully,
	 * update the keep alive poller.
	 */
	if (cmd->cdw11_bits.feat_keep_alive_timer.bits.kato != 0) {
		if (ctrlr->keep_alive_poller != NULL) {
			spdk_poller_unregister(&ctrlr->keep_alive_poller);
		}
		ctrlr->keep_alive_poller = SPDK_POLLER_REGISTER(nvmf_ctrlr_keep_alive_poll, ctrlr,
					   ctrlr->feat.keep_alive_timer.bits.kato * 1000);
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Keep Alive Timer set to %u ms\n",
		      ctrlr->feat.keep_alive_timer.bits.kato);

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
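
/*
 * The rounding above snaps KATO to multiples of KAS_DEFAULT_VALUE * KAS_TIME_UNIT_IN_MS
 * (10000 ms): e.g. a requested KATO of 25000 ms becomes 30000 ms, and anything below
 * MIN_KEEP_ALIVE_TIMEOUT_IN_MS is clamped up to 10000 ms.
 */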
static int
nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint32_t count;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Number of Queues, cdw11 0x%x\n",
		      req->cmd->nvme_cmd.cdw11);

	count = spdk_bit_array_count_set(ctrlr->qpair_mask);
	/* verify that the controller is ready to process commands */
	if (count > 1) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Queue pairs already active!\n");
		rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
	} else {
		/*
		 * Ignore the value requested by the host -
		 * always return the pre-configured value based on max_qpairs_allowed.
		 */
		rsp->cdw0 = ctrlr->feat.number_of_queues.raw;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
static int
nvmf_ctrlr_set_features_async_event_configuration(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Async Event Configuration, cdw11 0x%08x\n",
		      cmd->cdw11);
	ctrlr->feat.async_event_configuration.raw = cmd->cdw11;
	ctrlr->feat.async_event_configuration.bits.reserved = 0;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
static int
nvmf_ctrlr_async_event_request(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_subsystem_poll_group *sgroup;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Async Event Request\n");

	/* Four asynchronous events are supported for now */
	if (ctrlr->nr_aer_reqs >= NVMF_MAX_ASYNC_EVENTS) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "AERL exceeded\n");
		rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		rsp->status.sc = SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (ctrlr->notice_event.bits.async_event_type ==
	    SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) {
		rsp->cdw0 = ctrlr->notice_event.raw;
		ctrlr->notice_event.raw = 0;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (ctrlr->reservation_event.bits.async_event_type ==
	    SPDK_NVME_ASYNC_EVENT_TYPE_IO) {
		rsp->cdw0 = ctrlr->reservation_event.raw;
		ctrlr->reservation_event.raw = 0;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* AER cmd is an exception */
	sgroup = &req->qpair->group->sgroups[ctrlr->subsys->id];
	assert(sgroup != NULL);
	sgroup->io_outstanding--;

	ctrlr->aer_req[ctrlr->nr_aer_reqs++] = req;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
static void
nvmf_get_firmware_slot_log_page(void *buffer, uint64_t offset, uint32_t length)
{
	struct spdk_nvme_firmware_page fw_page;
	size_t copy_len;

	memset(&fw_page, 0, sizeof(fw_page));
	fw_page.afi.active_slot = 1;
	fw_page.afi.next_reset_slot = 0;
	spdk_strcpy_pad(fw_page.revision[0], FW_VERSION, sizeof(fw_page.revision[0]), ' ');

	if (offset < sizeof(fw_page)) {
		copy_len = spdk_min(sizeof(fw_page) - offset, length);
		if (copy_len > 0) {
			memcpy(buffer, (const char *)&fw_page + offset, copy_len);
		}
	}
}
void
nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	uint16_t max_changes = SPDK_COUNTOF(ctrlr->changed_ns_list.ns_list);
	uint16_t i;
	bool found = false;

	for (i = 0; i < ctrlr->changed_ns_list_count; i++) {
		if (ctrlr->changed_ns_list.ns_list[i] == nsid) {
			/* nsid is already in the list */
			found = true;
			break;
		}
	}

	if (!found) {
		if (ctrlr->changed_ns_list_count == max_changes) {
			/* Out of space - set first entry to FFFFFFFFh and zero-fill the rest. */
			ctrlr->changed_ns_list.ns_list[0] = 0xFFFFFFFFu;
			for (i = 1; i < max_changes; i++) {
				ctrlr->changed_ns_list.ns_list[i] = 0;
			}
		} else {
			ctrlr->changed_ns_list.ns_list[ctrlr->changed_ns_list_count++] = nsid;
		}
	}
}
static void
nvmf_get_changed_ns_list_log_page(struct spdk_nvmf_ctrlr *ctrlr,
				  void *buffer, uint64_t offset, uint32_t length)
{
	size_t copy_length;

	if (offset < sizeof(ctrlr->changed_ns_list)) {
		copy_length = spdk_min(length, sizeof(ctrlr->changed_ns_list) - offset);
		if (copy_length) {
			memcpy(buffer, (char *)&ctrlr->changed_ns_list + offset, copy_length);
		}
	}

	/* Clear log page each time it is read */
	ctrlr->changed_ns_list_count = 0;
	memset(&ctrlr->changed_ns_list, 0, sizeof(ctrlr->changed_ns_list));
}
/* The structure can be modified if we provide support for other commands in future */
static const struct spdk_nvme_cmds_and_effect_log_page g_cmds_and_effect_log_page = {
	.admin_cmds_supported = {
		/* CSUPP, LBCC, NCC, NIC, CCC, CSE */
		/* Get Log Page */
		[SPDK_NVME_OPC_GET_LOG_PAGE]		= {1, 0, 0, 0, 0, 0, 0, 0},
		/* Identify */
		[SPDK_NVME_OPC_IDENTIFY]		= {1, 0, 0, 0, 0, 0, 0, 0},
		/* Abort */
		[SPDK_NVME_OPC_ABORT]			= {1, 0, 0, 0, 0, 0, 0, 0},
		/* Set Features */
		[SPDK_NVME_OPC_SET_FEATURES]		= {1, 0, 0, 0, 0, 0, 0, 0},
		/* Get Features */
		[SPDK_NVME_OPC_GET_FEATURES]		= {1, 0, 0, 0, 0, 0, 0, 0},
		/* Async Event Request */
		[SPDK_NVME_OPC_ASYNC_EVENT_REQUEST]	= {1, 0, 0, 0, 0, 0, 0, 0},
		/* Keep Alive */
		[SPDK_NVME_OPC_KEEP_ALIVE]		= {1, 0, 0, 0, 0, 0, 0, 0},
	},
	.io_cmds_supported = {
		/* FLUSH */
		[SPDK_NVME_OPC_FLUSH]			= {1, 1, 0, 0, 0, 0, 0, 0},
		/* WRITE */
		[SPDK_NVME_OPC_WRITE]			= {1, 1, 0, 0, 0, 0, 0, 0},
		/* READ */
		[SPDK_NVME_OPC_READ]			= {1, 0, 0, 0, 0, 0, 0, 0},
		/* WRITE ZEROES */
		[SPDK_NVME_OPC_WRITE_ZEROES]		= {1, 1, 0, 0, 0, 0, 0, 0},
		/* DATASET MANAGEMENT */
		[SPDK_NVME_OPC_DATASET_MANAGEMENT]	= {1, 1, 0, 0, 0, 0, 0, 0},
		/* COMPARE */
		[SPDK_NVME_OPC_COMPARE]			= {1, 0, 0, 0, 0, 0, 0, 0},
	},
};
static void
nvmf_get_cmds_and_effects_log_page(void *buffer,
				   uint64_t offset, uint32_t length)
{
	uint32_t page_size = sizeof(struct spdk_nvme_cmds_and_effect_log_page);
	size_t copy_len = 0;
	size_t zero_len = length;

	if (offset < page_size) {
		copy_len = spdk_min(page_size - offset, length);
		zero_len -= copy_len;
		memcpy(buffer, (char *)(&g_cmds_and_effect_log_page) + offset, copy_len);
	}

	if (zero_len) {
		memset((char *)buffer + copy_len, 0, zero_len);
	}
}
static void
nvmf_get_reservation_notification_log_page(struct spdk_nvmf_ctrlr *ctrlr,
		void *data, uint64_t offset, uint32_t length)
{
	uint32_t unit_log_len, avail_log_len, next_pos, copy_len;
	struct spdk_nvmf_reservation_log *log, *log_tmp;
	uint8_t *buf = data;

	unit_log_len = sizeof(struct spdk_nvme_reservation_notification_log);
	/* No available log, return 1 zeroed log page */
	if (!ctrlr->num_avail_log_pages) {
		memset(buf, 0, spdk_min(length, unit_log_len));
		return;
	}

	avail_log_len = ctrlr->num_avail_log_pages * unit_log_len;
	if (offset >= avail_log_len) {
		return;
	}

	next_pos = copy_len = 0;
	TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) {
		TAILQ_REMOVE(&ctrlr->log_head, log, link);
		ctrlr->num_avail_log_pages--;

		next_pos += unit_log_len;
		if (next_pos > offset) {
			copy_len = spdk_min(next_pos - offset, length);
			memcpy(buf, &log->log, copy_len);
			length -= copy_len;
			offset += copy_len;
			buf += copy_len;
		}
		free(log);

		if (length == 0) {
			break;
		}
	}
}
static int
nvmf_ctrlr_get_log_page(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	uint64_t offset, len;
	uint32_t numdl, numdu;
	uint8_t lid;

	if (req->data == NULL) {
		SPDK_ERRLOG("get log command with no buffer\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	offset = (uint64_t)cmd->cdw12 | ((uint64_t)cmd->cdw13 << 32);
	if (offset & 3) {
		SPDK_ERRLOG("Invalid log page offset 0x%" PRIx64 "\n", offset);
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	numdl = cmd->cdw10_bits.get_log_page.numdl;
	numdu = cmd->cdw11_bits.get_log_page.numdu;
	len = ((numdu << 16) + numdl + (uint64_t)1) * 4;
	if (len > req->length) {
		SPDK_ERRLOG("Get log page: len (%" PRIu64 ") > buf size (%u)\n",
			    len, req->length);
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	lid = cmd->cdw10_bits.get_log_page.lid;
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Get log page: LID=0x%02X offset=0x%" PRIx64 " len=0x%" PRIx64 "\n",
		      lid, offset, len);

	if (subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
		switch (lid) {
		case SPDK_NVME_LOG_DISCOVERY:
			nvmf_get_discovery_log_page(subsystem->tgt, ctrlr->hostnqn, req->iov, req->iovcnt, offset,
						    len);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		default:
			goto invalid_log_page;
		}
	} else {
		switch (lid) {
		case SPDK_NVME_LOG_ERROR:
		case SPDK_NVME_LOG_HEALTH_INFORMATION:
			/* TODO: actually fill out log page data */
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		case SPDK_NVME_LOG_FIRMWARE_SLOT:
			nvmf_get_firmware_slot_log_page(req->data, offset, len);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		case SPDK_NVME_LOG_COMMAND_EFFECTS_LOG:
			nvmf_get_cmds_and_effects_log_page(req->data, offset, len);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		case SPDK_NVME_LOG_CHANGED_NS_LIST:
			nvmf_get_changed_ns_list_log_page(ctrlr, req->data, offset, len);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		case SPDK_NVME_LOG_RESERVATION_NOTIFICATION:
			nvmf_get_reservation_notification_log_page(ctrlr, req->data, offset, len);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		default:
			goto invalid_log_page;
		}
	}

invalid_log_page:
	SPDK_ERRLOG("Unsupported Get Log Page 0x%02X\n", lid);
	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
int
spdk_nvmf_ctrlr_identify_ns(struct spdk_nvmf_ctrlr *ctrlr,
			    struct spdk_nvme_cmd *cmd,
			    struct spdk_nvme_cpl *rsp,
			    struct spdk_nvme_ns_data *nsdata)
{
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvmf_ns *ns;
	uint32_t max_num_blocks;

	if (cmd->nsid == 0 || cmd->nsid > subsystem->max_nsid) {
		SPDK_ERRLOG("Identify Namespace for invalid NSID %u\n", cmd->nsid);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ns = _nvmf_subsystem_get_ns(subsystem, cmd->nsid);
	if (ns == NULL || ns->bdev == NULL) {
		/*
		 * Inactive namespaces should return a zero filled data structure.
		 * The data buffer is already zeroed by nvmf_ctrlr_process_admin_cmd(),
		 * so we can just return early here.
		 */
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Identify Namespace for inactive NSID %u\n", cmd->nsid);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_SUCCESS;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	nvmf_bdev_ctrlr_identify_ns(ns, nsdata, ctrlr->dif_insert_or_strip);

	/* Due to bug in the Linux kernel NVMe driver we have to set noiob no larger than mdts */
	max_num_blocks = ctrlr->admin_qpair->transport->opts.max_io_size /
			 (1U << nsdata->lbaf[nsdata->flbas.format].lbads);
	if (nsdata->noiob > max_num_blocks) {
		nsdata->noiob = max_num_blocks;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
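/*
 * Example of the noiob clamp above, assuming a transport max_io_size of
 * 131072 bytes and a 512-byte LBA format (lbads = 9):
 *
 *     max_num_blocks = 131072 / (1U << 9) = 256
 *
 * A bdev-reported noiob above 256 blocks would advertise an optimal I/O
 * boundary larger than one MDTS-sized transfer, so it is trimmed here.
 */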
static void
nvmf_ctrlr_populate_oacs(struct spdk_nvmf_ctrlr *ctrlr,
			 struct spdk_nvme_ctrlr_data *cdata)
{
	cdata->oacs.virtualization_management =
		g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_VIRTUALIZATION_MANAGEMENT].hdlr != NULL;
	cdata->oacs.nvme_mi = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NVME_MI_SEND].hdlr != NULL
			      && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NVME_MI_RECEIVE].hdlr != NULL;
	cdata->oacs.directives = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DIRECTIVE_SEND].hdlr != NULL
				 && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DIRECTIVE_RECEIVE].hdlr != NULL;
	cdata->oacs.device_self_test =
		g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_DEVICE_SELF_TEST].hdlr != NULL;
	cdata->oacs.ns_manage = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NS_MANAGEMENT].hdlr != NULL
				&& g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_NS_ATTACHMENT].hdlr != NULL;
	cdata->oacs.firmware = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD].hdlr !=
			       NULL
			       && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FIRMWARE_COMMIT].hdlr != NULL;
	cdata->oacs.format =
		g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_FORMAT_NVM].hdlr != NULL;
	cdata->oacs.security = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_SECURITY_SEND].hdlr != NULL
			       && g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_SECURITY_RECEIVE].hdlr != NULL;
	cdata->oacs.get_lba_status = g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_GET_LBA_STATUS].hdlr !=
				     NULL;
}

int
spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_ctrlr_data *cdata)
{
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
	struct spdk_nvmf_transport *transport = ctrlr->admin_qpair->transport;

	/*
	 * Common fields for discovery and NVM subsystems
	 */
	spdk_strcpy_pad(cdata->fr, FW_VERSION, sizeof(cdata->fr), ' ');
	assert((transport->opts.max_io_size % 4096) == 0);
	cdata->mdts = spdk_u32log2(transport->opts.max_io_size / 4096);
	cdata->cntlid = ctrlr->cntlid;
	cdata->ver = ctrlr->vcprop.vs;
	cdata->aerl = NVMF_MAX_ASYNC_EVENTS - 1;
	cdata->lpa.edlp = 1;
	cdata->maxcmd = transport->opts.max_queue_depth;
	cdata->sgls = ctrlr->cdata.sgls;
	cdata->fuses.compare_and_write = 1;
	spdk_strcpy_pad(cdata->subnqn, subsystem->subnqn, sizeof(cdata->subnqn), '\0');

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "ctrlr data: maxcmd 0x%x\n", cdata->maxcmd);
	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "sgls data: 0x%x\n", from_le32(&cdata->sgls));

	/*
	 * NVM subsystem fields (reserved for discovery subsystems)
	 */
	if (subsystem->subtype == SPDK_NVMF_SUBTYPE_NVME) {
		spdk_strcpy_pad(cdata->mn, spdk_nvmf_subsystem_get_mn(subsystem), sizeof(cdata->mn), ' ');
		spdk_strcpy_pad(cdata->sn, spdk_nvmf_subsystem_get_sn(subsystem), sizeof(cdata->sn), ' ');
		cdata->kas = ctrlr->cdata.kas;

		cdata->cmic.multi_port = 1;
		cdata->cmic.multi_host = 1;
		cdata->oaes.ns_attribute_notices = 1;
		cdata->ctratt.host_id_exhid_supported = 1;
		/* TODO: Concurrent execution of multiple abort commands. */
		cdata->acl = 0;
		cdata->frmw.slot1_ro = 1;
		cdata->frmw.num_slots = 1;

		cdata->lpa.celp = 1; /* Command Effects log page supported */

		cdata->sqes.min = 6;
		cdata->sqes.max = 6;
		cdata->cqes.min = 4;
		cdata->cqes.max = 4;
		cdata->nn = subsystem->max_nsid;
		cdata->vwc.present = 1;
		cdata->vwc.flush_broadcast = SPDK_NVME_FLUSH_BROADCAST_NOT_SUPPORTED;

		cdata->nvmf_specific = ctrlr->cdata.nvmf_specific;

		cdata->oncs.dsm = nvmf_ctrlr_dsm_supported(ctrlr);
		cdata->oncs.write_zeroes = nvmf_ctrlr_write_zeroes_supported(ctrlr);
		cdata->oncs.reservations = 1;

		nvmf_ctrlr_populate_oacs(ctrlr, cdata);

		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "ext ctrlr data: ioccsz 0x%x\n",
			      cdata->nvmf_specific.ioccsz);
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "ext ctrlr data: iorcsz 0x%x\n",
			      cdata->nvmf_specific.iorcsz);
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "ext ctrlr data: icdoff 0x%x\n",
			      cdata->nvmf_specific.icdoff);
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "ext ctrlr data: ctrattr 0x%x\n",
			      *(uint8_t *)&cdata->nvmf_specific.ctrattr);
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "ext ctrlr data: msdbd 0x%x\n",
			      cdata->nvmf_specific.msdbd);
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
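/*
 * Sanity check for the MDTS encoding above: mdts is a power-of-two exponent
 * in units of the 4096-byte minimum page size. With an assumed transport
 * max_io_size of 131072:
 *
 *     cdata->mdts = spdk_u32log2(131072 / 4096) = 5, i.e. 2^5 * 4096 = 128 KiB
 *
 * The assert() only guarantees 4096-alignment; a max_io_size that is not a
 * power of two would round mdts down to the next smaller power of two.
 */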
static int
nvmf_ctrlr_identify_active_ns_list(struct spdk_nvmf_subsystem *subsystem,
				   struct spdk_nvme_cmd *cmd,
				   struct spdk_nvme_cpl *rsp,
				   struct spdk_nvme_ns_list *ns_list)
{
	struct spdk_nvmf_ns *ns;
	uint32_t count = 0;

	if (cmd->nsid >= 0xfffffffeUL) {
		SPDK_ERRLOG("Identify Active Namespace List with invalid NSID %u\n", cmd->nsid);
		rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->opts.nsid <= cmd->nsid) {
			continue;
		}

		ns_list->ns_list[count++] = ns->opts.nsid;
		if (count == SPDK_COUNTOF(ns_list->ns_list)) {
			break;
		}
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static void
_add_ns_id_desc(void **buf_ptr, size_t *buf_remain,
		enum spdk_nvme_nidt type,
		const void *data, size_t data_size)
{
	struct spdk_nvme_ns_id_desc *desc;
	size_t desc_size = sizeof(*desc) + data_size;

	/*
	 * These should never fail in practice, since all valid NS ID descriptors
	 * should be defined so that they fit in the available 4096-byte buffer.
	 */
	assert(data_size > 0);
	assert(data_size <= UINT8_MAX);
	assert(desc_size < *buf_remain);
	if (data_size == 0 || data_size > UINT8_MAX || desc_size > *buf_remain) {
		return;
	}

	desc = *buf_ptr;
	desc->nidt = type;
	desc->nidl = data_size;
	memcpy(desc->nid, data, data_size);

	*buf_ptr += desc_size;
	*buf_remain -= desc_size;
}
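/*
 * On-wire layout produced by _add_ns_id_desc(), per the NVMe Namespace
 * Identification Descriptor format:
 *
 *     byte 0      NIDT (descriptor type: EUI64, NGUID or UUID here)
 *     byte 1      NIDL (length of NID in bytes)
 *     bytes 2-3   reserved
 *     bytes 4+    NID  (the identifier itself, NIDL bytes)
 *
 * Hence desc_size = sizeof(*desc) + data_size, which advances buf_ptr just
 * past the identifier to where the next descriptor header begins.
 */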
static int
nvmf_ctrlr_identify_ns_id_descriptor_list(
	struct spdk_nvmf_subsystem *subsystem,
	struct spdk_nvme_cmd *cmd,
	struct spdk_nvme_cpl *rsp,
	void *id_desc_list, size_t id_desc_list_size)
{
	struct spdk_nvmf_ns *ns;
	size_t buf_remain = id_desc_list_size;
	void *buf_ptr = id_desc_list;

	ns = _nvmf_subsystem_get_ns(subsystem, cmd->nsid);
	if (ns == NULL || ns->bdev == NULL) {
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

#define ADD_ID_DESC(type, data, size) \
	do { \
		if (!spdk_mem_all_zero(data, size)) { \
			_add_ns_id_desc(&buf_ptr, &buf_remain, type, data, size); \
		} \
	} while (0)

	ADD_ID_DESC(SPDK_NVME_NIDT_EUI64, ns->opts.eui64, sizeof(ns->opts.eui64));
	ADD_ID_DESC(SPDK_NVME_NIDT_NGUID, ns->opts.nguid, sizeof(ns->opts.nguid));
	ADD_ID_DESC(SPDK_NVME_NIDT_UUID, &ns->opts.uuid, sizeof(ns->opts.uuid));

	/*
	 * The list is automatically 0-terminated because controller to host buffers in
	 * admin commands always get zeroed in nvmf_ctrlr_process_admin_cmd().
	 */

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_identify(struct spdk_nvmf_request *req)
{
	uint8_t cns;
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;

	if (req->data == NULL || req->length < 4096) {
		SPDK_ERRLOG("identify command with invalid buffer\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	cns = cmd->cdw10_bits.identify.cns;

	if (subsystem->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY &&
	    cns != SPDK_NVME_IDENTIFY_CTRLR) {
		/* Discovery controllers only support Identify Controller */
		goto invalid_cns;
	}

	switch (cns) {
	case SPDK_NVME_IDENTIFY_NS:
		return spdk_nvmf_ctrlr_identify_ns(ctrlr, cmd, rsp, req->data);
	case SPDK_NVME_IDENTIFY_CTRLR:
		return spdk_nvmf_ctrlr_identify_ctrlr(ctrlr, req->data);
	case SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST:
		return nvmf_ctrlr_identify_active_ns_list(subsystem, cmd, rsp, req->data);
	case SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST:
		return nvmf_ctrlr_identify_ns_id_descriptor_list(subsystem, cmd, rsp,
				req->data, req->length);
	default:
		goto invalid_cns;
	}

invalid_cns:
	SPDK_ERRLOG("Identify command with unsupported CNS 0x%02x\n", cns);
	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
	rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
static bool
nvmf_qpair_abort_aer(struct spdk_nvmf_qpair *qpair, uint16_t cid)
{
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
	struct spdk_nvmf_request *req;
	int i;

	if (!nvmf_qpair_is_admin_queue(qpair)) {
		return false;
	}

	for (i = 0; i < ctrlr->nr_aer_reqs; i++) {
		if (ctrlr->aer_req[i]->cmd->nvme_cmd.cid == cid) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Aborting AER request\n");
			req = ctrlr->aer_req[i];
			ctrlr->aer_req[i] = NULL;
			ctrlr->nr_aer_reqs--;

			/* Move the last req to the aborting position to keep
			 * aer_reqs contiguous.
			 */
			if (i < ctrlr->nr_aer_reqs) {
				ctrlr->aer_req[i] = ctrlr->aer_req[ctrlr->nr_aer_reqs];
				ctrlr->aer_req[ctrlr->nr_aer_reqs] = NULL;
			}

			req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
			_nvmf_request_complete(req);
			return true;
		}
	}

	return false;
}

static void
nvmf_qpair_abort_request(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req)
{
	uint16_t cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid;

	if (nvmf_qpair_abort_aer(qpair, cid)) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "abort ctrlr=%p sqid=%u cid=%u successful\n",
			      qpair->ctrlr, qpair->qid, cid);
		req->rsp->nvme_cpl.cdw0 &= ~1U; /* Command successfully aborted */

		spdk_nvmf_request_complete(req);
		return;
	}

	nvmf_transport_qpair_abort_request(qpair, req);
}

static void
nvmf_ctrlr_abort_done(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_nvmf_request *req = spdk_io_channel_iter_get_ctx(i);

	if (status == 0) {
		/* There was no qpair whose ID matches SQID of the abort command.
		 * Hence call _nvmf_request_complete() here.
		 */
		_nvmf_request_complete(req);
	}
}

static void
nvmf_ctrlr_abort_on_pg(struct spdk_io_channel_iter *i)
{
	struct spdk_nvmf_request *req = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
	uint16_t sqid = req->cmd->nvme_cmd.cdw10_bits.abort.sqid;
	struct spdk_nvmf_qpair *qpair;

	TAILQ_FOREACH(qpair, &group->qpairs, link) {
		if (qpair->ctrlr == req->qpair->ctrlr && qpair->qid == sqid) {
			/* Found the qpair */

			nvmf_qpair_abort_request(qpair, req);

			/* Return -1 for the status so the iteration across threads stops. */
			spdk_for_each_channel_continue(i, -1);
			return;
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static int
nvmf_ctrlr_abort(struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	rsp->cdw0 = 1U; /* Command not aborted */
	rsp->status.sct = SPDK_NVME_SCT_GENERIC;
	rsp->status.sc = SPDK_NVME_SC_SUCCESS;

	/* Send a message to each poll group, searching for this ctrlr, sqid, and command. */
	spdk_for_each_channel(req->qpair->ctrlr->subsys->tgt,
			      nvmf_ctrlr_abort_on_pg,
			      req,
			      nvmf_ctrlr_abort_done);

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
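/*
 * Note on the fan-out above: this is the standard spdk_for_each_channel()
 * pattern. nvmf_ctrlr_abort_on_pg() runs once on each poll group's thread;
 * passing -1 to spdk_for_each_channel_continue() short-circuits the
 * iteration once the target sqid is found, leaving request completion to
 * nvmf_qpair_abort_request(). Only if every poll group reports 0 does
 * nvmf_ctrlr_abort_done() complete the Abort itself, with cdw0 bit 0 still
 * set ("command not aborted").
 */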
int
nvmf_ctrlr_abort_request(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_request *req_to_abort = req->req_to_abort;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	int rc;

	assert(req_to_abort != NULL);

	if (g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_ABORT].hdlr &&
	    nvmf_qpair_is_admin_queue(req_to_abort->qpair)) {
		return g_nvmf_custom_admin_cmd_hdlrs[SPDK_NVME_OPC_ABORT].hdlr(req);
	}

	rc = spdk_nvmf_request_get_bdev(req_to_abort->cmd->nvme_cmd.nsid, req_to_abort,
					&bdev, &desc, &ch);
	if (rc != 0) {
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return spdk_nvmf_bdev_ctrlr_abort_cmd(bdev, desc, ch, req, req_to_abort);
}

static int
get_features_generic(struct spdk_nvmf_request *req, uint32_t cdw0)
{
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	rsp->cdw0 = cdw0;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int
nvmf_ctrlr_get_features(struct spdk_nvmf_request *req)
{
	uint8_t feature;
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	feature = cmd->cdw10_bits.get_features.fid;
	switch (feature) {
	case SPDK_NVME_FEAT_ARBITRATION:
		return get_features_generic(req, ctrlr->feat.arbitration.raw);
	case SPDK_NVME_FEAT_POWER_MANAGEMENT:
		return get_features_generic(req, ctrlr->feat.power_management.raw);
	case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD:
		return nvmf_ctrlr_get_features_temperature_threshold(req);
	case SPDK_NVME_FEAT_ERROR_RECOVERY:
		return get_features_generic(req, ctrlr->feat.error_recovery.raw);
	case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE:
		return get_features_generic(req, ctrlr->feat.volatile_write_cache.raw);
	case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
		return get_features_generic(req, ctrlr->feat.number_of_queues.raw);
	case SPDK_NVME_FEAT_WRITE_ATOMICITY:
		return get_features_generic(req, ctrlr->feat.write_atomicity.raw);
	case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
		return get_features_generic(req, ctrlr->feat.async_event_configuration.raw);
	case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
		return get_features_generic(req, ctrlr->feat.keep_alive_timer.raw);
	case SPDK_NVME_FEAT_HOST_IDENTIFIER:
		return nvmf_ctrlr_get_features_host_identifier(req);
	case SPDK_NVME_FEAT_HOST_RESERVE_MASK:
		return nvmf_ctrlr_get_features_reservation_notification_mask(req);
	case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST:
		return nvmf_ctrlr_get_features_reservation_persistence(req);
	default:
		SPDK_ERRLOG("Get Features command with unsupported feature ID 0x%02x\n", feature);
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}
static int
nvmf_ctrlr_set_features(struct spdk_nvmf_request *req)
{
	uint8_t feature, save;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	/*
	 * Features are not saveable by the controller as indicated by
	 * ONCS field of the Identify Controller data.
	 */
	save = cmd->cdw10_bits.set_features.sv;
	if (save) {
		response->status.sc = SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE;
		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	feature = cmd->cdw10_bits.set_features.fid;
	switch (feature) {
	case SPDK_NVME_FEAT_ARBITRATION:
		return nvmf_ctrlr_set_features_arbitration(req);
	case SPDK_NVME_FEAT_POWER_MANAGEMENT:
		return nvmf_ctrlr_set_features_power_management(req);
	case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD:
		return nvmf_ctrlr_set_features_temperature_threshold(req);
	case SPDK_NVME_FEAT_ERROR_RECOVERY:
		return nvmf_ctrlr_set_features_error_recovery(req);
	case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE:
		return nvmf_ctrlr_set_features_volatile_write_cache(req);
	case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
		return nvmf_ctrlr_set_features_number_of_queues(req);
	case SPDK_NVME_FEAT_WRITE_ATOMICITY:
		return nvmf_ctrlr_set_features_write_atomicity(req);
	case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
		return nvmf_ctrlr_set_features_async_event_configuration(req);
	case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
		return nvmf_ctrlr_set_features_keep_alive_timer(req);
	case SPDK_NVME_FEAT_HOST_IDENTIFIER:
		return nvmf_ctrlr_set_features_host_identifier(req);
	case SPDK_NVME_FEAT_HOST_RESERVE_MASK:
		return nvmf_ctrlr_set_features_reservation_notification_mask(req);
	case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST:
		return nvmf_ctrlr_set_features_reservation_persistence(req);
	default:
		SPDK_ERRLOG("Set Features command with unsupported feature ID 0x%02x\n", feature);
		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}

static int
nvmf_ctrlr_keep_alive(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Keep Alive\n");

	/*
	 * To handle keep alive just clear or reset the
	 * ctrlr based keep alive duration counter.
	 * When added, a separate timer based process
	 * will monitor if the time since last recorded
	 * keep alive has exceeded the max duration and
	 * take appropriate action.
	 */
	ctrlr->last_keep_alive_tick = spdk_get_ticks();

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
int
nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int rc;

	if (ctrlr == NULL) {
		SPDK_ERRLOG("Admin command sent before CONNECT\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (ctrlr->vcprop.cc.bits.en != 1) {
		SPDK_ERRLOG("Admin command sent to disabled controller\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (req->data && spdk_nvme_opc_get_data_transfer(cmd->opc) == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
		memset(req->data, 0, req->length);
	}

	if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_DISCOVERY) {
		/* Discovery controllers only support Get Log Page, Identify and Keep Alive. */
		switch (cmd->opc) {
		case SPDK_NVME_OPC_IDENTIFY:
		case SPDK_NVME_OPC_GET_LOG_PAGE:
		case SPDK_NVME_OPC_KEEP_ALIVE:
			break;
		default:
			goto invalid_opcode;
		}
	}

	/* Call a custom adm cmd handler if set. Aborts are handled in a different path (see nvmf_passthru_admin_cmd) */
	if (g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].hdlr && cmd->opc != SPDK_NVME_OPC_ABORT) {
		rc = g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].hdlr(req);
		if (rc >= SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
			/* The handler took care of this command */
			return rc;
		}
	}

	switch (cmd->opc) {
	case SPDK_NVME_OPC_GET_LOG_PAGE:
		return nvmf_ctrlr_get_log_page(req);
	case SPDK_NVME_OPC_IDENTIFY:
		return nvmf_ctrlr_identify(req);
	case SPDK_NVME_OPC_ABORT:
		return nvmf_ctrlr_abort(req);
	case SPDK_NVME_OPC_GET_FEATURES:
		return nvmf_ctrlr_get_features(req);
	case SPDK_NVME_OPC_SET_FEATURES:
		return nvmf_ctrlr_set_features(req);
	case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST:
		return nvmf_ctrlr_async_event_request(req);
	case SPDK_NVME_OPC_KEEP_ALIVE:
		return nvmf_ctrlr_keep_alive(req);

	case SPDK_NVME_OPC_CREATE_IO_SQ:
	case SPDK_NVME_OPC_CREATE_IO_CQ:
	case SPDK_NVME_OPC_DELETE_IO_SQ:
	case SPDK_NVME_OPC_DELETE_IO_CQ:
		/* Create and Delete I/O CQ/SQ not allowed in NVMe-oF */
		goto invalid_opcode;

	default:
		goto invalid_opcode;
	}

invalid_opcode:
	SPDK_ERRLOG("Unsupported admin opcode 0x%x\n", cmd->opc);
	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
static int
nvmf_ctrlr_process_fabrics_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_capsule_cmd *cap_hdr;

	cap_hdr = &req->cmd->nvmf_cmd;

	if (qpair->ctrlr == NULL) {
		/* No ctrlr established yet; the only valid command is Connect */
		if (cap_hdr->fctype == SPDK_NVMF_FABRIC_COMMAND_CONNECT) {
			return nvmf_ctrlr_cmd_connect(req);
		} else {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Got fctype 0x%x, expected Connect\n",
				      cap_hdr->fctype);
			req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	} else if (nvmf_qpair_is_admin_queue(qpair)) {
		/*
		 * Controller session is established, and this is an admin queue.
		 * Disallow Connect and allow other fabrics commands.
		 */
		switch (cap_hdr->fctype) {
		case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET:
			return nvmf_property_set(req);
		case SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET:
			return nvmf_property_get(req);
		default:
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "unknown fctype 0x%02x\n",
				      cap_hdr->fctype);
			req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
	} else {
		/* Controller session is established, and this is an I/O queue */
		/* For now, no I/O-specific Fabrics commands are implemented (other than Connect) */
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Unexpected I/O fctype 0x%x\n", cap_hdr->fctype);
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
}
static int
nvmf_ctrlr_async_event_notification(struct spdk_nvmf_ctrlr *ctrlr,
				    union spdk_nvme_async_event_completion *event)
{
	struct spdk_nvmf_request *req;
	struct spdk_nvme_cpl *rsp;

	assert(ctrlr->nr_aer_reqs > 0);

	req = ctrlr->aer_req[--ctrlr->nr_aer_reqs];
	rsp = &req->rsp->nvme_cpl;

	rsp->cdw0 = event->raw;

	_nvmf_request_complete(req);
	ctrlr->aer_req[ctrlr->nr_aer_reqs] = NULL;

	return 0;
}

int
nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr)
{
	union spdk_nvme_async_event_completion event = {0};

	/* Users may disable the event notification */
	if (!ctrlr->feat.async_event_configuration.bits.ns_attr_notice) {
		return 0;
	}

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
	event.bits.log_page_identifier = SPDK_NVME_LOG_CHANGED_NS_LIST;

	/* If there is no outstanding AER request, queue the event. Then
	 * if an AER is later submitted, this event can be sent as a
	 * response.
	 */
	if (ctrlr->nr_aer_reqs == 0) {
		if (ctrlr->notice_event.bits.async_event_type ==
		    SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) {
			return 0;
		}

		ctrlr->notice_event.raw = event.raw;
		return 0;
	}

	return nvmf_ctrlr_async_event_notification(ctrlr, &event);
}

void
nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr)
{
	union spdk_nvme_async_event_completion event = {0};

	if (!ctrlr->num_avail_log_pages) {
		return;
	}

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_IO;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL;
	event.bits.log_page_identifier = SPDK_NVME_LOG_RESERVATION_NOTIFICATION;

	/* If there is no outstanding AER request, queue the event. Then
	 * if an AER is later submitted, this event can be sent as a
	 * response.
	 */
	if (ctrlr->nr_aer_reqs == 0) {
		if (ctrlr->reservation_event.bits.async_event_type ==
		    SPDK_NVME_ASYNC_EVENT_TYPE_IO) {
			return;
		}

		ctrlr->reservation_event.raw = event.raw;
		return;
	}

	nvmf_ctrlr_async_event_notification(ctrlr, &event);
}

void
nvmf_qpair_free_aer(struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
	int i;

	if (!nvmf_qpair_is_admin_queue(qpair)) {
		return;
	}

	for (i = 0; i < ctrlr->nr_aer_reqs; i++) {
		spdk_nvmf_request_free(ctrlr->aer_req[i]);
		ctrlr->aer_req[i] = NULL;
	}

	ctrlr->nr_aer_reqs = 0;
}

void
nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_request *req;
	int i;

	for (i = 0; i < ctrlr->nr_aer_reqs; i++) {
		req = ctrlr->aer_req[i];

		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
		_nvmf_request_complete(req);

		ctrlr->aer_req[i] = NULL;
	}

	ctrlr->nr_aer_reqs = 0;
}
static void
_nvmf_ctrlr_add_reservation_log(void *ctx)
{
	struct spdk_nvmf_reservation_log *log = (struct spdk_nvmf_reservation_log *)ctx;
	struct spdk_nvmf_ctrlr *ctrlr = log->ctrlr;

	ctrlr->log_page_count++;

	/* Maximum number of queued log pages is 255 */
	if (ctrlr->num_avail_log_pages == 0xff) {
		struct spdk_nvmf_reservation_log *entry;
		entry = TAILQ_LAST(&ctrlr->log_head, log_page_head);
		entry->log.log_page_count = ctrlr->log_page_count;
		free(log);
		return;
	}

	log->log.log_page_count = ctrlr->log_page_count;
	log->log.num_avail_log_pages = ctrlr->num_avail_log_pages++;
	TAILQ_INSERT_TAIL(&ctrlr->log_head, log, link);

	nvmf_ctrlr_async_event_reservation_notification(ctrlr);
}

void
nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
				  struct spdk_nvmf_ns *ns,
				  enum spdk_nvme_reservation_notification_log_page_type type)
{
	struct spdk_nvmf_reservation_log *log;

	switch (type) {
	case SPDK_NVME_RESERVATION_LOG_PAGE_EMPTY:
		return;
	case SPDK_NVME_REGISTRATION_PREEMPTED:
		if (ns->mask & SPDK_NVME_REGISTRATION_PREEMPTED_MASK) {
			return;
		}
		break;
	case SPDK_NVME_RESERVATION_RELEASED:
		if (ns->mask & SPDK_NVME_RESERVATION_RELEASED_MASK) {
			return;
		}
		break;
	case SPDK_NVME_RESERVATION_PREEMPTED:
		if (ns->mask & SPDK_NVME_RESERVATION_PREEMPTED_MASK) {
			return;
		}
		break;
	default:
		return;
	}

	log = calloc(1, sizeof(*log));
	if (!log) {
		SPDK_ERRLOG("Alloc log page failed, ignore the log\n");
		return;
	}
	log->ctrlr = ctrlr;
	log->log.type = type;
	log->log.nsid = ns->nsid;

	spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_add_reservation_log, log);
}

/* Check from subsystem poll group's namespace information data structure */
static bool
nvmf_ns_info_ctrlr_is_registrant(struct spdk_nvmf_subsystem_pg_ns_info *ns_info,
				 struct spdk_nvmf_ctrlr *ctrlr)
{
	uint32_t i;

	for (i = 0; i < SPDK_NVMF_MAX_NUM_REGISTRANTS; i++) {
		if (!spdk_uuid_compare(&ns_info->reg_hostid[i], &ctrlr->hostid)) {
			return true;
		}
	}

	return false;
}
/*
 * Check whether the NVMe command is permitted for the current controller (host).
 */
static int
nvmf_ns_reservation_request_check(struct spdk_nvmf_subsystem_pg_ns_info *ns_info,
				  struct spdk_nvmf_ctrlr *ctrlr,
				  struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	enum spdk_nvme_reservation_type rtype = ns_info->rtype;
	uint8_t status = SPDK_NVME_SC_SUCCESS;
	uint8_t racqa;
	bool is_registrant;

	/* No valid reservation */
	if (!rtype) {
		return 0;
	}

	is_registrant = nvmf_ns_info_ctrlr_is_registrant(ns_info, ctrlr);
	/* All registrants type and current ctrlr is a valid registrant */
	if ((rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS ||
	     rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) && is_registrant) {
		return 0;
	} else if (!spdk_uuid_compare(&ns_info->holder_id, &ctrlr->hostid)) {
		return 0;
	}

	/* Non-holder for current controller */
	switch (cmd->opc) {
	case SPDK_NVME_OPC_READ:
	case SPDK_NVME_OPC_COMPARE:
		if (rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			goto exit;
		}
		if ((rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY ||
		     rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS) && !is_registrant) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		}
		break;
	case SPDK_NVME_OPC_FLUSH:
	case SPDK_NVME_OPC_WRITE:
	case SPDK_NVME_OPC_WRITE_UNCORRECTABLE:
	case SPDK_NVME_OPC_WRITE_ZEROES:
	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
		if (rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE ||
		    rtype == SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			goto exit;
		}
		if (!is_registrant) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		}
		break;
	case SPDK_NVME_OPC_RESERVATION_ACQUIRE:
		racqa = cmd->cdw10_bits.resv_acquire.racqa;
		if (racqa == SPDK_NVME_RESERVE_ACQUIRE) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
			goto exit;
		}
		if (!is_registrant) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		}
		break;
	case SPDK_NVME_OPC_RESERVATION_RELEASE:
		if (!is_registrant) {
			status = SPDK_NVME_SC_RESERVATION_CONFLICT;
		}
		break;
	default:
		break;
	}

exit:
	req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->rsp->nvme_cpl.status.sc = status;
	if (status == SPDK_NVME_SC_RESERVATION_CONFLICT) {
		return -EPERM;
	}

	return 0;
}
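/*
 * Example walk-through of the check above (hosts A and B are hypothetical):
 * if host A holds a Write Exclusive reservation and host B, a non-registrant,
 * issues SPDK_NVME_OPC_WRITE, the opcode falls into the write-class case,
 * rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE matches, and the request fails
 * with SPDK_NVME_SC_RESERVATION_CONFLICT (-EPERM to the caller). The same
 * WRITE from host A returns 0 early via the holder_id comparison.
 */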
static int
nvmf_ctrlr_process_io_fused_cmd(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
				struct spdk_bdev_desc *desc, struct spdk_io_channel *ch)
{
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_request *first_fused_req = req->qpair->first_fused_req;
	int rc;

	if (cmd->fuse == SPDK_NVME_CMD_FUSE_FIRST) {
		/* first fused operation (should be compare) */
		if (first_fused_req != NULL) {
			struct spdk_nvme_cpl *fused_response = &first_fused_req->rsp->nvme_cpl;

			SPDK_ERRLOG("Wrong sequence of fused operations\n");

			/* abort req->qpair->first_fused_request and continue with new fused command */
			fused_response->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
			fused_response->status.sct = SPDK_NVME_SCT_GENERIC;
			_nvmf_request_complete(first_fused_req);
		} else if (cmd->opc != SPDK_NVME_OPC_COMPARE) {
			SPDK_ERRLOG("Wrong op code of fused operations\n");
			rsp->status.sct = SPDK_NVME_SCT_GENERIC;
			rsp->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		req->qpair->first_fused_req = req;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	} else if (cmd->fuse == SPDK_NVME_CMD_FUSE_SECOND) {
		/* second fused operation (should be write) */
		if (first_fused_req == NULL) {
			SPDK_ERRLOG("Wrong sequence of fused operations\n");
			rsp->status.sct = SPDK_NVME_SCT_GENERIC;
			rsp->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		} else if (cmd->opc != SPDK_NVME_OPC_WRITE) {
			struct spdk_nvme_cpl *fused_response = &first_fused_req->rsp->nvme_cpl;

			SPDK_ERRLOG("Wrong op code of fused operations\n");

			/* abort req->qpair->first_fused_request and fail current command */
			fused_response->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
			fused_response->status.sct = SPDK_NVME_SCT_GENERIC;
			_nvmf_request_complete(first_fused_req);

			rsp->status.sct = SPDK_NVME_SCT_GENERIC;
			rsp->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
			req->qpair->first_fused_req = NULL;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		/* save request of first command to generate response later */
		req->first_fused_req = first_fused_req;
		req->qpair->first_fused_req = NULL;
	} else {
		SPDK_ERRLOG("Invalid fused command fuse field.\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(bdev, desc, ch, req->first_fused_req, req);

	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		if (spdk_nvme_cpl_is_error(rsp)) {
			struct spdk_nvme_cpl *fused_response = &first_fused_req->rsp->nvme_cpl;

			fused_response->status = rsp->status;
			rsp->status.sct = SPDK_NVME_SCT_GENERIC;
			rsp->status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
			/* Complete first of fused commands. Second will be completed by upper layer */
			_nvmf_request_complete(first_fused_req);
			req->first_fused_req = NULL;
		}
	}

	return rc;
}
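/*
 * Fused command sequencing handled above, summarized. A well-formed pair is:
 *
 *     COMPARE with fuse == SPDK_NVME_CMD_FUSE_FIRST   -> parked on
 *                                             qpair->first_fused_req
 *     WRITE   with fuse == SPDK_NVME_CMD_FUSE_SECOND  -> both submitted via
 *                            nvmf_bdev_ctrlr_compare_and_write_cmd()
 *
 * Any other ordering, opcode, or fuse value aborts the parked half with
 * SPDK_NVME_SC_ABORTED_MISSING_FUSED and/or fails the current command.
 */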
int
nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
{
	uint32_t nsid;
	struct spdk_nvmf_ns *ns;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_poll_group *group = req->qpair->group;
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;

	/* pre-set response details for this command */
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	nsid = cmd->nsid;

	if (spdk_unlikely(ctrlr == NULL)) {
		SPDK_ERRLOG("I/O command sent before CONNECT\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(ctrlr->vcprop.cc.bits.en != 1)) {
		SPDK_ERRLOG("I/O command sent to disabled controller\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid);
	if (ns == NULL || ns->bdev == NULL) {
		SPDK_ERRLOG("Unsuccessful query for nsid %u\n", cmd->nsid);
		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		response->status.dnr = 1;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* scan-build falsely reporting dereference of null pointer */
	assert(group != NULL && group->sgroups != NULL);
	ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[nsid - 1];
	if (nvmf_ns_reservation_request_check(ns_info, ctrlr, req)) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Reservation Conflict for nsid %u, opcode %u\n",
			      cmd->nsid, cmd->opc);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	bdev = ns->bdev;
	desc = ns->desc;
	ch = ns_info->channel;

	if (spdk_unlikely(cmd->fuse & SPDK_NVME_CMD_FUSE_MASK)) {
		return nvmf_ctrlr_process_io_fused_cmd(req, bdev, desc, ch);
	} else if (spdk_unlikely(req->qpair->first_fused_req != NULL)) {
		struct spdk_nvme_cpl *fused_response = &req->qpair->first_fused_req->rsp->nvme_cpl;

		SPDK_ERRLOG("Expected second of fused commands - failing first of fused commands\n");

		/* abort req->qpair->first_fused_request and continue with new command */
		fused_response->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
		fused_response->status.sct = SPDK_NVME_SCT_GENERIC;
		_nvmf_request_complete(req->qpair->first_fused_req);
		req->qpair->first_fused_req = NULL;
	}

	switch (cmd->opc) {
	case SPDK_NVME_OPC_READ:
		return nvmf_bdev_ctrlr_read_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_WRITE:
		return nvmf_bdev_ctrlr_write_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_COMPARE:
		return nvmf_bdev_ctrlr_compare_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_WRITE_ZEROES:
		return nvmf_bdev_ctrlr_write_zeroes_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_FLUSH:
		return nvmf_bdev_ctrlr_flush_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
		return nvmf_bdev_ctrlr_dsm_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_RESERVATION_REGISTER:
	case SPDK_NVME_OPC_RESERVATION_ACQUIRE:
	case SPDK_NVME_OPC_RESERVATION_RELEASE:
	case SPDK_NVME_OPC_RESERVATION_REPORT:
		spdk_thread_send_msg(ctrlr->subsys->thread, nvmf_ns_reservation_request, req);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	default:
		return nvmf_bdev_ctrlr_nvme_passthru_io(bdev, desc, ch, req);
	}
}
static void
nvmf_qpair_request_cleanup(struct spdk_nvmf_qpair *qpair)
{
	if (qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING) {
		assert(qpair->state_cb != NULL);

		if (TAILQ_EMPTY(&qpair->outstanding)) {
			qpair->state_cb(qpair->state_cb_arg, 0);
		}
	}
}

int
spdk_nvmf_request_free(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;

	TAILQ_REMOVE(&qpair->outstanding, req, link);
	if (nvmf_transport_req_free(req)) {
		SPDK_ERRLOG("Unable to free transport level request resources.\n");
	}

	nvmf_qpair_request_cleanup(qpair);

	return 0;
}

static void
_nvmf_request_complete(void *ctx)
{
	struct spdk_nvmf_request *req = ctx;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	struct spdk_nvmf_qpair *qpair;
	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;
	bool is_aer = false;

	rsp->sqid = 0;
	rsp->status.p = 0;
	rsp->cid = req->cmd->nvme_cmd.cid;

	qpair = req->qpair;
	if (qpair->ctrlr) {
		sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
		assert(sgroup != NULL);
		is_aer = req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
	} else if (spdk_unlikely(nvmf_request_is_fabric_connect(req))) {
		sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
	}

	if (SPDK_DEBUGLOG_FLAG_ENABLED("nvmf")) {
		spdk_nvme_print_completion(qpair->qid, rsp);
	}

	TAILQ_REMOVE(&qpair->outstanding, req, link);
	if (nvmf_transport_req_complete(req)) {
		SPDK_ERRLOG("Transport request completion error!\n");
	}

	/* AER cmd is an exception */
	if (sgroup && !is_aer) {
		assert(sgroup->io_outstanding > 0);
		sgroup->io_outstanding--;
		if (sgroup->state == SPDK_NVMF_SUBSYSTEM_PAUSING &&
		    sgroup->io_outstanding == 0) {
			sgroup->state = SPDK_NVMF_SUBSYSTEM_PAUSED;
			sgroup->cb_fn(sgroup->cb_arg, 0);
		}
	}

	nvmf_qpair_request_cleanup(qpair);
}

int
spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;

	if (spdk_likely(qpair->group->thread == spdk_get_thread())) {
		_nvmf_request_complete(req);
	} else {
		spdk_thread_send_msg(qpair->group->thread,
				     _nvmf_request_complete, req);
	}

	return 0;
}

static void
_nvmf_request_exec(struct spdk_nvmf_request *req,
		   struct spdk_nvmf_subsystem_poll_group *sgroup)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	enum spdk_nvmf_request_exec_status status;

	if (SPDK_DEBUGLOG_FLAG_ENABLED("nvmf")) {
		spdk_nvme_print_command(qpair->qid, &req->cmd->nvme_cmd);
	}

	if (sgroup) {
		sgroup->io_outstanding++;
	}

	/* Place the request on the outstanding list so we can keep track of it */
	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);

	if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC)) {
		status = nvmf_ctrlr_process_fabrics_cmd(req);
	} else if (spdk_unlikely(nvmf_qpair_is_admin_queue(qpair))) {
		status = nvmf_ctrlr_process_admin_cmd(req);
	} else {
		status = nvmf_ctrlr_process_io_cmd(req);
	}

	if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		_nvmf_request_complete(req);
	}
}
void
spdk_nvmf_request_exec_fabrics(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;

	assert(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC);

	if (qpair->ctrlr) {
		sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
		assert(sgroup != NULL);
	} else {
		sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
	}

	_nvmf_request_exec(req, sgroup);
}

void
spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;

	if (qpair->ctrlr) {
		sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
		assert(sgroup != NULL);
	} else if (spdk_unlikely(nvmf_request_is_fabric_connect(req))) {
		sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
	}

	if (qpair->state != SPDK_NVMF_QPAIR_ACTIVE) {
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		/* Place the request on the outstanding list so we can keep track of it */
		TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
		/* Still increment io_outstanding because request_complete decrements it */
		if (sgroup != NULL) {
			sgroup->io_outstanding++;
		}
		_nvmf_request_complete(req);
		return;
	}

	/* Check if the subsystem is paused (if there is a subsystem) */
	if (sgroup != NULL) {
		if (sgroup->state != SPDK_NVMF_SUBSYSTEM_ACTIVE) {
			/* The subsystem is not currently active. Queue this request. */
			TAILQ_INSERT_TAIL(&sgroup->queued, req, link);
			return;
		}
	}

	_nvmf_request_exec(req, sgroup);
}
static bool
nvmf_ctrlr_get_dif_ctx(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_cmd *cmd,
		       struct spdk_dif_ctx *dif_ctx)
{
	struct spdk_nvmf_ns *ns;
	struct spdk_bdev *bdev;

	if (ctrlr == NULL || cmd == NULL) {
		return false;
	}

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, cmd->nsid);
	if (ns == NULL || ns->bdev == NULL) {
		return false;
	}

	bdev = ns->bdev;

	switch (cmd->opc) {
	case SPDK_NVME_OPC_READ:
	case SPDK_NVME_OPC_WRITE:
	case SPDK_NVME_OPC_COMPARE:
		return nvmf_bdev_ctrlr_get_dif_ctx(bdev, cmd, dif_ctx);
	default:
		return false;
	}
}

bool
spdk_nvmf_request_get_dif_ctx(struct spdk_nvmf_request *req, struct spdk_dif_ctx *dif_ctx)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;

	if (spdk_likely(ctrlr == NULL || !ctrlr->dif_insert_or_strip)) {
		return false;
	}

	if (spdk_unlikely(qpair->state != SPDK_NVMF_QPAIR_ACTIVE)) {
		return false;
	}

	if (spdk_unlikely(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC)) {
		return false;
	}

	if (spdk_unlikely(nvmf_qpair_is_admin_queue(qpair))) {
		return false;
	}

	return nvmf_ctrlr_get_dif_ctx(ctrlr, &req->cmd->nvme_cmd, dif_ctx);
}
void
spdk_nvmf_set_custom_admin_cmd_hdlr(uint8_t opc, spdk_nvmf_custom_cmd_hdlr hdlr)
{
	g_nvmf_custom_admin_cmd_hdlrs[opc].hdlr = hdlr;
}
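/*
 * Usage sketch (handler name is hypothetical): intercept Security Send
 * before the generic dispatch in nvmf_ctrlr_process_admin_cmd().
 *
 *     static int my_security_send_hdlr(struct spdk_nvmf_request *req);
 *
 *     spdk_nvmf_set_custom_admin_cmd_hdlr(SPDK_NVME_OPC_SECURITY_SEND,
 *                                         my_security_send_hdlr);
 *
 * A handler returning a value >= SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE
 * claims the command; a negative return falls through to the built-ins.
 */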
static int
nvmf_passthru_admin_cmd(struct spdk_nvmf_request *req)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_nvme_cmd *cmd = spdk_nvmf_request_get_cmd(req);
	struct spdk_nvme_cpl *response = spdk_nvmf_request_get_response(req);
	uint32_t bdev_nsid;
	int rc;

	if (g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].nsid == 0) {
		bdev_nsid = cmd->nsid;
	} else {
		bdev_nsid = g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].nsid;
	}

	rc = spdk_nvmf_request_get_bdev(bdev_nsid, req, &bdev, &desc, &ch);
	if (rc) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, NULL);
}
void
spdk_nvmf_set_passthru_admin_cmd(uint8_t opc, uint32_t forward_nsid)
{
	g_nvmf_custom_admin_cmd_hdlrs[opc].hdlr = nvmf_passthru_admin_cmd;
	g_nvmf_custom_admin_cmd_hdlrs[opc].nsid = forward_nsid;
}
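/*
 * Usage sketch (opcode and NSID are illustrative only): forward all Format
 * NVM admin commands to the bdev backing namespace 1 of the subsystem.
 *
 *     spdk_nvmf_set_passthru_admin_cmd(SPDK_NVME_OPC_FORMAT_NVM, 1);
 *
 * With forward_nsid == 0, the NSID carried in the command itself selects
 * the target bdev instead.
 */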
int
spdk_nvmf_request_get_bdev(uint32_t nsid, struct spdk_nvmf_request *req,
			   struct spdk_bdev **bdev, struct spdk_bdev_desc **desc, struct spdk_io_channel **ch)
{
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvmf_ns *ns;
	struct spdk_nvmf_poll_group *group = req->qpair->group;
	struct spdk_nvmf_subsystem_pg_ns_info *ns_info;

	*bdev = NULL;
	*desc = NULL;
	*ch = NULL;

	ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid);
	if (ns == NULL || ns->bdev == NULL) {
		return -EINVAL;
	}

	assert(group != NULL && group->sgroups != NULL);
	ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[nsid - 1];
	*bdev = ns->bdev;
	*desc = ns->desc;
	*ch = ns_info->channel;

	return 0;
}
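/*
 * Note on the helper above: the bdev and descriptor come from the
 * subsystem's namespace entry, while the I/O channel is the per-poll-group
 * channel cached in the subsystem poll group's ns_info, so the returned
 * triple is only valid on the thread owning req->qpair's poll group. A
 * nonzero return means the NSID is not backed by an active bdev; see
 * nvmf_passthru_admin_cmd() above for a caller that maps that to
 * SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT.
 */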
struct spdk_nvmf_ctrlr *spdk_nvmf_request_get_ctrlr(struct spdk_nvmf_request *req)
{
	return req->qpair->ctrlr;
}

struct spdk_nvme_cmd *spdk_nvmf_request_get_cmd(struct spdk_nvmf_request *req)
{
	return &req->cmd->nvme_cmd;
}

struct spdk_nvme_cpl *spdk_nvmf_request_get_response(struct spdk_nvmf_request *req)
{
	return &req->rsp->nvme_cpl;
}

struct spdk_nvmf_subsystem *spdk_nvmf_request_get_subsystem(struct spdk_nvmf_request *req)
{
	return req->qpair->ctrlr->subsys;
}

void spdk_nvmf_request_get_data(struct spdk_nvmf_request *req, void **data, uint32_t *length)
{
	*data = req->data;
	*length = req->length;
}

struct spdk_nvmf_subsystem *spdk_nvmf_ctrlr_get_subsystem(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->subsys;
}

uint16_t spdk_nvmf_ctrlr_get_id(struct spdk_nvmf_ctrlr *ctrlr)
{
	return ctrlr->cntlid;
}

struct spdk_nvmf_request *spdk_nvmf_request_get_req_to_abort(struct spdk_nvmf_request *req)
{
	return req->req_to_abort;
}