4 * Copyright (c) Intel Corporation.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include "event_nvmf.h"
36 #include "spdk/bdev.h"
37 #include "spdk/event.h"
38 #include "spdk/thread.h"
40 #include "spdk/nvme.h"
41 #include "spdk/nvmf_cmd.h"
42 #include "spdk/util.h"
/*
 * Lifecycle states for the NVMe-oF target event subsystem.  The target walks
 * forward through the INIT states during startup, sits in RUNNING, and walks
 * through the FINI states during shutdown.  NVMF_TGT_ERROR is entered when
 * configuration parsing fails.  The RUNNING/STOPPED/ERROR members are
 * referenced throughout this file and must be declared here.
 */
enum nvmf_tgt_state {
	NVMF_TGT_INIT_NONE = 0,
	NVMF_TGT_INIT_PARSE_CONFIG,
	NVMF_TGT_INIT_CREATE_POLL_GROUPS,
	NVMF_TGT_INIT_START_SUBSYSTEMS,
	NVMF_TGT_INIT_START_ACCEPTOR,
	NVMF_TGT_RUNNING,
	NVMF_TGT_FINI_STOP_SUBSYSTEMS,
	NVMF_TGT_FINI_DESTROY_POLL_GROUPS,
	NVMF_TGT_FINI_STOP_ACCEPTOR,
	NVMF_TGT_FINI_FREE_RESOURCES,
	NVMF_TGT_STOPPED,
	NVMF_TGT_ERROR,
};
/* One NVMe-oF poll group pinned to a dedicated SPDK thread (one per core). */
struct nvmf_tgt_poll_group {
	/* Transport-level poll group owned by this entry. */
	struct spdk_nvmf_poll_group	*group;
	/* Thread on which the poll group was created and runs. */
	struct spdk_thread		*thread;
	/* Linkage in the global g_poll_groups list. */
	TAILQ_ENTRY(nvmf_tgt_poll_group)	link;
};
/* The single NVMe-oF target instance managed by this subsystem module. */
struct spdk_nvmf_tgt *g_spdk_nvmf_tgt = NULL;

/* Current lifecycle state; driven forward by nvmf_tgt_advance_state(). */
static enum nvmf_tgt_state g_tgt_state;

/* Thread that began initialization; poll-group creation completions are
 * messaged back to it. */
static struct spdk_thread *g_tgt_init_thread = NULL;
/* Thread that began finalization; poll-group destruction completions are
 * messaged back to it. */
static struct spdk_thread *g_tgt_fini_thread = NULL;

/* All created poll groups, one per reactor core, and their count. */
static TAILQ_HEAD(, nvmf_tgt_poll_group) g_poll_groups = TAILQ_HEAD_INITIALIZER(g_poll_groups);
static size_t g_num_poll_groups = 0;

/* Poller that accepts new transport connections while the target runs. */
static struct spdk_poller *g_acceptor_poller = NULL;

static void nvmf_tgt_advance_state(void);
80 nvmf_shutdown_cb(void *arg1
)
82 /* Still in initialization state, defer shutdown operation */
83 if (g_tgt_state
< NVMF_TGT_RUNNING
) {
84 spdk_thread_send_msg(spdk_get_thread(), nvmf_shutdown_cb
, NULL
);
86 } else if (g_tgt_state
!= NVMF_TGT_RUNNING
&& g_tgt_state
!= NVMF_TGT_ERROR
) {
87 /* Already in Shutdown status, ignore the signal */
91 if (g_tgt_state
== NVMF_TGT_ERROR
) {
92 /* Parse configuration error */
93 g_tgt_state
= NVMF_TGT_FINI_FREE_RESOURCES
;
95 g_tgt_state
= NVMF_TGT_FINI_STOP_SUBSYSTEMS
;
97 nvmf_tgt_advance_state();
101 nvmf_subsystem_fini(void)
103 nvmf_shutdown_cb(NULL
);
107 acceptor_poll(void *arg
)
109 struct spdk_nvmf_tgt
*tgt
= arg
;
112 count
= spdk_nvmf_tgt_accept(tgt
);
115 return SPDK_POLLER_BUSY
;
117 return SPDK_POLLER_IDLE
;
122 _nvmf_tgt_destroy_poll_group_done(void *ctx
)
124 assert(g_num_poll_groups
> 0);
126 if (--g_num_poll_groups
== 0) {
127 g_tgt_state
= NVMF_TGT_FINI_STOP_ACCEPTOR
;
128 nvmf_tgt_advance_state();
133 nvmf_tgt_destroy_poll_group_done(void *cb_arg
, int status
)
135 struct nvmf_tgt_poll_group
*pg
= cb_arg
;
139 spdk_thread_send_msg(g_tgt_fini_thread
, _nvmf_tgt_destroy_poll_group_done
, NULL
);
141 spdk_thread_exit(spdk_get_thread());
145 nvmf_tgt_destroy_poll_group(void *ctx
)
147 struct nvmf_tgt_poll_group
*pg
= ctx
;
149 spdk_nvmf_poll_group_destroy(pg
->group
, nvmf_tgt_destroy_poll_group_done
, pg
);
153 nvmf_tgt_destroy_poll_groups(void)
155 struct nvmf_tgt_poll_group
*pg
, *tpg
;
157 g_tgt_fini_thread
= spdk_get_thread();
158 assert(g_tgt_fini_thread
!= NULL
);
160 TAILQ_FOREACH_SAFE(pg
, &g_poll_groups
, link
, tpg
) {
161 TAILQ_REMOVE(&g_poll_groups
, pg
, link
);
162 spdk_thread_send_msg(pg
->thread
, nvmf_tgt_destroy_poll_group
, pg
);
167 nvmf_tgt_create_poll_group_done(void *ctx
)
169 struct nvmf_tgt_poll_group
*pg
= ctx
;
171 TAILQ_INSERT_TAIL(&g_poll_groups
, pg
, link
);
173 assert(g_num_poll_groups
< spdk_env_get_core_count());
175 if (++g_num_poll_groups
== spdk_env_get_core_count()) {
176 g_tgt_state
= NVMF_TGT_INIT_START_SUBSYSTEMS
;
177 nvmf_tgt_advance_state();
182 nvmf_tgt_create_poll_group(void *ctx
)
184 struct nvmf_tgt_poll_group
*pg
;
186 pg
= calloc(1, sizeof(*pg
));
188 SPDK_ERRLOG("Not enough memory to allocate poll groups\n");
189 spdk_app_stop(-ENOMEM
);
193 pg
->thread
= spdk_get_thread();
194 pg
->group
= spdk_nvmf_poll_group_create(g_spdk_nvmf_tgt
);
196 spdk_thread_send_msg(g_tgt_init_thread
, nvmf_tgt_create_poll_group_done
, pg
);
200 nvmf_tgt_create_poll_groups(void)
202 struct spdk_cpuset tmp_cpumask
= {};
204 char thread_name
[32];
205 struct spdk_thread
*thread
;
207 g_tgt_init_thread
= spdk_get_thread();
208 assert(g_tgt_init_thread
!= NULL
);
210 SPDK_ENV_FOREACH_CORE(i
) {
211 spdk_cpuset_zero(&tmp_cpumask
);
212 spdk_cpuset_set_cpu(&tmp_cpumask
, i
, true);
213 snprintf(thread_name
, sizeof(thread_name
), "nvmf_tgt_poll_group_%u", i
);
215 thread
= spdk_thread_create(thread_name
, &tmp_cpumask
);
216 assert(thread
!= NULL
);
218 spdk_thread_send_msg(thread
, nvmf_tgt_create_poll_group
, NULL
);
223 nvmf_tgt_subsystem_started(struct spdk_nvmf_subsystem
*subsystem
,
224 void *cb_arg
, int status
)
226 subsystem
= spdk_nvmf_subsystem_get_next(subsystem
);
229 spdk_nvmf_subsystem_start(subsystem
, nvmf_tgt_subsystem_started
, NULL
);
233 g_tgt_state
= NVMF_TGT_INIT_START_ACCEPTOR
;
234 nvmf_tgt_advance_state();
238 nvmf_tgt_subsystem_stopped(struct spdk_nvmf_subsystem
*subsystem
,
239 void *cb_arg
, int status
)
241 subsystem
= spdk_nvmf_subsystem_get_next(subsystem
);
244 spdk_nvmf_subsystem_stop(subsystem
, nvmf_tgt_subsystem_stopped
, NULL
);
248 g_tgt_state
= NVMF_TGT_FINI_DESTROY_POLL_GROUPS
;
249 nvmf_tgt_advance_state();
253 nvmf_tgt_destroy_done(void *ctx
, int status
)
255 g_tgt_state
= NVMF_TGT_STOPPED
;
257 free(g_spdk_nvmf_tgt_conf
);
258 g_spdk_nvmf_tgt_conf
= NULL
;
259 nvmf_tgt_advance_state();
263 nvmf_tgt_parse_conf_done(int status
)
265 g_tgt_state
= (status
== 0) ? NVMF_TGT_INIT_CREATE_POLL_GROUPS
: NVMF_TGT_ERROR
;
266 nvmf_tgt_advance_state();
270 nvmf_tgt_parse_conf_start(void *ctx
)
272 if (nvmf_parse_conf(nvmf_tgt_parse_conf_done
)) {
273 SPDK_ERRLOG("nvmf_parse_conf() failed\n");
274 g_tgt_state
= NVMF_TGT_ERROR
;
275 nvmf_tgt_advance_state();
280 fixup_identify_ctrlr(struct spdk_nvmf_request
*req
)
284 struct spdk_nvme_ctrlr_data
*nvme_cdata
;
285 struct spdk_nvme_ctrlr_data nvmf_cdata
= {};
286 struct spdk_nvmf_ctrlr
*ctrlr
= spdk_nvmf_request_get_ctrlr(req
);
287 struct spdk_nvme_cpl
*rsp
= spdk_nvmf_request_get_response(req
);
289 /* This is the identify data from the NVMe drive */
290 spdk_nvmf_request_get_data(req
, (void **)&nvme_cdata
, &length
);
292 /* Get the NVMF identify data */
293 rc
= spdk_nvmf_ctrlr_identify_ctrlr(ctrlr
, &nvmf_cdata
);
294 if (rc
!= SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE
) {
295 rsp
->status
.sct
= SPDK_NVME_SCT_GENERIC
;
296 rsp
->status
.sc
= SPDK_NVME_SC_INTERNAL_DEVICE_ERROR
;
300 /* Fixup NVMF identify data with NVMe identify data */
302 /* Serial Number (SN) */
303 memcpy(&nvmf_cdata
.sn
[0], &nvme_cdata
->sn
[0], sizeof(nvmf_cdata
.sn
));
304 /* Model Number (MN) */
305 memcpy(&nvmf_cdata
.mn
[0], &nvme_cdata
->mn
[0], sizeof(nvmf_cdata
.mn
));
306 /* Firmware Revision (FR) */
307 memcpy(&nvmf_cdata
.fr
[0], &nvme_cdata
->fr
[0], sizeof(nvmf_cdata
.fr
));
308 /* IEEE OUI Identifier (IEEE) */
309 memcpy(&nvmf_cdata
.ieee
[0], &nvme_cdata
->ieee
[0], sizeof(nvmf_cdata
.ieee
));
310 /* FRU Globally Unique Identifier (FGUID) */
312 /* Copy the fixed up data back to the response */
313 memcpy(nvme_cdata
, &nvmf_cdata
, length
);
317 nvmf_custom_identify_hdlr(struct spdk_nvmf_request
*req
)
319 struct spdk_nvme_cmd
*cmd
= spdk_nvmf_request_get_cmd(req
);
320 struct spdk_bdev
*bdev
;
321 struct spdk_bdev_desc
*desc
;
322 struct spdk_io_channel
*ch
;
323 struct spdk_nvmf_subsystem
*subsys
;
326 if (cmd
->cdw10_bits
.identify
.cns
!= SPDK_NVME_IDENTIFY_CTRLR
) {
327 return -1; /* continue */
330 subsys
= spdk_nvmf_request_get_subsystem(req
);
331 if (subsys
== NULL
) {
335 /* Only procss this request if it has exactly one namespace */
336 if (spdk_nvmf_subsystem_get_max_nsid(subsys
) != 1) {
340 /* Forward to first namespace if it supports NVME admin commands */
341 rc
= spdk_nvmf_request_get_bdev(1, req
, &bdev
, &desc
, &ch
);
343 /* No bdev found for this namespace. Continue. */
347 if (!spdk_bdev_io_type_supported(bdev
, SPDK_BDEV_IO_TYPE_NVME_ADMIN
)) {
351 return spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev
, desc
, ch
, req
, fixup_identify_ctrlr
);
355 nvmf_tgt_advance_state(void)
357 enum nvmf_tgt_state prev_state
;
361 prev_state
= g_tgt_state
;
363 switch (g_tgt_state
) {
364 case NVMF_TGT_INIT_NONE
: {
365 g_tgt_state
= NVMF_TGT_INIT_PARSE_CONFIG
;
368 case NVMF_TGT_INIT_PARSE_CONFIG
:
369 /* Send message to self to call parse conf func.
370 * Prevents it from possibly performing cb before getting
371 * out of this function, which causes problems. */
372 spdk_thread_send_msg(spdk_get_thread(), nvmf_tgt_parse_conf_start
, NULL
);
374 case NVMF_TGT_INIT_CREATE_POLL_GROUPS
:
376 if (g_spdk_nvmf_tgt_conf
->admin_passthru
.identify_ctrlr
) {
377 SPDK_NOTICELOG("Custom identify ctrlr handler enabled\n");
378 spdk_nvmf_set_custom_admin_cmd_hdlr(SPDK_NVME_OPC_IDENTIFY
, nvmf_custom_identify_hdlr
);
380 /* Create poll group threads, and send a message to each thread
381 * and create a poll group.
383 nvmf_tgt_create_poll_groups();
385 case NVMF_TGT_INIT_START_SUBSYSTEMS
: {
386 struct spdk_nvmf_subsystem
*subsystem
;
388 subsystem
= spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt
);
391 spdk_nvmf_subsystem_start(subsystem
, nvmf_tgt_subsystem_started
, NULL
);
393 g_tgt_state
= NVMF_TGT_INIT_START_ACCEPTOR
;
397 case NVMF_TGT_INIT_START_ACCEPTOR
:
398 g_acceptor_poller
= SPDK_POLLER_REGISTER(acceptor_poll
, g_spdk_nvmf_tgt
,
399 g_spdk_nvmf_tgt_conf
->acceptor_poll_rate
);
400 g_tgt_state
= NVMF_TGT_RUNNING
;
402 case NVMF_TGT_RUNNING
:
403 spdk_subsystem_init_next(0);
405 case NVMF_TGT_FINI_STOP_SUBSYSTEMS
: {
406 struct spdk_nvmf_subsystem
*subsystem
;
408 subsystem
= spdk_nvmf_subsystem_get_first(g_spdk_nvmf_tgt
);
411 spdk_nvmf_subsystem_stop(subsystem
, nvmf_tgt_subsystem_stopped
, NULL
);
413 g_tgt_state
= NVMF_TGT_FINI_DESTROY_POLL_GROUPS
;
417 case NVMF_TGT_FINI_DESTROY_POLL_GROUPS
:
418 /* Send a message to each poll group thread, and terminate the thread */
419 nvmf_tgt_destroy_poll_groups();
421 case NVMF_TGT_FINI_STOP_ACCEPTOR
:
422 spdk_poller_unregister(&g_acceptor_poller
);
423 g_tgt_state
= NVMF_TGT_FINI_FREE_RESOURCES
;
425 case NVMF_TGT_FINI_FREE_RESOURCES
:
426 spdk_nvmf_tgt_destroy(g_spdk_nvmf_tgt
, nvmf_tgt_destroy_done
, NULL
);
428 case NVMF_TGT_STOPPED
:
429 spdk_subsystem_fini_next();
432 spdk_subsystem_init_next(rc
);
436 } while (g_tgt_state
!= prev_state
);
440 nvmf_subsystem_init(void)
442 g_tgt_state
= NVMF_TGT_INIT_NONE
;
443 nvmf_tgt_advance_state();
447 nvmf_subsystem_write_config_json(struct spdk_json_write_ctx
*w
)
449 spdk_json_write_array_begin(w
);
451 spdk_json_write_object_begin(w
);
452 spdk_json_write_named_string(w
, "method", "nvmf_set_config");
454 spdk_json_write_named_object_begin(w
, "params");
455 spdk_json_write_named_uint32(w
, "acceptor_poll_rate", g_spdk_nvmf_tgt_conf
->acceptor_poll_rate
);
456 spdk_json_write_named_object_begin(w
, "admin_cmd_passthru");
457 spdk_json_write_named_bool(w
, "identify_ctrlr",
458 g_spdk_nvmf_tgt_conf
->admin_passthru
.identify_ctrlr
);
459 spdk_json_write_object_end(w
);
460 spdk_json_write_object_end(w
);
461 spdk_json_write_object_end(w
);
463 spdk_nvmf_tgt_write_config_json(w
, g_spdk_nvmf_tgt
);
464 spdk_json_write_array_end(w
);
467 static struct spdk_subsystem g_spdk_subsystem_nvmf
= {
469 .init
= nvmf_subsystem_init
,
470 .fini
= nvmf_subsystem_fini
,
471 .write_config_json
= nvmf_subsystem_write_config_json
,
474 SPDK_SUBSYSTEM_REGISTER(g_spdk_subsystem_nvmf
)
475 SPDK_SUBSYSTEM_DEPEND(nvmf
, bdev
)
476 SPDK_SUBSYSTEM_DEPEND(nvmf
, sock
)