/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "spdk_cunit.h"

#include "spdk_internal/log.h"

#include "lib/test_env.c"

struct spdk_trace_flag SPDK_TRACE_NVME = {
	.name = "nvme",
	.enabled = false,
};

#include "nvme/nvme_ctrlr.c"
struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

uint64_t g_ut_tsc = 0;
struct spdk_nvme_registers g_ut_nvme_regs = {};

__thread int nvme_thread_ioq_index = -1;
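
/*
 * Transport stubs: register reads and writes are redirected to the fake
 * register block g_ut_nvme_regs, so the controller state machine can be
 * driven without any real hardware.
 */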
struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
		const struct spdk_nvme_ctrlr_opts *opts,
		void *devhandle)
{
	return NULL;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	return 0;
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	return 0;
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	return UINT32_MAX;
}

uint32_t
nvme_transport_ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr)
{
	return SPDK_NVME_IO_QUEUE_MAX_ENTRIES;
}
struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
		enum spdk_nvme_qprio qprio)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	qpair->ctrlr = ctrlr;
	qpair->id = qid;
	qpair->qprio = qprio;

	return qpair;
}

int
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	free(qpair);
	return 0;
}

int
nvme_transport_ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	return 0;
}
int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
		struct spdk_nvme_ctrlr *ctrlr,
		enum spdk_nvme_qprio qprio,
		uint32_t num_requests)
{
	qpair->id = id;
	qpair->qprio = qprio;
	qpair->ctrlr = ctrlr;

	return 0;
}

static void
fake_cpl_success(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};

	cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	cb_fn(cb_arg, &cpl);
}
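
/*
 * Admin command stubs: commands issued during controller initialization are
 * completed immediately and successfully via fake_cpl_success(), so
 * nvme_ctrlr_process_init() never has to wait on a real admin queue.
 */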
int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
		uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
		uint32_t cdw11, void *payload, uint32_t payload_size,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
		uint32_t nsid, void *payload, uint32_t payload_size,
		uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}
int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);

	/*
	 * Free the request here so it does not leak.
	 * For the purposes of this unit test, we don't need to bother emulating request submission.
	 */
	nvme_free_request(req);

	return 0;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	return 0;
}

void
nvme_qpair_disable(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_qpair_enable(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_completion_poll_status *status = arg;

	status->cpl = *cpl;
	status->done = true;
}
int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
		union spdk_nvme_critical_warning_state state, spdk_nvme_cmd_cb cb_fn,
		void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_identify_controller(struct spdk_nvme_ctrlr *ctrlr, void *payload,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
		uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
		void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
		uint32_t size, uint32_t offset, void *payload,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}
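
/*
 * Namespace and request helpers: requests are plain calloc()/free()
 * allocations, which is all the admin traffic generated by these tests needs.
 */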
void
nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
}

int
nvme_ns_construct(struct spdk_nvme_ns *ns, uint16_t id,
		struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

struct nvme_request *
nvme_allocate_request(struct spdk_nvme_qpair *qpair,
		const struct nvme_payload *payload, uint32_t payload_size,
		spdk_nvme_cmd_cb cb_fn,
		void *cb_arg)
{
	struct nvme_request *req = NULL;

	req = calloc(1, sizeof(*req));
	SPDK_CU_ASSERT_FATAL(req != NULL);

	memset(req, 0, offsetof(struct nvme_request, children));

	req->payload = *payload;
	req->payload_size = payload_size;

	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;
	req->qpair = qpair;

	return req;
}

struct nvme_request *
nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_payload payload;

	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
	payload.u.contig = buffer;

	return nvme_allocate_request(qpair, &payload, payload_size, cb_fn, cb_arg);
}

struct nvme_request *
nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
}

void
nvme_free_request(struct nvme_request *req)
{
	free(req);
}
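
/*
 * Controller initialization tests: each case seeds CC.EN and CSTS.RDY in the
 * fake register block, then steps nvme_ctrlr_process_init() through the
 * expected state transitions.
 */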
static void
test_nvme_ctrlr_init_en_1_rdy_0(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 0
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);

	/*
	 * Transition to CSTS.RDY = 1.
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
static void
test_nvme_ctrlr_init_en_1_rdy_1(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 1
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Default round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = 0x0;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to default round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Weighted round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to weighted round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Vendor specific arbitration mechanism enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to vendor specific arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
static void
test_nvme_ctrlr_init_en_0_rdy_0(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
static void
test_nvme_ctrlr_init_en_0_rdy_1(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 1
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
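
/*
 * I/O qpair allocation tests: setup_qpairs() builds a controller with a
 * populated free_io_qids bit array; the tests then allocate and free qpairs
 * with various qprio values under the configured arbitration mechanism.
 */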
static void
setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
{
	uint32_t i;

	CU_ASSERT_FATAL(pthread_mutex_init(&ctrlr->ctrlr_lock, NULL) == 0);

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);

	ctrlr->opts.num_io_queues = num_io_queues;
	ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
	SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);

	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
	for (i = 1; i <= num_io_queues; i++) {
		spdk_bit_array_set(ctrlr->free_io_qids, i);
	}
}

static void
cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct(ctrlr);
}
static void
test_alloc_io_qpair_rr_1(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0;

	setup_qpairs(&ctrlr, 1);

	/*
	 * Fake to simulate the controller with default round robin
	 * arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;

	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	/* Only 1 I/O qpair was allocated, so this should fail */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpair has been returned to the free list,
	 * we should be able to allocate it again.
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/* Only 0 qprio is acceptable for default round robin arbitration mechanism */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(q0 == NULL);
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 2);
	SPDK_CU_ASSERT_FATAL(q0 == NULL);
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	/* Only 0 ~ 3 qprio is acceptable */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 4) == NULL);

	cleanup_qpairs(&ctrlr);
}
static void
test_alloc_io_qpair_wrr_1(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1;

	setup_qpairs(&ctrlr, 2);

	/*
	 * Fake to simulate the controller with weighted round robin
	 * arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	/*
	 * Allocate 2 qpairs and free them
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Allocate 2 qpairs and free them in the reverse order
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 2);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 2);
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);

	/* Only 0 ~ 3 qprio is acceptable */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 4) == NULL);

	cleanup_qpairs(&ctrlr);
}
static void
test_alloc_io_qpair_wrr_2(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1, *q2, *q3;

	setup_qpairs(&ctrlr, 4);

	/*
	 * Fake to simulate the controller with weighted round robin
	 * arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 2);
	SPDK_CU_ASSERT_FATAL(q2 != NULL);
	SPDK_CU_ASSERT_FATAL(q2->qprio == 2);
	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
	SPDK_CU_ASSERT_FATAL(q3 != NULL);
	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
	/* Only 4 I/O qpairs were allocated, so this should fail */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpairs have been returned to the free list,
	 * we should be able to allocate them again.
	 *
	 * Allocate 4 I/O qpairs, half of them with the same qprio.
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 1);
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
	SPDK_CU_ASSERT_FATAL(q2 != NULL);
	SPDK_CU_ASSERT_FATAL(q2->qprio == 3);
	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
	SPDK_CU_ASSERT_FATAL(q3 != NULL);
	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);

	/*
	 * Free all I/O qpairs in reverse order
	 */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);

	cleanup_qpairs(&ctrlr);
}
static void
test_nvme_ctrlr_fail(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	ctrlr.opts.num_io_queues = 0;
	nvme_ctrlr_fail(&ctrlr, false);

	CU_ASSERT(ctrlr.is_failed == true);
}
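
/*
 * Vendor-specific capability discovery: which Intel log pages and features
 * are reported as supported depends on the PCI IDs (quirks) and the contents
 * of the Intel log page directory.
 */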
static void
test_nvme_ctrlr_construct_intel_support_log_page_list(void)
{
	bool res;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_intel_log_page_directory payload = {};
	struct spdk_pci_id pci_id = {};

	/* Get quirks for a device with all 0 vendor/device id */
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	CU_ASSERT(ctrlr.quirks == 0);

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == false);

	/* Set the vendor to Intel, but provide no device id */
	ctrlr.cdata.vid = pci_id.vendor_id = SPDK_PCI_VID_INTEL;
	payload.temperature_statistics_log_len = 1;
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
	CU_ASSERT(res == false);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
	CU_ASSERT(res == false);

	/* Set valid vendor id, device id and sub device id */
	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
	payload.temperature_statistics_log_len = 0;
	pci_id.vendor_id = SPDK_PCI_VID_INTEL;
	pci_id.device_id = 0x0953;
	pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
	pci_id.subdevice_id = 0x3702;
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == false);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
	CU_ASSERT(res == false);
}
static void
test_nvme_ctrlr_set_supported_features(void)
{
	bool res;
	struct spdk_nvme_ctrlr ctrlr = {};

	/* Set an invalid vendor id */
	ctrlr.cdata.vid = 0xFFFF;
	nvme_ctrlr_set_supported_features(&ctrlr);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
	CU_ASSERT(res == false);

	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
	nvme_ctrlr_set_supported_features(&ctrlr);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
	CU_ASSERT(res == true);
}
#if 0 /* TODO: move to PCIe-specific unit test */
static void
test_nvme_ctrlr_alloc_cmb(void)
{
	int rc;
	uint64_t offset;
	struct spdk_nvme_ctrlr ctrlr = {};

	ctrlr.cmb_size = 0x1000000;
	ctrlr.cmb_current_offset = 0x100;

	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x200, 0x1000, &offset);
	CU_ASSERT(rc == 0);
	CU_ASSERT(offset == 0x1000);
	CU_ASSERT(ctrlr.cmb_current_offset == 0x1200);

	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800, 0x1000, &offset);
	CU_ASSERT(rc == 0);
	CU_ASSERT(offset == 0x2000);
	CU_ASSERT(ctrlr.cmb_current_offset == 0x2800);

	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800000, 0x100000, &offset);
	CU_ASSERT(rc == 0);
	CU_ASSERT(offset == 0x100000);
	CU_ASSERT(ctrlr.cmb_current_offset == 0x900000);

	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
	CU_ASSERT(rc == -1);
}
#endif
int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvme_ctrlr", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "test nvme_ctrlr init CC.EN = 1 CSTS.RDY = 0",
			    test_nvme_ctrlr_init_en_1_rdy_0) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 1 CSTS.RDY = 1",
			       test_nvme_ctrlr_init_en_1_rdy_1) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0",
			       test_nvme_ctrlr_init_en_0_rdy_0) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 1",
			       test_nvme_ctrlr_init_en_0_rdy_1) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = RR",
			       test_nvme_ctrlr_init_en_0_rdy_0_ams_rr) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = WRR",
			       test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = VS",
			       test_nvme_ctrlr_init_en_0_rdy_0_ams_vs) == NULL
		|| CU_add_test(suite, "alloc_io_qpair_rr 1", test_alloc_io_qpair_rr_1) == NULL
		|| CU_add_test(suite, "alloc_io_qpair_wrr 1", test_alloc_io_qpair_wrr_1) == NULL
		|| CU_add_test(suite, "alloc_io_qpair_wrr 2", test_alloc_io_qpair_wrr_2) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr function nvme_ctrlr_fail", test_nvme_ctrlr_fail) == NULL
		|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_construct_intel_support_log_page_list",
			       test_nvme_ctrlr_construct_intel_support_log_page_list) == NULL
		|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_set_supported_features",
			       test_nvme_ctrlr_set_supported_features) == NULL
#if 0 /* TODO: move to PCIe-specific unit test */
		|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_alloc_cmb",
			       test_nvme_ctrlr_alloc_cmb) == NULL
#endif
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}