/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "spdk_internal/log.h"

#include <stdbool.h>

#include "lib/test_env.c"

struct spdk_trace_flag SPDK_TRACE_NVME = {
	.name = "nvme",
	.enabled = false,
};

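/*
 * Include nvme_ctrlr.c directly so the implementation under test is
 * compiled into this translation unit, giving the test cases access to
 * its static functions and internal controller state.
 */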
#include "nvme/nvme_ctrlr.c"

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

uint64_t g_ut_tsc = 0;
struct spdk_nvme_registers g_ut_nvme_regs = {};

__thread int nvme_thread_ioq_index = -1;

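/*
 * Transport stubs: the functions below stand in for the real NVMe
 * transport. Register accesses are redirected into g_ut_nvme_regs, so the
 * tests can drive the controller initialization state machine simply by
 * flipping CC and CSTS bits in the fake register file.
 */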
struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
		const struct spdk_nvme_ctrlr_opts *opts,
		void *devhandle)
{
	return NULL;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	return 0;
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	return 0;
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}
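
/*
 * For example, a test can simulate the device becoming ready by setting
 * g_ut_nvme_regs.csts.bits.rdy = 1; nvme_ctrlr_process_init() then
 * observes the change through the nvme_transport_ctrlr_get_reg_4() stub
 * above.
 */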

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	return UINT32_MAX;
}

uint32_t
nvme_transport_ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr)
{
	return SPDK_NVME_IO_QUEUE_MAX_ENTRIES;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     enum spdk_nvme_qprio qprio)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	qpair->ctrlr = ctrlr;
	qpair->id = qid;
	qpair->qprio = qprio;

	return qpair;
}

int
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	free(qpair);
	return 0;
}

int
nvme_transport_ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
		struct spdk_nvme_ctrlr *ctrlr,
		enum spdk_nvme_qprio qprio,
		uint32_t num_requests)
{
	qpair->id = id;
	qpair->qprio = qprio;
	qpair->ctrlr = ctrlr;

	return 0;
}

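/*
 * fake_cpl_success() invokes an admin command callback immediately with a
 * successful completion, so the stubbed commands below complete
 * synchronously and controller initialization can proceed without a real
 * device.
 */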
static void
fake_cpl_success(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cpl cpl = {};

	cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	cb_fn(cb_arg, &cpl);
}

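/*
 * The set/get feature stubs assert unconditionally: none of the tests in
 * this file are expected to issue these commands, so reaching them is a
 * test failure.
 */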
int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT_FATAL(0);
	return -1;
}

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT_FATAL(0);
	return -1;
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);

	/*
	 * Free the request here so it does not leak.
	 * For the purposes of this unit test, we don't need to bother emulating request submission.
	 */
	free(req);

	return 0;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	return 0;
}

void
nvme_qpair_disable(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_qpair_enable(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_completion_poll_status *status = arg;

	status->cpl = *cpl;
	status->done = true;
}

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_critical_warning_state state, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_identify_controller(struct spdk_nvme_ctrlr *ctrlr, void *payload,
				   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

void
nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
}

int
nvme_ns_construct(struct spdk_nvme_ns *ns, uint16_t id,
		  struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

struct nvme_request *
nvme_allocate_request(struct spdk_nvme_qpair *qpair,
		      const struct nvme_payload *payload, uint32_t payload_size,
		      spdk_nvme_cmd_cb cb_fn,
		      void *cb_arg)
{
	struct nvme_request *req = NULL;

	req = calloc(1, sizeof(*req));

	if (req != NULL) {
		memset(req, 0, offsetof(struct nvme_request, children));

		req->payload = *payload;
		req->payload_size = payload_size;

		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->qpair = qpair;
		req->pid = getpid();
	}

	return req;
}

struct nvme_request *
nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
			     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_payload payload;

	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
	payload.u.contig = buffer;

	return nvme_allocate_request(qpair, &payload, payload_size, cb_fn, cb_arg);
}

struct nvme_request *
nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
}

void
nvme_free_request(struct nvme_request *req)
{
	free(req);
}

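/*
 * The test_nvme_ctrlr_init_en_*_rdy_* cases below walk
 * nvme_ctrlr_process_init() through the CC.EN/CSTS.RDY state machine,
 * one state per call, starting from each possible combination of the
 * enable and ready bits.
 */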
static void
test_nvme_ctrlr_init_en_1_rdy_0(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 0
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);

	/*
	 * Transition to CSTS.RDY = 1.
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_1_rdy_1(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 1
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Default round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = 0x0;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to default round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Weighted round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to weighted round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Vendor specific arbitration mechanism enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to vendor specific arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_0(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

static void
test_nvme_ctrlr_init_en_0_rdy_1(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 1
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}

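/*
 * setup_qpairs() builds a minimal controller for the I/O qpair allocation
 * tests: qid 0 is cleared from the free list because it is reserved for
 * the admin queue, and qids 1..num_io_queues are marked free.
 */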
static void
setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
{
	uint32_t i;

	CU_ASSERT_FATAL(pthread_mutex_init(&ctrlr->ctrlr_lock, NULL) == 0);

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);

	ctrlr->opts.num_io_queues = num_io_queues;
	ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
	SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);

	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
	for (i = 1; i <= num_io_queues; i++) {
		spdk_bit_array_set(ctrlr->free_io_qids, i);
	}
}

static void
cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct(ctrlr);
}

static void
test_alloc_io_qpair_rr_1(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0;

	setup_qpairs(&ctrlr, 1);

	/*
	 * Fake to simulate the controller with default round robin
	 * arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;

	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	/* Only 1 I/O qpair was allocated, so this should fail */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpair has been returned to the free list,
	 * we should be able to allocate it again.
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/* Only qprio 0 is acceptable for the default round robin arbitration mechanism */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(q0 == NULL);
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 2);
	SPDK_CU_ASSERT_FATAL(q0 == NULL);
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	/* Only qprio values 0 through 3 are acceptable */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 4) == NULL);

	cleanup_qpairs(&ctrlr);
}

static void
test_alloc_io_qpair_wrr_1(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1;

	setup_qpairs(&ctrlr, 2);

	/*
	 * Fake to simulate the controller with weighted round robin
	 * arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	/*
	 * Allocate 2 qpairs and free them in reverse order of allocation
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Allocate 2 qpairs and free them in allocation order
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 2);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 2);
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);

	/* Only qprio values 0 through 3 are acceptable */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 4) == NULL);

	cleanup_qpairs(&ctrlr);
}

static void
test_alloc_io_qpair_wrr_2(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1, *q2, *q3;

	setup_qpairs(&ctrlr, 4);

	/*
	 * Fake to simulate the controller with weighted round robin
	 * arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 2);
	SPDK_CU_ASSERT_FATAL(q2 != NULL);
	SPDK_CU_ASSERT_FATAL(q2->qprio == 2);
	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
	SPDK_CU_ASSERT_FATAL(q3 != NULL);
	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
	/* Only 4 I/O qpairs were allocated, so this should fail */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpairs have been returned to the free list,
	 * we should be able to allocate them again.
	 *
	 * Allocate 4 I/O qpairs, with two pairs sharing the same qprio.
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 1);
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
	q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
	SPDK_CU_ASSERT_FATAL(q2 != NULL);
	SPDK_CU_ASSERT_FATAL(q2->qprio == 3);
	q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
	SPDK_CU_ASSERT_FATAL(q3 != NULL);
	SPDK_CU_ASSERT_FATAL(q3->qprio == 3);

	/*
	 * Free all I/O qpairs in allocation order
	 */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);

	cleanup_qpairs(&ctrlr);
}

static void
test_nvme_ctrlr_fail(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};

	ctrlr.opts.num_io_queues = 0;
	nvme_ctrlr_fail(&ctrlr, false);

	CU_ASSERT(ctrlr.is_failed == true);
}

static void
test_nvme_ctrlr_construct_intel_support_log_page_list(void)
{
	bool res;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_intel_log_page_directory payload = {};
	struct spdk_pci_id pci_id = {};

	/* Get quirks for a device with all-zero vendor/device IDs */
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	CU_ASSERT(ctrlr.quirks == 0);

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == false);

	/* Set the vendor to Intel, but provide no device ID */
	ctrlr.cdata.vid = pci_id.vendor_id = SPDK_PCI_VID_INTEL;
	payload.temperature_statistics_log_len = 1;
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
	CU_ASSERT(res == false);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
	CU_ASSERT(res == false);

	/* Set valid vendor ID, device ID, and subdevice ID */
	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
	payload.temperature_statistics_log_len = 0;
	pci_id.vendor_id = SPDK_PCI_VID_INTEL;
	pci_id.device_id = 0x0953;
	pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
	pci_id.subdevice_id = 0x3702;
	ctrlr.quirks = nvme_get_quirks(&pci_id);
	memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));

	nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
	CU_ASSERT(res == false);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
	CU_ASSERT(res == false);
}

static void
test_nvme_ctrlr_set_supported_features(void)
{
	bool res;
	struct spdk_nvme_ctrlr ctrlr = {};

	/* Set an invalid vendor ID */
	ctrlr.cdata.vid = 0xFFFF;
	nvme_ctrlr_set_supported_features(&ctrlr);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
	CU_ASSERT(res == false);

	ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
	nvme_ctrlr_set_supported_features(&ctrlr);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
	CU_ASSERT(res == true);
	res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
	CU_ASSERT(res == true);
}

#if 0 /* TODO: move to PCIe-specific unit test */
static void
test_nvme_ctrlr_alloc_cmb(void)
{
	int rc;
	uint64_t offset;
	struct spdk_nvme_ctrlr ctrlr = {};

	ctrlr.cmb_size = 0x1000000;
	ctrlr.cmb_current_offset = 0x100;
	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x200, 0x1000, &offset);
	CU_ASSERT(rc == 0);
	CU_ASSERT(offset == 0x1000);
	CU_ASSERT(ctrlr.cmb_current_offset == 0x1200);

	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800, 0x1000, &offset);
	CU_ASSERT(rc == 0);
	CU_ASSERT(offset == 0x2000);
	CU_ASSERT(ctrlr.cmb_current_offset == 0x2800);

	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800000, 0x100000, &offset);
	CU_ASSERT(rc == 0);
	CU_ASSERT(offset == 0x100000);
	CU_ASSERT(ctrlr.cmb_current_offset == 0x900000);

	rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
	CU_ASSERT(rc == -1);
}
#endif

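/*
 * Register each test case with CUnit and run the suite in verbose mode,
 * returning the number of failures as the process exit status.
 */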
int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvme_ctrlr", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "test nvme_ctrlr init CC.EN = 1 CSTS.RDY = 0",
			    test_nvme_ctrlr_init_en_1_rdy_0) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 1 CSTS.RDY = 1",
			       test_nvme_ctrlr_init_en_1_rdy_1) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0",
			       test_nvme_ctrlr_init_en_0_rdy_0) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 1",
			       test_nvme_ctrlr_init_en_0_rdy_1) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = RR",
			       test_nvme_ctrlr_init_en_0_rdy_0_ams_rr) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = WRR",
			       test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = VS",
			       test_nvme_ctrlr_init_en_0_rdy_0_ams_vs) == NULL
		|| CU_add_test(suite, "alloc_io_qpair_rr 1", test_alloc_io_qpair_rr_1) == NULL
		|| CU_add_test(suite, "alloc_io_qpair_wrr 1", test_alloc_io_qpair_wrr_1) == NULL
		|| CU_add_test(suite, "alloc_io_qpair_wrr 2", test_alloc_io_qpair_wrr_2) == NULL
		|| CU_add_test(suite, "test nvme_ctrlr function nvme_ctrlr_fail", test_nvme_ctrlr_fail) == NULL
		|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_construct_intel_support_log_page_list",
			       test_nvme_ctrlr_construct_intel_support_log_page_list) == NULL
		|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_set_supported_features",
			       test_nvme_ctrlr_set_supported_features) == NULL
#if 0 /* TODO: move to PCIe-specific unit test */
		|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_alloc_cmb",
			       test_nvme_ctrlr_alloc_cmb) == NULL
#endif
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}