/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
36 #include "spdk_cunit.h"
38 #include "lib/test_env.c"
40 bool trace_flag
= false;
41 #define SPDK_TRACE_NVME trace_flag
43 #include "nvme/nvme_qpair.c"
45 struct nvme_driver _g_nvme_driver
= {
46 .lock
= PTHREAD_MUTEX_INITIALIZER
,
50 nvme_allocate_request(struct spdk_nvme_qpair
*qpair
,
51 const struct nvme_payload
*payload
, uint32_t payload_size
,
52 spdk_nvme_cmd_cb cb_fn
,
55 struct nvme_request
*req
;
57 req
= STAILQ_FIRST(&qpair
->free_req
);
62 STAILQ_REMOVE_HEAD(&qpair
->free_req
, stailq
);
65 * Only memset up to (but not including) the children
66 * TAILQ_ENTRY. children, and following members, are
67 * only used as part of I/O splitting so we avoid
68 * memsetting them until it is actually needed.
69 * They will be initialized in nvme_request_add_child()
70 * if the request is split.
72 memset(req
, 0, offsetof(struct nvme_request
, children
));
75 req
->payload
= *payload
;
76 req
->payload_size
= payload_size
;
84 nvme_allocate_request_contig(struct spdk_nvme_qpair
*qpair
, void *buffer
, uint32_t payload_size
,
85 spdk_nvme_cmd_cb cb_fn
, void *cb_arg
)
87 struct nvme_payload payload
;
89 payload
.type
= NVME_PAYLOAD_TYPE_CONTIG
;
90 payload
.u
.contig
= buffer
;
92 return nvme_allocate_request(qpair
, &payload
, payload_size
, cb_fn
, cb_arg
);
96 nvme_allocate_request_null(struct spdk_nvme_qpair
*qpair
, spdk_nvme_cmd_cb cb_fn
, void *cb_arg
)
98 return nvme_allocate_request_contig(qpair
, NULL
, 0, cb_fn
, cb_arg
);
102 nvme_free_request(struct nvme_request
*req
)
104 SPDK_CU_ASSERT_FATAL(req
!= NULL
);
105 SPDK_CU_ASSERT_FATAL(req
->qpair
!= NULL
);
106 STAILQ_INSERT_HEAD(&req
->qpair
->free_req
, req
, stailq
);
110 nvme_request_remove_child(struct nvme_request
*parent
,
111 struct nvme_request
*child
)
113 parent
->num_children
--;
114 TAILQ_REMOVE(&parent
->children
, child
, child_tailq
);
/* Transport stub: pretend the qpair was enabled successfully.
 * NOTE(review): body reconstructed as a return-0 stub - confirm signature. */
int
nvme_transport_qpair_enable(struct spdk_nvme_qpair *qpair)
{
	return 0;
}
/* Transport stub: pretend the qpair was disabled successfully.
 * NOTE(review): body reconstructed as a return-0 stub - confirm signature. */
int
nvme_transport_qpair_disable(struct spdk_nvme_qpair *qpair)
{
	return 0;
}
/* Transport stub: pretend the qpair fail request succeeded.
 * NOTE(review): body reconstructed as a return-0 stub - confirm signature. */
int
nvme_transport_qpair_fail(struct spdk_nvme_qpair *qpair)
{
	return 0;
}
/* Transport stub: accept any submitted request without doing I/O.
 * NOTE(review): body reconstructed as a return-0 stub - confirm signature. */
int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	return 0;
}
/* Transport stub: report zero completions processed.
 * NOTE(review): return type/body reconstructed - confirm against upstream. */
int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	return 0;
}
/* Ctrlr stub: pretend the I/O qpair was freed successfully.
 * NOTE(review): body reconstructed as a return-0 stub - confirm signature. */
int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	return 0;
}
156 prepare_submit_request_test(struct spdk_nvme_qpair
*qpair
,
157 struct spdk_nvme_ctrlr
*ctrlr
)
159 memset(ctrlr
, 0, sizeof(*ctrlr
));
160 ctrlr
->free_io_qids
= NULL
;
161 TAILQ_INIT(&ctrlr
->active_io_qpairs
);
162 TAILQ_INIT(&ctrlr
->active_procs
);
163 nvme_qpair_init(qpair
, 1, ctrlr
, 0, 32);
167 cleanup_submit_request_test(struct spdk_nvme_qpair
*qpair
)
169 free(qpair
->req_buf
);
/* Completion callback for requests the test expects to succeed. */
static void
expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
}
/* Completion callback for requests the test expects to fail. */
static void
expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
}
187 struct spdk_nvme_qpair qpair
= {};
188 struct nvme_request
*req
;
189 struct spdk_nvme_ctrlr ctrlr
= {};
191 prepare_submit_request_test(&qpair
, &ctrlr
);
193 req
= nvme_allocate_request_null(&qpair
, expected_success_callback
, NULL
);
194 SPDK_CU_ASSERT_FATAL(req
!= NULL
);
196 CU_ASSERT(nvme_qpair_submit_request(&qpair
, req
) == 0);
198 nvme_free_request(req
);
200 cleanup_submit_request_test(&qpair
);
204 test_ctrlr_failed(void)
206 struct spdk_nvme_qpair qpair
= {};
207 struct nvme_request
*req
;
208 struct spdk_nvme_ctrlr ctrlr
= {};
211 prepare_submit_request_test(&qpair
, &ctrlr
);
213 req
= nvme_allocate_request_contig(&qpair
, payload
, sizeof(payload
), expected_failure_callback
,
215 SPDK_CU_ASSERT_FATAL(req
!= NULL
);
217 /* Set the controller to failed.
218 * Set the controller to resetting so that the qpair won't get re-enabled.
220 ctrlr
.is_failed
= true;
221 ctrlr
.is_resetting
= true;
223 CU_ASSERT(nvme_qpair_submit_request(&qpair
, req
) != 0);
225 cleanup_submit_request_test(&qpair
);
228 static void struct_packing(void)
230 /* ctrlr is the first field in nvme_qpair after the fields
231 * that are used in the I/O path. Make sure the I/O path fields
232 * all fit into two cache lines.
234 CU_ASSERT(offsetof(struct spdk_nvme_qpair
, ctrlr
) <= 128);
237 static void test_nvme_qpair_process_completions(void)
239 struct spdk_nvme_qpair qpair
= {};
240 struct spdk_nvme_ctrlr ctrlr
= {};
242 prepare_submit_request_test(&qpair
, &ctrlr
);
243 qpair
.ctrlr
->is_resetting
= true;
245 spdk_nvme_qpair_process_completions(&qpair
, 0);
246 cleanup_submit_request_test(&qpair
);
249 static void test_nvme_completion_is_retry(void)
251 struct spdk_nvme_cpl cpl
= {};
253 cpl
.status
.sct
= SPDK_NVME_SCT_GENERIC
;
254 cpl
.status
.sc
= SPDK_NVME_SC_NAMESPACE_NOT_READY
;
256 CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl
));
258 cpl
.status
.sc
= SPDK_NVME_SC_FORMAT_IN_PROGRESS
;
260 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
262 CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl
));
264 cpl
.status
.sc
= SPDK_NVME_SC_INVALID_OPCODE
;
265 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
267 cpl
.status
.sc
= SPDK_NVME_SC_INVALID_FIELD
;
268 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
270 cpl
.status
.sc
= SPDK_NVME_SC_COMMAND_ID_CONFLICT
;
271 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
273 cpl
.status
.sc
= SPDK_NVME_SC_DATA_TRANSFER_ERROR
;
274 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
276 cpl
.status
.sc
= SPDK_NVME_SC_ABORTED_POWER_LOSS
;
277 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
279 cpl
.status
.sc
= SPDK_NVME_SC_INTERNAL_DEVICE_ERROR
;
280 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
282 cpl
.status
.sc
= SPDK_NVME_SC_ABORTED_BY_REQUEST
;
283 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
285 cpl
.status
.sc
= SPDK_NVME_SC_ABORTED_FAILED_FUSED
;
286 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
288 cpl
.status
.sc
= SPDK_NVME_SC_ABORTED_MISSING_FUSED
;
289 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
291 cpl
.status
.sc
= SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT
;
292 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
294 cpl
.status
.sc
= SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR
;
295 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
297 cpl
.status
.sc
= SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR
;
298 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
300 cpl
.status
.sc
= SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS
;
301 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
303 cpl
.status
.sc
= SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID
;
304 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
306 cpl
.status
.sc
= SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID
;
307 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
309 cpl
.status
.sc
= SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID
;
310 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
312 cpl
.status
.sc
= SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF
;
313 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
315 cpl
.status
.sc
= SPDK_NVME_SC_INVALID_PRP_OFFSET
;
316 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
318 cpl
.status
.sc
= SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED
;
319 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
321 cpl
.status
.sc
= SPDK_NVME_SC_LBA_OUT_OF_RANGE
;
322 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
324 cpl
.status
.sc
= SPDK_NVME_SC_CAPACITY_EXCEEDED
;
325 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
327 cpl
.status
.sc
= SPDK_NVME_SC_RESERVATION_CONFLICT
;
328 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
330 cpl
.status
.sc
= 0x70;
331 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
333 cpl
.status
.sct
= SPDK_NVME_SCT_COMMAND_SPECIFIC
;
334 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
336 cpl
.status
.sct
= SPDK_NVME_SCT_MEDIA_ERROR
;
337 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
339 cpl
.status
.sct
= SPDK_NVME_SCT_VENDOR_SPECIFIC
;
340 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
342 cpl
.status
.sct
= 0x4;
343 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl
));
348 test_get_status_string(void)
350 const char *status_string
;
352 status_string
= get_status_string(SPDK_NVME_SCT_GENERIC
, SPDK_NVME_SC_SUCCESS
);
353 CU_ASSERT(strcmp(status_string
, "SUCCESS") == 0);
355 status_string
= get_status_string(SPDK_NVME_SCT_COMMAND_SPECIFIC
,
356 SPDK_NVME_SC_COMPLETION_QUEUE_INVALID
);
357 CU_ASSERT(strcmp(status_string
, "INVALID COMPLETION QUEUE") == 0);
359 status_string
= get_status_string(SPDK_NVME_SCT_MEDIA_ERROR
, SPDK_NVME_SC_UNRECOVERED_READ_ERROR
);
360 CU_ASSERT(strcmp(status_string
, "UNRECOVERED READ ERROR") == 0);
362 status_string
= get_status_string(SPDK_NVME_SCT_VENDOR_SPECIFIC
, 0);
363 CU_ASSERT(strcmp(status_string
, "VENDOR SPECIFIC") == 0);
365 status_string
= get_status_string(100, 0);
366 CU_ASSERT(strcmp(status_string
, "RESERVED") == 0);
370 int main(int argc
, char **argv
)
372 CU_pSuite suite
= NULL
;
373 unsigned int num_failures
;
375 if (CU_initialize_registry() != CUE_SUCCESS
) {
376 return CU_get_error();
379 suite
= CU_add_suite("nvme_qpair", NULL
, NULL
);
381 CU_cleanup_registry();
382 return CU_get_error();
385 if (CU_add_test(suite
, "test3", test3
) == NULL
386 || CU_add_test(suite
, "ctrlr_failed", test_ctrlr_failed
) == NULL
387 || CU_add_test(suite
, "struct_packing", struct_packing
) == NULL
388 || CU_add_test(suite
, "spdk_nvme_qpair_process_completions",
389 test_nvme_qpair_process_completions
) == NULL
390 || CU_add_test(suite
, "nvme_completion_is_retry", test_nvme_completion_is_retry
) == NULL
392 || CU_add_test(suite
, "get_status_string", test_get_status_string
) == NULL
395 CU_cleanup_registry();
396 return CU_get_error();
399 CU_basic_set_mode(CU_BRM_VERBOSE
);
400 CU_basic_run_tests();
401 num_failures
= CU_get_number_of_failures();
402 CU_cleanup_registry();