/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "common/lib/test_env.c"

pid_t g_spdk_nvme_pid;

bool trace_flag = false;
#define SPDK_LOG_NVME trace_flag

#include "nvme/nvme_qpair.c"

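/*
 * The qpair implementation is compiled directly into this test unit so that
 * its file-scope internals (e.g. nvme_completion_is_retry()) can be called
 * from the tests; the transport entry points it depends on are stubbed below.
 */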
struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

void
nvme_request_remove_child(struct nvme_request *parent,
			  struct nvme_request *child)
{
	parent->num_children--;
	TAILQ_REMOVE(&parent->children, child, child_tailq);
}

void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
}

int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	/* TODO */
	return 0;
}

int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	/* TODO */
	return 0;
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

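/*
 * Test fixtures: prepare_submit_request_test() zeroes the controller,
 * initializes its qpair/process lists, and sets up a qpair (ID 1, 32 request
 * entries); cleanup_submit_request_test() releases the request buffer that
 * nvme_qpair_init() allocated.
 */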
static void
prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
			    struct spdk_nvme_ctrlr *ctrlr)
{
	memset(ctrlr, 0, sizeof(*ctrlr));
	ctrlr->free_io_qids = NULL;
	TAILQ_INIT(&ctrlr->active_io_qpairs);
	TAILQ_INIT(&ctrlr->active_procs);
	nvme_qpair_init(qpair, 1, ctrlr, 0, 32);
}

static void
cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
{
	free(qpair->req_buf);
}

static void
expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
}

static void
expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
}

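/* Submit a no-payload (NULL) request on a healthy qpair and expect it to be accepted. */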
static void
test3(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request *req;
	struct spdk_nvme_ctrlr ctrlr = {};

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(&qpair, expected_success_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

	nvme_free_request(req);

	cleanup_submit_request_test(&qpair);
}

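/*
 * Submissions against a failed controller must be rejected; any completion
 * delivered for the request must carry an error status.
 */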
static void
test_ctrlr_failed(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request *req;
	struct spdk_nvme_ctrlr ctrlr = {};
	char payload[4096];

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_contig(&qpair, payload, sizeof(payload), expected_failure_callback,
					   NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	/* Set the controller to failed.
	 * Set the controller to resetting so that the qpair won't get re-enabled.
	 */
	ctrlr.is_failed = true;
	ctrlr.is_resetting = true;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);

	cleanup_submit_request_test(&qpair);
}

static void struct_packing(void)
{
	/* ctrlr is the first field in nvme_qpair after the fields
	 * that are used in the I/O path. Make sure the I/O path fields
	 * all fit into two cache lines.
	 */
	CU_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128);
}

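/*
 * The struct_packing() constraint above could also be enforced at compile
 * time; a minimal sketch, assuming a C11 toolchain (not wired into this
 * suite):
 *
 *	_Static_assert(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128,
 *		       "spdk_nvme_qpair I/O path fields exceed two cache lines");
 */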
static void test_nvme_qpair_process_completions(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct spdk_nvme_ctrlr ctrlr = {};

	prepare_submit_request_test(&qpair, &ctrlr);
	qpair.ctrlr->is_resetting = true;

	spdk_nvme_qpair_process_completions(&qpair, 0);
	cleanup_submit_request_test(&qpair);
}

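/*
 * Exercise nvme_completion_is_retry(): the retryable cases checked here are
 * "namespace not ready" and "format in progress" (generic status) and
 * "internal path error" (path status), and only while the DNR (do not retry)
 * bit is clear; every other status code and status code type is not retried.
 */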
static void test_nvme_completion_is_retry(void)
{
	struct spdk_nvme_cpl cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_FORMAT_IN_PROGRESS;
	cpl.status.dnr = 1;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_COMMAND_ID_CONFLICT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_PRP_OFFSET;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_CAPACITY_EXCEEDED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_RESERVATION_CONFLICT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = 0x70;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_PATH;
	cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_PATH;
	cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	cpl.status.dnr = 1;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = 0x4;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
}

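/*
 * The status-string check is compiled only for debug builds; the #ifdef DEBUG
 * guard here is mirrored where the test is registered in main().
 */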
#ifdef DEBUG
static void
test_get_status_string(void)
{
	const char *status_string;
	struct spdk_nvme_status status;

	status.sct = SPDK_NVME_SCT_GENERIC;
	status.sc = SPDK_NVME_SC_SUCCESS;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "SUCCESS") == 0);

	status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	status.sc = SPDK_NVME_SC_COMPLETION_QUEUE_INVALID;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "INVALID COMPLETION QUEUE") == 0);

	status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
	status.sc = SPDK_NVME_SC_UNRECOVERED_READ_ERROR;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "UNRECOVERED READ ERROR") == 0);

	status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	status.sc = 0;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "VENDOR SPECIFIC") == 0);

	status.sct = 0x4;
	status.sc = 0;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "RESERVED") == 0);
}
#endif

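/*
 * Command error injection: cover submission-path injection on the admin
 * queue, completion-path injection on an I/O queue, re-adding the same
 * opcode (which must not create a duplicate entry), and removal.
 */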
static void
test_nvme_qpair_add_cmd_error_injection(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	prepare_submit_request_test(&qpair, &ctrlr);
	ctrlr.adminq = &qpair;

	/* Admin error injection at submission path */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, NULL,
			SPDK_NVME_OPC_GET_FEATURES, true, 5000, 1,
			SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_FIELD);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, NULL, SPDK_NVME_OPC_GET_FEATURES);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	/* IO error injection at completion path */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_READ, false, 0, 1,
			SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Add the same opcode again and verify that no new entry is allocated. */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_READ, false, 0, 1,
			SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&qpair.err_cmd_head));
	CU_ASSERT(TAILQ_NEXT(TAILQ_FIRST(&qpair.err_cmd_head), link) == NULL);

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_READ);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_COMPARE, true, 0, 5,
			SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_COMPARE_FAILURE);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_COMPARE);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	cleanup_submit_request_test(&qpair);
}

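/*
 * Register the tests with CUnit and run them; the process exit status is the
 * number of test failures, so 0 means the whole suite passed.
 */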
int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvme_qpair", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (CU_add_test(suite, "test3", test3) == NULL
	    || CU_add_test(suite, "ctrlr_failed", test_ctrlr_failed) == NULL
	    || CU_add_test(suite, "struct_packing", struct_packing) == NULL
	    || CU_add_test(suite, "spdk_nvme_qpair_process_completions",
			   test_nvme_qpair_process_completions) == NULL
	    || CU_add_test(suite, "nvme_completion_is_retry", test_nvme_completion_is_retry) == NULL
#ifdef DEBUG
	    || CU_add_test(suite, "get_status_string", test_get_status_string) == NULL
#endif
	    || CU_add_test(suite, "spdk_nvme_qpair_add_cmd_error_injection",
			   test_nvme_qpair_add_cmd_error_injection) == NULL
	   ) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}