]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/test/lib/nvme/unit/nvme_qpair_c/nvme_qpair_ut.c
add subtree-ish sources for 12.0.3
[ceph.git] / ceph / src / spdk / test / lib / nvme / unit / nvme_qpair_c / nvme_qpair_ut.c
1 /*-
2 * BSD LICENSE
3 *
4 * Copyright (c) Intel Corporation.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <stdbool.h>
35
36 #include "spdk_cunit.h"
37
38 #include "lib/test_env.c"
39
40 bool trace_flag = false;
41 #define SPDK_TRACE_NVME trace_flag
42
43 #include "nvme/nvme_qpair.c"
44
/* Minimal process-wide driver instance required by nvme_qpair.c; only the
 * mutex needs real initialization for these tests. */
struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};
48
49 struct nvme_request *
50 nvme_allocate_request(struct spdk_nvme_qpair *qpair,
51 const struct nvme_payload *payload, uint32_t payload_size,
52 spdk_nvme_cmd_cb cb_fn,
53 void *cb_arg)
54 {
55 struct nvme_request *req;
56
57 req = STAILQ_FIRST(&qpair->free_req);
58 if (req == NULL) {
59 return NULL;
60 }
61
62 STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
63
64 /*
65 * Only memset up to (but not including) the children
66 * TAILQ_ENTRY. children, and following members, are
67 * only used as part of I/O splitting so we avoid
68 * memsetting them until it is actually needed.
69 * They will be initialized in nvme_request_add_child()
70 * if the request is split.
71 */
72 memset(req, 0, offsetof(struct nvme_request, children));
73 req->cb_fn = cb_fn;
74 req->cb_arg = cb_arg;
75 req->payload = *payload;
76 req->payload_size = payload_size;
77 req->qpair = qpair;
78 req->pid = getpid();
79
80 return req;
81 }
82
83 struct nvme_request *
84 nvme_allocate_request_contig(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
85 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
86 {
87 struct nvme_payload payload;
88
89 payload.type = NVME_PAYLOAD_TYPE_CONTIG;
90 payload.u.contig = buffer;
91
92 return nvme_allocate_request(qpair, &payload, payload_size, cb_fn, cb_arg);
93 }
94
/* Allocate a request with no data payload (NULL buffer, zero length). */
struct nvme_request *
nvme_allocate_request_null(struct spdk_nvme_qpair *qpair, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_allocate_request_contig(qpair, NULL, 0, cb_fn, cb_arg);
}
100
/* Return a request to its owning qpair's free list. The request must have
 * been obtained from nvme_allocate_request() (req->qpair must be set). */
void
nvme_free_request(struct nvme_request *req)
{
	SPDK_CU_ASSERT_FATAL(req != NULL);
	SPDK_CU_ASSERT_FATAL(req->qpair != NULL);
	STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
}
108
109 void
110 nvme_request_remove_child(struct nvme_request *parent,
111 struct nvme_request *child)
112 {
113 parent->num_children--;
114 TAILQ_REMOVE(&parent->children, child, child_tailq);
115 }
116
/* Transport stub: report the qpair as successfully enabled. */
int
nvme_transport_qpair_enable(struct spdk_nvme_qpair *qpair)
{
	return 0;
}
122
/* Transport stub: report the qpair as successfully disabled. */
int
nvme_transport_qpair_disable(struct spdk_nvme_qpair *qpair)
{
	return 0;
}
128
/* Transport stub: report that failing the qpair succeeded. */
int
nvme_transport_qpair_fail(struct spdk_nvme_qpair *qpair)
{
	return 0;
}
134
/* Transport stub: pretend the transport accepted the request.
 * TODO: extend to capture/inspect submitted requests if future tests need it. */
int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	return 0;
}
141
/* Transport stub: report zero completions processed.
 * TODO: extend to inject completions if future tests need it. */
int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	return 0;
}
148
/* Stub: report successful qpair teardown; the tests free resources directly
 * via cleanup_submit_request_test(). */
int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	return 0;
}
154
/* Set up a zeroed controller with empty qpair/process lists and initialize
 * the qpair under test against it. Pair with cleanup_submit_request_test(). */
static void
prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
			    struct spdk_nvme_ctrlr *ctrlr)
{
	memset(ctrlr, 0, sizeof(*ctrlr));
	ctrlr->free_io_qids = NULL;
	TAILQ_INIT(&ctrlr->active_io_qpairs);
	TAILQ_INIT(&ctrlr->active_procs);
	/* qpair id 1 with 32 request entries; remaining args per
	 * nvme_qpair_init() in nvme_qpair.c — presumably qprio 0. */
	nvme_qpair_init(qpair, 1, ctrlr, 0, 32);
}
165
/* Release the request buffer allocated by nvme_qpair_init(). */
static void
cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
{
	free(qpair->req_buf);
}
171
/* Completion callback asserting the command finished without error. */
static void
expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
}
177
/* Completion callback asserting the command finished with an error status. */
static void
expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
}
183
/* Happy path: submitting a payload-less request on a healthy controller
 * must succeed. */
static void
test3(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request *req;
	struct spdk_nvme_ctrlr ctrlr = {};

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(&qpair, expected_success_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

	/* The transport stub does not consume the request, so release it here. */
	nvme_free_request(req);

	cleanup_submit_request_test(&qpair);
}
202
/* Submitting to a failed controller must be rejected and complete the
 * request with an error (checked by expected_failure_callback). */
static void
test_ctrlr_failed(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request *req;
	struct spdk_nvme_ctrlr ctrlr = {};
	char payload[4096];

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_contig(&qpair, payload, sizeof(payload), expected_failure_callback,
					   NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	/* Set the controller to failed.
	 * Set the controller to resetting so that the qpair won't get re-enabled.
	 */
	ctrlr.is_failed = true;
	ctrlr.is_resetting = true;

	/* NOTE(review): no nvme_free_request() here — presumably the failed
	 * submit path completes and recycles the request internally; verify
	 * against nvme_qpair_submit_request(). */
	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);

	cleanup_submit_request_test(&qpair);
}
227
/* Layout regression test for struct spdk_nvme_qpair. */
static void struct_packing(void)
{
	/* ctrlr is the first field in nvme_qpair after the fields
	 * that are used in the I/O path. Make sure the I/O path fields
	 * all fit into two cache lines.
	 */
	CU_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128);
}
236
/* Processing completions while the controller is resetting must be a no-op
 * (exercised with max_completions == 0). */
static void test_nvme_qpair_process_completions(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct spdk_nvme_ctrlr ctrlr = {};

	prepare_submit_request_test(&qpair, &ctrlr);
	qpair.ctrlr->is_resetting = true;

	spdk_nvme_qpair_process_completions(&qpair, 0);
	cleanup_submit_request_test(&qpair);
}
248
249 static void test_nvme_completion_is_retry(void)
250 {
251 struct spdk_nvme_cpl cpl = {};
252
253 cpl.status.sct = SPDK_NVME_SCT_GENERIC;
254 cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
255 cpl.status.dnr = 0;
256 CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
257
258 cpl.status.sc = SPDK_NVME_SC_FORMAT_IN_PROGRESS;
259 cpl.status.dnr = 1;
260 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
261 cpl.status.dnr = 0;
262 CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
263
264 cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
265 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
266
267 cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
268 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
269
270 cpl.status.sc = SPDK_NVME_SC_COMMAND_ID_CONFLICT;
271 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
272
273 cpl.status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
274 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
275
276 cpl.status.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
277 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
278
279 cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
280 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
281
282 cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
283 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
284
285 cpl.status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
286 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
287
288 cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
289 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
290
291 cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
292 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
293
294 cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
295 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
296
297 cpl.status.sc = SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR;
298 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
299
300 cpl.status.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
301 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
302
303 cpl.status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
304 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
305
306 cpl.status.sc = SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID;
307 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
308
309 cpl.status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
310 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
311
312 cpl.status.sc = SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF;
313 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
314
315 cpl.status.sc = SPDK_NVME_SC_INVALID_PRP_OFFSET;
316 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
317
318 cpl.status.sc = SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED;
319 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
320
321 cpl.status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
322 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
323
324 cpl.status.sc = SPDK_NVME_SC_CAPACITY_EXCEEDED;
325 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
326
327 cpl.status.sc = SPDK_NVME_SC_RESERVATION_CONFLICT;
328 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
329
330 cpl.status.sc = 0x70;
331 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
332
333 cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
334 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
335
336 cpl.status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
337 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
338
339 cpl.status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
340 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
341
342 cpl.status.sct = 0x4;
343 CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
344 }
345
#ifdef DEBUG
/* get_status_string() is only compiled in debug builds; spot-check one
 * entry from each status code type plus the fallback strings. */
static void
test_get_status_string(void)
{
	const char *status_string;

	status_string = get_status_string(SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(strcmp(status_string, "SUCCESS") == 0);

	status_string = get_status_string(SPDK_NVME_SCT_COMMAND_SPECIFIC,
					  SPDK_NVME_SC_COMPLETION_QUEUE_INVALID);
	CU_ASSERT(strcmp(status_string, "INVALID COMPLETION QUEUE") == 0);

	status_string = get_status_string(SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
	CU_ASSERT(strcmp(status_string, "UNRECOVERED READ ERROR") == 0);

	/* Any vendor-specific status collapses to a single string. */
	status_string = get_status_string(SPDK_NVME_SCT_VENDOR_SPECIFIC, 0);
	CU_ASSERT(strcmp(status_string, "VENDOR SPECIFIC") == 0);

	/* Unknown status code type falls back to "RESERVED". */
	status_string = get_status_string(100, 0);
	CU_ASSERT(strcmp(status_string, "RESERVED") == 0);
}
#endif
369
370 int main(int argc, char **argv)
371 {
372 CU_pSuite suite = NULL;
373 unsigned int num_failures;
374
375 if (CU_initialize_registry() != CUE_SUCCESS) {
376 return CU_get_error();
377 }
378
379 suite = CU_add_suite("nvme_qpair", NULL, NULL);
380 if (suite == NULL) {
381 CU_cleanup_registry();
382 return CU_get_error();
383 }
384
385 if (CU_add_test(suite, "test3", test3) == NULL
386 || CU_add_test(suite, "ctrlr_failed", test_ctrlr_failed) == NULL
387 || CU_add_test(suite, "struct_packing", struct_packing) == NULL
388 || CU_add_test(suite, "spdk_nvme_qpair_process_completions",
389 test_nvme_qpair_process_completions) == NULL
390 || CU_add_test(suite, "nvme_completion_is_retry", test_nvme_completion_is_retry) == NULL
391 #ifdef DEBUG
392 || CU_add_test(suite, "get_status_string", test_get_status_string) == NULL
393 #endif
394 ) {
395 CU_cleanup_registry();
396 return CU_get_error();
397 }
398
399 CU_basic_set_mode(CU_BRM_VERBOSE);
400 CU_basic_run_tests();
401 num_failures = CU_get_number_of_failures();
402 CU_cleanup_registry();
403 return num_failures;
404 }