/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "nvme/nvme_ns_cmd.c"
#include "nvme/nvme.c"

#include "common/lib/test_env.c"

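/*
 * nvme_ns_cmd.c and nvme.c are compiled directly into this test binary, so
 * the transport- and controller-level functions they reference are stubbed
 * out below. Only the command-building and request-splitting logic under
 * test is real.
 */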
DEFINE_STUB(spdk_nvme_qpair_process_completions, int32_t,
            (struct spdk_nvme_qpair *qpair,
             uint32_t max_completions), 0);

static struct nvme_driver _g_nvme_driver = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
};

static struct nvme_request *g_request = NULL;

int
spdk_pci_nvme_enumerate(spdk_pci_enum_cb enum_cb, void *enum_ctx)
{
        return -1;
}

static void nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
{
}

static int nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length)
{
        uint32_t *lba_count = cb_arg;

        /*
         * We need to set address to something here, since the SGL splitting code will
         * use it to determine PRP compatibility. Just use a rather arbitrary address
         * for now - these tests will not actually cause data to be read from or written
         * to this address.
         */
        *address = (void *)(uintptr_t)0x10000000;
        *length = *lba_count;
        return 0;
}

bool
spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
{
        return true;
}

struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
                const struct spdk_nvme_ctrlr_opts *opts,
                void *devhandle)
{
        return NULL;
}

void
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{
        return 0;
}

int
nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
{
        return 0;
}

void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
}

struct spdk_pci_addr
spdk_pci_device_get_addr(struct spdk_pci_device *pci_dev)
{
        struct spdk_pci_addr pci_addr;

        memset(&pci_addr, 0, sizeof(pci_addr));
        return pci_addr;
}

struct spdk_pci_id
spdk_pci_device_get_id(struct spdk_pci_device *pci_dev)
{
        struct spdk_pci_id pci_id;

        memset(&pci_id, 0xFF, sizeof(pci_id));

        return pci_id;
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
        memset(opts, 0, sizeof(*opts));
}

uint32_t
spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns)
{
        return ns->sector_size;
}

uint32_t
spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
{
        return ns->ctrlr->max_xfer_size;
}

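/*
 * Stand-in for the real submission path: remember the request that would
 * have been sent to the controller so each test can inspect the generated
 * command instead of submitting it.
 */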
int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
        g_request = req;

        return 0;
}

void
nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
{
        return;
}

void
nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
{
        return;
}

int
nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
{
        return 0;
}

int
nvme_transport_ctrlr_scan(const struct spdk_nvme_transport_id *trid,
                          void *cb_ctx,
                          spdk_nvme_probe_cb probe_cb,
                          spdk_nvme_remove_cb remove_cb,
                          bool direct_connect)
{
        return 0;
}

static void
prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
                 struct spdk_nvme_qpair *qpair,
                 uint32_t sector_size, uint32_t md_size, uint32_t max_xfer_size,
                 uint32_t stripe_size, bool extended_lba)
{
        uint32_t num_requests = 32;
        uint32_t i;

        ctrlr->max_xfer_size = max_xfer_size;
        /*
         * Clear the flags field - we especially want to make sure the SGL_SUPPORTED flag is not set
         * so that we test the SGL splitting path.
         */
        ctrlr->flags = 0;
        ctrlr->min_page_size = 4096;
        ctrlr->page_size = 4096;
        memset(&ctrlr->opts, 0, sizeof(ctrlr->opts));
        memset(ns, 0, sizeof(*ns));
        ns->ctrlr = ctrlr;
        ns->sector_size = sector_size;
        ns->extended_lba_size = sector_size;
        if (extended_lba) {
                ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
                ns->extended_lba_size += md_size;
        }
        ns->md_size = md_size;
        ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
        ns->sectors_per_stripe = stripe_size / ns->extended_lba_size;

        memset(qpair, 0, sizeof(*qpair));
        qpair->ctrlr = ctrlr;
        qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
        SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);

        for (i = 0; i < num_requests; i++) {
                struct nvme_request *req = qpair->req_buf + i * sizeof(struct nvme_request);

                STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
        }

        g_request = NULL;
}

static void
cleanup_after_test(struct spdk_nvme_qpair *qpair)
{
        free(qpair->req_buf);
}

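/*
 * Decode the LBA and block count from a generated read/write command:
 * CDW10-11 hold the 64-bit starting LBA, and CDW12 bits 15:0 hold the
 * number of logical blocks as a zero-based value (hence the "+ 1").
 */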
static void
nvme_cmd_interpret_rw(const struct spdk_nvme_cmd *cmd,
                      uint64_t *lba, uint32_t *num_blocks)
{
        *lba = *(const uint64_t *)&cmd->cdw10;
        *num_blocks = (cmd->cdw12 & 0xFFFFu) + 1;
}

static void
split_test(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_qpair qpair;
        struct spdk_nvme_ctrlr ctrlr;
        void *payload;
        uint64_t lba, cmd_lba;
        uint32_t lba_count, cmd_lba_count;
        int rc;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
        payload = malloc(512);
        lba = 0;
        lba_count = 1;

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);

        CU_ASSERT(g_request->num_children == 0);
        nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(cmd_lba == lba);
        CU_ASSERT(cmd_lba_count == lba_count);

        free(payload);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
split_test2(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        struct nvme_request *child;
        void *payload;
        uint64_t lba, cmd_lba;
        uint32_t lba_count, cmd_lba_count;
        int rc;

        /*
         * Controller has max xfer of 128 KB (256 blocks).
         * Submit an I/O of 256 KB starting at LBA 0, which should be split
         * on the max I/O boundary into two I/Os of 128 KB.
         */

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
        payload = malloc(256 * 1024);
        lba = 0;
        lba_count = (256 * 1024) / 512;

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);

        CU_ASSERT(g_request->num_children == 2);

        child = TAILQ_FIRST(&g_request->children);
        nvme_request_remove_child(g_request, child);
        nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(child->num_children == 0);
        CU_ASSERT(child->payload_size == 128 * 1024);
        CU_ASSERT(cmd_lba == 0);
        CU_ASSERT(cmd_lba_count == 256); /* 256 * 512-byte blocks = 128 KB */
        nvme_free_request(child);

        child = TAILQ_FIRST(&g_request->children);
        nvme_request_remove_child(g_request, child);
        nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(child->num_children == 0);
        CU_ASSERT(child->payload_size == 128 * 1024);
        CU_ASSERT(cmd_lba == 256);
        CU_ASSERT(cmd_lba_count == 256);
        nvme_free_request(child);

        CU_ASSERT(TAILQ_EMPTY(&g_request->children));

        free(payload);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
split_test3(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        struct nvme_request *child;
        void *payload;
        uint64_t lba, cmd_lba;
        uint32_t lba_count, cmd_lba_count;
        int rc;

        /*
         * Controller has max xfer of 128 KB (256 blocks).
         * Submit an I/O of 256 KB starting at LBA 10, which should be split
         * into two I/Os:
         *  1) LBA = 10, count = 256 blocks
         *  2) LBA = 266, count = 256 blocks
         */

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
        payload = malloc(256 * 1024);
        lba = 10; /* Start at a non-zero LBA; no striping is configured in this test */
        lba_count = (256 * 1024) / 512;

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);

        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);

        child = TAILQ_FIRST(&g_request->children);
        nvme_request_remove_child(g_request, child);
        nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(child->num_children == 0);
        CU_ASSERT(child->payload_size == 128 * 1024);
        CU_ASSERT(cmd_lba == 10);
        CU_ASSERT(cmd_lba_count == 256);
        nvme_free_request(child);

        child = TAILQ_FIRST(&g_request->children);
        nvme_request_remove_child(g_request, child);
        nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(child->num_children == 0);
        CU_ASSERT(child->payload_size == 128 * 1024);
        CU_ASSERT(cmd_lba == 266);
        CU_ASSERT(cmd_lba_count == 256);
        nvme_free_request(child);

        CU_ASSERT(TAILQ_EMPTY(&g_request->children));

        free(payload);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
split_test4(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        struct nvme_request *child;
        void *payload;
        uint64_t lba, cmd_lba;
        uint32_t lba_count, cmd_lba_count;
        int rc;

        /*
         * Controller has max xfer of 128 KB (256 blocks) and a stripe size of 128 KB.
         * (Same as split_test3 except with driver-assisted striping enabled.)
         * Submit an I/O of 256 KB starting at LBA 10, which should be split
         * into three I/Os:
         *  1) LBA = 10, count = 246 blocks (less than max I/O size to align to stripe size)
         *  2) LBA = 256, count = 256 blocks (aligned to stripe size and max I/O size)
         *  3) LBA = 512, count = 10 blocks (finish off the remaining I/O size)
         */

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
        payload = malloc(256 * 1024);
        lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
        lba_count = (256 * 1024) / 512;

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
                                   SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);

        SPDK_CU_ASSERT_FATAL(g_request->num_children == 3);

        child = TAILQ_FIRST(&g_request->children);
        nvme_request_remove_child(g_request, child);
        nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(child->num_children == 0);
        CU_ASSERT(child->payload_size == (256 - 10) * 512);
        CU_ASSERT(child->payload_offset == 0);
        CU_ASSERT(cmd_lba == 10);
        CU_ASSERT(cmd_lba_count == 256 - 10);
        CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
        CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
        nvme_free_request(child);

        child = TAILQ_FIRST(&g_request->children);
        nvme_request_remove_child(g_request, child);
        nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(child->num_children == 0);
        CU_ASSERT(child->payload_size == 128 * 1024);
        CU_ASSERT(child->payload_offset == (256 - 10) * 512);
        CU_ASSERT(cmd_lba == 256);
        CU_ASSERT(cmd_lba_count == 256);
        CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
        CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
        nvme_free_request(child);

        child = TAILQ_FIRST(&g_request->children);
        nvme_request_remove_child(g_request, child);
        nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(child->num_children == 0);
        CU_ASSERT(child->payload_size == 10 * 512);
        CU_ASSERT(child->payload_offset == (512 - 10) * 512);
        CU_ASSERT(cmd_lba == 512);
        CU_ASSERT(cmd_lba_count == 10);
        CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
        CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
        nvme_free_request(child);

        CU_ASSERT(TAILQ_EMPTY(&g_request->children));

        free(payload);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

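/*
 * Walk the children of a split request and check payload offsets and the
 * generated commands. Note that NLB in CDW12 is zero-based, which is why
 * each child's block count appears as (sectors_per_max_io - 1).
 */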
static void
test_cmd_child_request(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        int rc = 0;
        struct nvme_request *child, *tmp;
        void *payload;
        uint64_t lba = 0x1000;
        uint32_t i = 0;
        uint32_t offset = 0;
        uint32_t sector_size = 512;
        uint32_t max_io_size = 128 * 1024;
        uint32_t sectors_per_max_io = max_io_size / sector_size;

        prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_io_size, 0, false);

        payload = malloc(128 * 1024);
        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io, NULL, NULL, 0);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->payload_offset == 0);
        CU_ASSERT(g_request->num_children == 0);
        nvme_free_request(g_request);

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io - 1, NULL, NULL, 0);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->payload_offset == 0);
        CU_ASSERT(g_request->num_children == 0);
        nvme_free_request(g_request);

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io * 4, NULL, NULL, 0);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->num_children == 4);

        /* An I/O that would split into more child requests than the queue supports must fail. */
        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba,
                                   (DEFAULT_IO_QUEUE_REQUESTS + 1) * sector_size, NULL, NULL, 0);
        SPDK_CU_ASSERT_FATAL(rc == -EINVAL);

        TAILQ_FOREACH_SAFE(child, &g_request->children, child_tailq, tmp) {
                nvme_request_remove_child(g_request, child);
                CU_ASSERT(child->payload_offset == offset);
                CU_ASSERT(child->cmd.opc == SPDK_NVME_OPC_READ);
                CU_ASSERT(child->cmd.nsid == ns.id);
                CU_ASSERT(child->cmd.cdw10 == (lba + sectors_per_max_io * i));
                CU_ASSERT(child->cmd.cdw12 == ((sectors_per_max_io - 1) | 0));
                offset += max_io_size;
                nvme_free_request(child);
                i++;
        }

        free(payload);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_flush(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        spdk_nvme_cmd_cb cb_fn = NULL;
        void *cb_arg = NULL;
        int rc;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

        rc = spdk_nvme_ns_cmd_flush(&ns, &qpair, cb_fn, cb_arg);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_FLUSH);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_write_zeroes(void)
{
        struct spdk_nvme_ns ns = { 0 };
        struct spdk_nvme_ctrlr ctrlr = { 0 };
        struct spdk_nvme_qpair qpair;
        spdk_nvme_cmd_cb cb_fn = NULL;
        void *cb_arg = NULL;
        uint64_t cmd_lba;
        uint32_t cmd_lba_count;
        int rc;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

        rc = spdk_nvme_ns_cmd_write_zeroes(&ns, &qpair, 0, 2, cb_fn, cb_arg, 0);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_ZEROES);
        CU_ASSERT(g_request->cmd.nsid == ns.id);
        nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT_EQUAL(cmd_lba, 0);
        CU_ASSERT_EQUAL(cmd_lba_count, 2);

        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_dataset_management(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        spdk_nvme_cmd_cb cb_fn = NULL;
        void *cb_arg = NULL;
        struct spdk_nvme_dsm_range ranges[256];
        uint16_t i;
        int rc = 0;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

        for (i = 0; i < 256; i++) {
                ranges[i].starting_lba = i;
                ranges[i].length = 1;
                ranges[i].attributes.raw = 0;
        }

        /* TRIM one LBA */
        rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
                        ranges, 1, cb_fn, cb_arg);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
        CU_ASSERT(g_request->cmd.nsid == ns.id);
        CU_ASSERT(g_request->cmd.cdw10 == 0);
        CU_ASSERT(g_request->cmd.cdw11 == SPDK_NVME_DSM_ATTR_DEALLOCATE);
        spdk_dma_free(g_request->payload.contig_or_cb_arg);
        nvme_free_request(g_request);

        /* TRIM 256 LBAs */
        rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
                        ranges, 256, cb_fn, cb_arg);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
        CU_ASSERT(g_request->cmd.nsid == ns.id);
        CU_ASSERT(g_request->cmd.cdw10 == 255u); /* NR (number of ranges) is zero-based */
        CU_ASSERT(g_request->cmd.cdw11 == SPDK_NVME_DSM_ATTR_DEALLOCATE);
        spdk_dma_free(g_request->payload.contig_or_cb_arg);
        nvme_free_request(g_request);

        /* Zero ranges must be rejected. */
        rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
                        NULL, 0, cb_fn, cb_arg);
        CU_ASSERT(rc != 0);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_readv(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        int rc = 0;
        void *cb_arg;
        uint32_t lba_count = 256;
        uint32_t sector_size = 512;
        uint64_t sge_length = lba_count * sector_size;

        cb_arg = malloc(512);
        prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
        rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
                                    nvme_request_reset_sgl, nvme_request_next_sge);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_READ);
        CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
        CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
        CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
        CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

        /* A missing next_sge_fn must be rejected. */
        rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
                                    nvme_request_reset_sgl, NULL);
        CU_ASSERT(rc != 0);

        free(cb_arg);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_writev(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        int rc = 0;
        void *cb_arg;
        uint32_t lba_count = 256;
        uint32_t sector_size = 512;
        uint64_t sge_length = lba_count * sector_size;

        cb_arg = malloc(512);
        prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
        rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
                                     nvme_request_reset_sgl, nvme_request_next_sge);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
        CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
        CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
        CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
        CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

        /* A missing reset_sgl_fn must be rejected. */
        rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
                                     NULL, nvme_request_next_sge);
        CU_ASSERT(rc != 0);

        free(cb_arg);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_comparev(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        int rc = 0;
        void *cb_arg;
        uint32_t lba_count = 256;
        uint32_t sector_size = 512;
        uint64_t sge_length = lba_count * sector_size;

        cb_arg = malloc(512);
        prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
        rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
                                       nvme_request_reset_sgl, nvme_request_next_sge);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COMPARE);
        CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
        CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
        CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
        CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

        /* A missing next_sge_fn must be rejected. */
        rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
                                       nvme_request_reset_sgl, NULL);
        CU_ASSERT(rc != 0);

        free(cb_arg);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

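/*
 * Verify that I/O flags pass straight through to CDW12; in SPDK these flag
 * values are defined to match the NVMe command encoding (force unit access
 * is bit 30 and limited retry is bit 31 of CDW12), so no translation occurs.
 */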
static void
test_io_flags(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        void *payload;
        uint64_t lba;
        uint32_t lba_count;
        int rc;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
        payload = malloc(256 * 1024);
        lba = 0;
        lba_count = (4 * 1024) / 512;

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
                                   SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
        CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
        nvme_free_request(g_request);

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
                                   SPDK_NVME_IO_FLAGS_LIMITED_RETRY);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) == 0);
        CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) != 0);
        nvme_free_request(g_request);

        free(payload);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_reservation_register(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        struct spdk_nvme_reservation_register_data *payload;
        bool ignore_key = 1;
        spdk_nvme_cmd_cb cb_fn = NULL;
        void *cb_arg = NULL;
        int rc = 0;
        uint32_t tmp_cdw10;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
        payload = malloc(sizeof(struct spdk_nvme_reservation_register_data));

        rc = spdk_nvme_ns_cmd_reservation_register(&ns, &qpair, payload, ignore_key,
                        SPDK_NVME_RESERVE_REGISTER_KEY,
                        SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
                        cb_fn, cb_arg);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REGISTER);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

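        /* CDW10 layout: RREGA in bits 2:0, IEKEY in bit 3, CPTPL in bits 31:30 */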
        tmp_cdw10 = SPDK_NVME_RESERVE_REGISTER_KEY;
        tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
        tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_PTPL_NO_CHANGES << 30;

        CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);

        spdk_dma_free(g_request->payload.contig_or_cb_arg);
        nvme_free_request(g_request);
        free(payload);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_reservation_release(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        struct spdk_nvme_reservation_key_data *payload;
        bool ignore_key = 1;
        spdk_nvme_cmd_cb cb_fn = NULL;
        void *cb_arg = NULL;
        int rc = 0;
        uint32_t tmp_cdw10;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
        payload = malloc(sizeof(struct spdk_nvme_reservation_key_data));

        rc = spdk_nvme_ns_cmd_reservation_release(&ns, &qpair, payload, ignore_key,
                        SPDK_NVME_RESERVE_RELEASE,
                        SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
                        cb_fn, cb_arg);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_RELEASE);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

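        /* CDW10 layout: RRELA in bits 2:0, IEKEY in bit 3, RTYPE in bits 15:8 */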
        tmp_cdw10 = SPDK_NVME_RESERVE_RELEASE;
        tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
        tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;

        CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);

        spdk_dma_free(g_request->payload.contig_or_cb_arg);
        nvme_free_request(g_request);
        free(payload);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_reservation_acquire(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        struct spdk_nvme_reservation_acquire_data *payload;
        bool ignore_key = 1;
        spdk_nvme_cmd_cb cb_fn = NULL;
        void *cb_arg = NULL;
        int rc = 0;
        uint32_t tmp_cdw10;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
        payload = malloc(sizeof(struct spdk_nvme_reservation_acquire_data));

        rc = spdk_nvme_ns_cmd_reservation_acquire(&ns, &qpair, payload, ignore_key,
                        SPDK_NVME_RESERVE_ACQUIRE,
                        SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
                        cb_fn, cb_arg);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_ACQUIRE);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

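        /* CDW10 layout: RACQA in bits 2:0, IEKEY in bit 3, RTYPE in bits 15:8 */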
        tmp_cdw10 = SPDK_NVME_RESERVE_ACQUIRE;
        tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
        tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;

        CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);

        spdk_dma_free(g_request->payload.contig_or_cb_arg);
        nvme_free_request(g_request);
        free(payload);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_reservation_report(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        struct spdk_nvme_reservation_status_data *payload;
        spdk_nvme_cmd_cb cb_fn = NULL;
        void *cb_arg = NULL;
        int rc = 0;
        uint32_t size = sizeof(struct spdk_nvme_reservation_status_data);

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

        payload = calloc(1, size);
        SPDK_CU_ASSERT_FATAL(payload != NULL);

        rc = spdk_nvme_ns_cmd_reservation_report(&ns, &qpair, payload, size, cb_fn, cb_arg);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REPORT);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

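        /* The test expects CDW10 (NUMD) to carry the payload size in dwords. */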
        CU_ASSERT(g_request->cmd.cdw10 == (size / 4));

        spdk_dma_free(g_request->payload.contig_or_cb_arg);
        nvme_free_request(g_request);
        free(payload);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_write_with_md(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        int rc = 0;
        char *buffer = NULL;
        char *metadata = NULL;
        uint32_t block_size, md_size;
        struct nvme_request *child0, *child1;

        block_size = 512;
        md_size = 128;

        buffer = malloc((block_size + md_size) * 384);
        SPDK_CU_ASSERT_FATAL(buffer != NULL);
        metadata = malloc(md_size * 384);
        SPDK_CU_ASSERT_FATAL(metadata != NULL);

        /*
         * 512 byte data + 128 byte metadata
         * Separate metadata buffer
         * Max data transfer size 128 KB
         * No stripe size
         *
         * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);

        rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
                                            NULL, NULL, 0, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);

        CU_ASSERT(g_request->payload.md == metadata);
        CU_ASSERT(g_request->payload_size == 256 * 512);

        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 128 byte metadata
         * Extended LBA
         * Max data transfer size 128 KB
         * No stripe size
         *
         * 256 blocks * (512 + 128) bytes per block = two I/Os:
         *   child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
         *   child 1: 52 blocks
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);

        rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
                                            NULL, NULL, 0, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload.md == NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 204 * (512 + 128));
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload.md == NULL);
        CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
        CU_ASSERT(child1->payload_size == 52 * (512 + 128));

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Extended LBA
         * Max data transfer size 128 KB
         * No stripe size
         * No protection information
         *
         * 256 blocks * (512 + 8) bytes per block = two I/Os:
         *   child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
         *   child 1: 4 blocks
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);

        rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
                                            NULL, NULL, 0, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload.md == NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 252 * (512 + 8));
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload.md == NULL);
        CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
        CU_ASSERT(child1->payload_size == 4 * (512 + 8));

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Extended LBA
         * Max data transfer size 128 KB
         * No stripe size
         * Protection information enabled + PRACT
         *
         * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
         * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
         * However, the splitting code does not account for PRACT when calculating
         * max sectors per transfer, so we actually get two I/Os:
         *   child 0: 252 blocks
         *   child 1: 4 blocks
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
        ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

        rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL,
                                            SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload.md == NULL);
        CU_ASSERT(child1->payload_offset == 252 * 512);
        CU_ASSERT(child1->payload_size == 4 * 512);

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Separate metadata buffer
         * Max data transfer size 128 KB
         * No stripe size
         * Protection information enabled + PRACT
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
        ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

        rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL,
                                            SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);

        CU_ASSERT(g_request->payload.md == metadata);
        CU_ASSERT(g_request->payload_size == 256 * 512);

        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Separate metadata buffer
         * Max data transfer size 128 KB
         * No stripe size
         * Protection information enabled + PRACT
         *
         * 384 blocks * 512 bytes = two I/Os:
         *   child 0: 256 blocks
         *   child 1: 128 blocks
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
        ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

        rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384, NULL, NULL,
                                            SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 256 * 512);
        CU_ASSERT(child0->md_offset == 0);
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload_offset == 256 * 512);
        CU_ASSERT(child1->payload_size == 128 * 512);
        CU_ASSERT(child1->md_offset == 256 * 8);

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        free(buffer);
        free(metadata);
}

static void
test_nvme_ns_cmd_read_with_md(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        int rc = 0;
        char *buffer = NULL;
        char *metadata = NULL;
        uint32_t block_size, md_size;

        block_size = 512;
        md_size = 128;

        buffer = malloc(block_size * 256);
        SPDK_CU_ASSERT_FATAL(buffer != NULL);
        metadata = malloc(md_size * 256);
        SPDK_CU_ASSERT_FATAL(metadata != NULL);

        /*
         * 512 byte data + 128 byte metadata
         * Separate metadata buffer
         * Max data transfer size 128 KB
         * No stripe size
         *
         * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);

        rc = spdk_nvme_ns_cmd_read_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
                                           NULL, NULL, 0, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);

        CU_ASSERT(g_request->payload.md == metadata);
        CU_ASSERT(g_request->payload_size == 256 * 512);

        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
        free(buffer);
        free(metadata);
}

static void
test_nvme_ns_cmd_compare_with_md(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        int rc = 0;
        char *buffer = NULL;
        char *metadata = NULL;
        uint32_t block_size, md_size;
        struct nvme_request *child0, *child1;

        block_size = 512;
        md_size = 128;

        buffer = malloc((block_size + md_size) * 384);
        SPDK_CU_ASSERT_FATAL(buffer != NULL);
        metadata = malloc(md_size * 384);
        SPDK_CU_ASSERT_FATAL(metadata != NULL);

        /*
         * 512 byte data + 128 byte metadata
         * Separate metadata buffer
         * Max data transfer size 128 KB
         * No stripe size
         *
         * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);

        rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
                                              NULL, NULL, 0, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);

        CU_ASSERT(g_request->payload.md == metadata);
        CU_ASSERT(g_request->payload_size == 256 * 512);

        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 128 byte metadata
         * Extended LBA
         * Max data transfer size 128 KB
         * No stripe size
         *
         * 256 blocks * (512 + 128) bytes per block = two I/Os:
         *   child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
         *   child 1: 52 blocks
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);

        rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
                                              NULL, NULL, 0, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload.md == NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 204 * (512 + 128));
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload.md == NULL);
        CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
        CU_ASSERT(child1->payload_size == 52 * (512 + 128));

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Extended LBA
         * Max data transfer size 128 KB
         * No stripe size
         * No protection information
         *
         * 256 blocks * (512 + 8) bytes per block = two I/Os:
         *   child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
         *   child 1: 4 blocks
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);

        rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
                                              NULL, NULL, 0, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload.md == NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 252 * (512 + 8));
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload.md == NULL);
        CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
        CU_ASSERT(child1->payload_size == 4 * (512 + 8));

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Extended LBA
         * Max data transfer size 128 KB
         * No stripe size
         * Protection information enabled + PRACT
         *
         * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
         * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
         * However, the splitting code does not account for PRACT when calculating
         * max sectors per transfer, so we actually get two I/Os:
         *   child 0: 252 blocks
         *   child 1: 4 blocks
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
        ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

        rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
                                              NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload.md == NULL);
        CU_ASSERT(child1->payload_offset == 252 * 512);
        CU_ASSERT(child1->payload_size == 4 * 512);

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Separate metadata buffer
         * Max data transfer size 128 KB
         * No stripe size
         * Protection information enabled + PRACT
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
        ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

        rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
                                              NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);

        CU_ASSERT(g_request->payload.md == metadata);
        CU_ASSERT(g_request->payload_size == 256 * 512);

        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Separate metadata buffer
         * Max data transfer size 128 KB
         * No stripe size
         * Protection information enabled + PRACT
         *
         * 384 blocks * 512 bytes = two I/Os:
         *   child 0: 256 blocks
         *   child 1: 128 blocks
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
        ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

        rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384,
                                              NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 256 * 512);
        CU_ASSERT(child0->md_offset == 0);
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload_offset == 256 * 512);
        CU_ASSERT(child1->payload_size == 128 * 512);
        CU_ASSERT(child1->md_offset == 256 * 8);

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        free(buffer);
        free(metadata);
}

int main(int argc, char **argv)
{
        CU_pSuite suite = NULL;
        unsigned int num_failures;

        if (CU_initialize_registry() != CUE_SUCCESS) {
                return CU_get_error();
        }

        suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);
        if (suite == NULL) {
                CU_cleanup_registry();
                return CU_get_error();
        }

        if (
                CU_add_test(suite, "split_test", split_test) == NULL
                || CU_add_test(suite, "split_test2", split_test2) == NULL
                || CU_add_test(suite, "split_test3", split_test3) == NULL
                || CU_add_test(suite, "split_test4", split_test4) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_flush", test_nvme_ns_cmd_flush) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_dataset_management",
                               test_nvme_ns_cmd_dataset_management) == NULL
                || CU_add_test(suite, "io_flags", test_io_flags) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_write_zeroes", test_nvme_ns_cmd_write_zeroes) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_reservation_register",
                               test_nvme_ns_cmd_reservation_register) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_reservation_release",
                               test_nvme_ns_cmd_reservation_release) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_reservation_acquire",
                               test_nvme_ns_cmd_reservation_acquire) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_reservation_report",
                               test_nvme_ns_cmd_reservation_report) == NULL
                || CU_add_test(suite, "test_cmd_child_request", test_cmd_child_request) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_readv", test_nvme_ns_cmd_readv) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_read_with_md", test_nvme_ns_cmd_read_with_md) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_writev", test_nvme_ns_cmd_writev) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_write_with_md", test_nvme_ns_cmd_write_with_md) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_comparev", test_nvme_ns_cmd_comparev) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_compare_with_md", test_nvme_ns_cmd_compare_with_md) == NULL
        ) {
                CU_cleanup_registry();
                return CU_get_error();
        }

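        /* Install the local stub driver instance before running the tests. */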
        g_spdk_nvme_driver = &_g_nvme_driver;

        CU_basic_set_mode(CU_BRM_VERBOSE);
        CU_basic_run_tests();
        num_failures = CU_get_number_of_failures();
        CU_cleanup_registry();
        return num_failures;
}