/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "nvme/nvme_ns_cmd.c"
#include "nvme/nvme.c"

#include "common/lib/test_env.c"

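/*
 * Unit tests for the spdk_nvme_ns_cmd_*() request-building and
 * request-splitting logic. nvme_ns_cmd.c and nvme.c are compiled directly
 * into this test, and the transport, PCI, and controller entry points they
 * reference are stubbed out below.
 */
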
static struct nvme_driver _g_nvme_driver = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
};

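/*
 * The nvme_qpair_submit_request() stub below captures the built request
 * here instead of submitting it, so each test can inspect the command
 * (or its children) that the ns_cmd layer produced.
 */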
static struct nvme_request *g_request = NULL;

int
spdk_pci_enumerate(struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx)
{
        return -1;
}

static void nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
{
}

static int nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length)
{
        uint32_t *lba_count = cb_arg;

        /*
         * We need to set address to something here, since the SGL splitting code will
         * use it to determine PRP compatibility. Just use a rather arbitrary address
         * for now - these tests will not actually cause data to be read from or written
         * to this address.
         */
        *address = (void *)(uintptr_t)0x10000000;
        *length = *lba_count;
        return 0;
}

bool
spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
{
        return true;
}

struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
                const struct spdk_nvme_ctrlr_opts *opts,
                void *devhandle)
{
        return NULL;
}

void
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{
        return 0;
}

int
nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
{
        return 0;
}

void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
}

struct spdk_pci_addr
spdk_pci_device_get_addr(struct spdk_pci_device *pci_dev)
{
        struct spdk_pci_addr pci_addr;

        memset(&pci_addr, 0, sizeof(pci_addr));
        return pci_addr;
}

struct spdk_pci_id
spdk_pci_device_get_id(struct spdk_pci_device *pci_dev)
{
        struct spdk_pci_id pci_id;

        memset(&pci_id, 0xFF, sizeof(pci_id));

        return pci_id;
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
        memset(opts, 0, sizeof(*opts));
}

uint32_t
spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns)
{
        return ns->sector_size;
}

uint32_t
spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
{
        return ns->ctrlr->max_xfer_size;
}

int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
        g_request = req;

        return 0;
}

void
nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
{
        return;
}

void
nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
{
        return;
}

int
nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
{
        return 0;
}

int
nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
                          bool direct_connect)
{
        return 0;
}

static void
prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
                 struct spdk_nvme_qpair *qpair,
                 uint32_t sector_size, uint32_t md_size, uint32_t max_xfer_size,
                 uint32_t stripe_size, bool extended_lba)
{
        uint32_t num_requests = 32;
        uint32_t i;

        ctrlr->max_xfer_size = max_xfer_size;
        /*
         * Clear the flags field - we especially want to make sure the SGL_SUPPORTED flag is not set
         * so that we test the SGL splitting path.
         */
        ctrlr->flags = 0;
        ctrlr->min_page_size = 4096;
        ctrlr->page_size = 4096;
        memset(&ctrlr->opts, 0, sizeof(ctrlr->opts));
        memset(ns, 0, sizeof(*ns));
        ns->ctrlr = ctrlr;
        ns->sector_size = sector_size;
        ns->extended_lba_size = sector_size;
        if (extended_lba) {
                ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
                ns->extended_lba_size += md_size;
        }
        ns->md_size = md_size;
        ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
        ns->sectors_per_stripe = stripe_size / ns->extended_lba_size;

        memset(qpair, 0, sizeof(*qpair));
        qpair->ctrlr = ctrlr;
        qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
        SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);

        for (i = 0; i < num_requests; i++) {
                struct nvme_request *req = qpair->req_buf + i * sizeof(struct nvme_request);

                req->qpair = qpair;
                STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
        }

        g_request = NULL;
}
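
/*
 * For the configuration most tests use (512-byte sectors, no metadata,
 * 128 KB max transfer, no stripe), the values derived above work out to
 * sectors_per_max_io = (128 * 1024) / 512 = 256 and sectors_per_stripe = 0
 * (striping disabled).
 */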

static void
cleanup_after_test(struct spdk_nvme_qpair *qpair)
{
        free(qpair->req_buf);
}

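/*
 * Decode the starting LBA and block count from a read/write command.
 * Per the NVMe spec, CDW10/CDW11 hold the 64-bit starting LBA and
 * CDW12 bits 15:0 hold a zero-based logical block count, hence the
 * mask and the "+ 1".
 */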
static void
nvme_cmd_interpret_rw(const struct spdk_nvme_cmd *cmd,
                      uint64_t *lba, uint32_t *num_blocks)
{
        *lba = *(const uint64_t *)&cmd->cdw10;
        *num_blocks = (cmd->cdw12 & 0xFFFFu) + 1;
}

static void
split_test(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_qpair qpair;
        struct spdk_nvme_ctrlr ctrlr;
        void *payload;
        uint64_t lba, cmd_lba;
        uint32_t lba_count, cmd_lba_count;
        int rc;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
        payload = malloc(512);
        lba = 0;
        lba_count = 1;

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);

        CU_ASSERT(g_request->num_children == 0);
        nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(cmd_lba == lba);
        CU_ASSERT(cmd_lba_count == lba_count);

        free(payload);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
split_test2(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        struct nvme_request *child;
        void *payload;
        uint64_t lba, cmd_lba;
        uint32_t lba_count, cmd_lba_count;
        int rc;

        /*
         * Controller has max xfer of 128 KB (256 blocks).
         * Submit an I/O of 256 KB starting at LBA 0, which should be split
         * on the max I/O boundary into two I/Os of 128 KB.
         */
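        /* 256 KB / 512-byte blocks = 512 blocks total, split as 256 + 256. */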

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
        payload = malloc(256 * 1024);
        lba = 0;
        lba_count = (256 * 1024) / 512;

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);

        CU_ASSERT(g_request->num_children == 2);

        child = TAILQ_FIRST(&g_request->children);
        nvme_request_remove_child(g_request, child);
        nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(child->num_children == 0);
        CU_ASSERT(child->payload_size == 128 * 1024);
        CU_ASSERT(cmd_lba == 0);
        CU_ASSERT(cmd_lba_count == 256); /* 256 * 512 byte blocks = 128 KB */
        nvme_free_request(child);

        child = TAILQ_FIRST(&g_request->children);
        nvme_request_remove_child(g_request, child);
        nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(child->num_children == 0);
        CU_ASSERT(child->payload_size == 128 * 1024);
        CU_ASSERT(cmd_lba == 256);
        CU_ASSERT(cmd_lba_count == 256);
        nvme_free_request(child);

        CU_ASSERT(TAILQ_EMPTY(&g_request->children));

        free(payload);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
split_test3(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        struct nvme_request *child;
        void *payload;
        uint64_t lba, cmd_lba;
        uint32_t lba_count, cmd_lba_count;
        int rc;

        /*
         * Controller has max xfer of 128 KB (256 blocks).
         * Submit an I/O of 256 KB starting at LBA 10, which should be split
         * into two I/Os:
         * 1) LBA = 10, count = 256 blocks
         * 2) LBA = 266, count = 256 blocks
         */

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
        payload = malloc(256 * 1024);
        lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
        lba_count = (256 * 1024) / 512;

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);

        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);

        child = TAILQ_FIRST(&g_request->children);
        nvme_request_remove_child(g_request, child);
        nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(child->num_children == 0);
        CU_ASSERT(child->payload_size == 128 * 1024);
        CU_ASSERT(cmd_lba == 10);
        CU_ASSERT(cmd_lba_count == 256);
        nvme_free_request(child);

        child = TAILQ_FIRST(&g_request->children);
        nvme_request_remove_child(g_request, child);
        nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(child->num_children == 0);
        CU_ASSERT(child->payload_size == 128 * 1024);
        CU_ASSERT(cmd_lba == 266);
        CU_ASSERT(cmd_lba_count == 256);
        nvme_free_request(child);

        CU_ASSERT(TAILQ_EMPTY(&g_request->children));

        free(payload);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
split_test4(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        struct nvme_request *child;
        void *payload;
        uint64_t lba, cmd_lba;
        uint32_t lba_count, cmd_lba_count;
        int rc;

        /*
         * Controller has max xfer of 128 KB (256 blocks) and a stripe size of 128 KB.
         * (Same as split_test3 except with driver-assisted striping enabled.)
         * Submit an I/O of 256 KB starting at LBA 10, which should be split
         * into three I/Os:
         * 1) LBA = 10, count = 246 blocks (less than max I/O size to align to stripe size)
         * 2) LBA = 256, count = 256 blocks (aligned to stripe size and max I/O size)
         * 3) LBA = 512, count = 10 blocks (finish off the remaining I/O size)
         */
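        /*
         * Derivation: the first stripe boundary above LBA 10 is LBA 256, so
         * child 0 carries 256 - 10 = 246 blocks; 512 - 246 - 256 = 10 blocks
         * remain for child 2.
         */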

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
        payload = malloc(256 * 1024);
        lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
        lba_count = (256 * 1024) / 512;

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
                                   SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);

        SPDK_CU_ASSERT_FATAL(g_request->num_children == 3);

        child = TAILQ_FIRST(&g_request->children);
        nvme_request_remove_child(g_request, child);
        nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(child->num_children == 0);
        CU_ASSERT(child->payload_size == (256 - 10) * 512);
        CU_ASSERT(child->payload_offset == 0);
        CU_ASSERT(cmd_lba == 10);
        CU_ASSERT(cmd_lba_count == 256 - 10);
        CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
        CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
        nvme_free_request(child);

        child = TAILQ_FIRST(&g_request->children);
        nvme_request_remove_child(g_request, child);
        nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(child->num_children == 0);
        CU_ASSERT(child->payload_size == 128 * 1024);
        CU_ASSERT(child->payload_offset == (256 - 10) * 512);
        CU_ASSERT(cmd_lba == 256);
        CU_ASSERT(cmd_lba_count == 256);
        CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
        CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
        nvme_free_request(child);

        child = TAILQ_FIRST(&g_request->children);
        nvme_request_remove_child(g_request, child);
        nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT(child->num_children == 0);
        CU_ASSERT(child->payload_size == 10 * 512);
        CU_ASSERT(child->payload_offset == (512 - 10) * 512);
        CU_ASSERT(cmd_lba == 512);
        CU_ASSERT(cmd_lba_count == 10);
        CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
        CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
        nvme_free_request(child);

        CU_ASSERT(TAILQ_EMPTY(&g_request->children));

        free(payload);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
test_cmd_child_request(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        int rc = 0;
        struct nvme_request *child, *tmp;
        void *payload;
        uint64_t lba = 0x1000;
        uint32_t i = 0;
        uint32_t offset = 0;
        uint32_t sector_size = 512;
        uint32_t max_io_size = 128 * 1024;
        uint32_t sectors_per_max_io = max_io_size / sector_size;

        prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_io_size, 0, false);

        payload = malloc(128 * 1024);
        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io, NULL, NULL, 0);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->payload_offset == 0);
        CU_ASSERT(g_request->num_children == 0);
        nvme_free_request(g_request);

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io - 1, NULL, NULL, 0);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->payload_offset == 0);
        CU_ASSERT(g_request->num_children == 0);
        nvme_free_request(g_request);

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io * 4, NULL, NULL, 0);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->num_children == 4);

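        /*
         * An I/O this large would require more child requests than this
         * qpair has available, so the request should be rejected outright.
         */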
        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, (DEFAULT_IO_QUEUE_REQUESTS + 1) * sector_size,
                                   NULL,
                                   NULL, 0);
        SPDK_CU_ASSERT_FATAL(rc == -EINVAL);

        TAILQ_FOREACH_SAFE(child, &g_request->children, child_tailq, tmp) {
                nvme_request_remove_child(g_request, child);
                CU_ASSERT(child->payload_offset == offset);
                CU_ASSERT(child->cmd.opc == SPDK_NVME_OPC_READ);
                CU_ASSERT(child->cmd.nsid == ns.id);
                CU_ASSERT(child->cmd.cdw10 == (lba + sectors_per_max_io * i));
                CU_ASSERT(child->cmd.cdw12 == ((sectors_per_max_io - 1) | 0));
                offset += max_io_size;
                nvme_free_request(child);
                i++;
        }

        free(payload);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_flush(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        spdk_nvme_cmd_cb cb_fn = NULL;
        void *cb_arg = NULL;
        int rc;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

        rc = spdk_nvme_ns_cmd_flush(&ns, &qpair, cb_fn, cb_arg);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_FLUSH);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_write_zeroes(void)
{
        struct spdk_nvme_ns ns = { 0 };
        struct spdk_nvme_ctrlr ctrlr = { 0 };
        struct spdk_nvme_qpair qpair;
        spdk_nvme_cmd_cb cb_fn = NULL;
        void *cb_arg = NULL;
        uint64_t cmd_lba;
        uint32_t cmd_lba_count;
        int rc;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

        rc = spdk_nvme_ns_cmd_write_zeroes(&ns, &qpair, 0, 2, cb_fn, cb_arg, 0);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_ZEROES);
        CU_ASSERT(g_request->cmd.nsid == ns.id);
        nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
        CU_ASSERT_EQUAL(cmd_lba, 0);
        CU_ASSERT_EQUAL(cmd_lba_count, 2);

        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_dataset_management(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        spdk_nvme_cmd_cb cb_fn = NULL;
        void *cb_arg = NULL;
        struct spdk_nvme_dsm_range ranges[256];
        uint16_t i;
        int rc = 0;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

        for (i = 0; i < 256; i++) {
                ranges[i].starting_lba = i;
                ranges[i].length = 1;
                ranges[i].attributes.raw = 0;
        }

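        /*
         * Per the NVMe spec, DSM CDW10 bits 7:0 carry a zero-based range
         * count (so 1 range encodes as 0 and 256 ranges as 255), and CDW11
         * carries the attribute flags - the values asserted below.
         */
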
        /* TRIM one LBA */
        rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
                        ranges, 1, cb_fn, cb_arg);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
        CU_ASSERT(g_request->cmd.nsid == ns.id);
        CU_ASSERT(g_request->cmd.cdw10 == 0);
        CU_ASSERT(g_request->cmd.cdw11 == SPDK_NVME_DSM_ATTR_DEALLOCATE);
        spdk_dma_free(g_request->payload.contig_or_cb_arg);
        nvme_free_request(g_request);

        /* TRIM 256 LBAs */
        rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
                        ranges, 256, cb_fn, cb_arg);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
        CU_ASSERT(g_request->cmd.nsid == ns.id);
        CU_ASSERT(g_request->cmd.cdw10 == 255u);
        CU_ASSERT(g_request->cmd.cdw11 == SPDK_NVME_DSM_ATTR_DEALLOCATE);
        spdk_dma_free(g_request->payload.contig_or_cb_arg);
        nvme_free_request(g_request);

        rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
                        NULL, 0, cb_fn, cb_arg);
        CU_ASSERT(rc != 0);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_readv(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        int rc = 0;
        void *cb_arg;
        uint32_t lba_count = 256;
        uint32_t sector_size = 512;
        uint64_t sge_length = lba_count * sector_size;

        cb_arg = malloc(512);
        prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
        rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
                                    nvme_request_reset_sgl, nvme_request_next_sge);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_READ);
        CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
        CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
        CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
        CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

        rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0, nvme_request_reset_sgl,
                                    NULL);
        CU_ASSERT(rc != 0);

        free(cb_arg);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_writev(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        int rc = 0;
        void *cb_arg;
        uint32_t lba_count = 256;
        uint32_t sector_size = 512;
        uint64_t sge_length = lba_count * sector_size;

        cb_arg = malloc(512);
        prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
        rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
                                     nvme_request_reset_sgl, nvme_request_next_sge);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
        CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
        CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
        CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
        CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

        rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
                                     NULL, nvme_request_next_sge);
        CU_ASSERT(rc != 0);

        free(cb_arg);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_comparev(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        int rc = 0;
        void *cb_arg;
        uint32_t lba_count = 256;
        uint32_t sector_size = 512;
        uint64_t sge_length = lba_count * sector_size;

        cb_arg = malloc(512);
        prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
        rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
                                       nvme_request_reset_sgl, nvme_request_next_sge);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COMPARE);
        CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
        CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
        CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
        CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

        rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
                                       nvme_request_reset_sgl, NULL);
        CU_ASSERT(rc != 0);

        free(cb_arg);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
}

static void
test_io_flags(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        void *payload;
        uint64_t lba;
        uint32_t lba_count;
        int rc;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
        payload = malloc(256 * 1024);
        lba = 0;
        lba_count = (4 * 1024) / 512;

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
                                   SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
        CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
        nvme_free_request(g_request);

        rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
                                   SPDK_NVME_IO_FLAGS_LIMITED_RETRY);
        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) == 0);
        CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) != 0);
        nvme_free_request(g_request);

        free(payload);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_reservation_register(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        struct spdk_nvme_reservation_register_data *payload;
        bool ignore_key = 1;
        spdk_nvme_cmd_cb cb_fn = NULL;
        void *cb_arg = NULL;
        int rc = 0;
        uint32_t tmp_cdw10;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
        payload = malloc(sizeof(struct spdk_nvme_reservation_register_data));

        rc = spdk_nvme_ns_cmd_reservation_register(&ns, &qpair, payload, ignore_key,
                        SPDK_NVME_RESERVE_REGISTER_KEY,
                        SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
                        cb_fn, cb_arg);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REGISTER);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

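        /*
         * Reservation Register CDW10 layout per the NVMe spec: RREGA in
         * bits 2:0, IEKEY in bit 3, and CPTPL in bits 31:30.
         */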
        tmp_cdw10 = SPDK_NVME_RESERVE_REGISTER_KEY;
        tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
        tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_PTPL_NO_CHANGES << 30;

        CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);

        spdk_dma_free(g_request->payload.contig_or_cb_arg);
        nvme_free_request(g_request);
        free(payload);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_reservation_release(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        struct spdk_nvme_reservation_key_data *payload;
        bool ignore_key = 1;
        spdk_nvme_cmd_cb cb_fn = NULL;
        void *cb_arg = NULL;
        int rc = 0;
        uint32_t tmp_cdw10;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
        payload = malloc(sizeof(struct spdk_nvme_reservation_key_data));

        rc = spdk_nvme_ns_cmd_reservation_release(&ns, &qpair, payload, ignore_key,
                        SPDK_NVME_RESERVE_RELEASE,
                        SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
                        cb_fn, cb_arg);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_RELEASE);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

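        /*
         * Reservation Release (and Acquire, below) lay out CDW10 as the
         * action in bits 2:0, IEKEY in bit 3, and the reservation type in
         * bits 15:8, per the NVMe spec.
         */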
        tmp_cdw10 = SPDK_NVME_RESERVE_RELEASE;
        tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
        tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;

        CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);

        spdk_dma_free(g_request->payload.contig_or_cb_arg);
        nvme_free_request(g_request);
        free(payload);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_reservation_acquire(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        struct spdk_nvme_reservation_acquire_data *payload;
        bool ignore_key = 1;
        spdk_nvme_cmd_cb cb_fn = NULL;
        void *cb_arg = NULL;
        int rc = 0;
        uint32_t tmp_cdw10;

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
        payload = malloc(sizeof(struct spdk_nvme_reservation_acquire_data));

        rc = spdk_nvme_ns_cmd_reservation_acquire(&ns, &qpair, payload, ignore_key,
                        SPDK_NVME_RESERVE_ACQUIRE,
                        SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
                        cb_fn, cb_arg);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_ACQUIRE);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

        tmp_cdw10 = SPDK_NVME_RESERVE_ACQUIRE;
        tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
        tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;

        CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);

        spdk_dma_free(g_request->payload.contig_or_cb_arg);
        nvme_free_request(g_request);
        free(payload);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_reservation_report(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        struct spdk_nvme_reservation_status_data *payload;
        spdk_nvme_cmd_cb cb_fn = NULL;
        void *cb_arg = NULL;
        int rc = 0;
        uint32_t size = sizeof(struct spdk_nvme_reservation_status_data);

        prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);

        payload = calloc(1, size);
        SPDK_CU_ASSERT_FATAL(payload != NULL);

        rc = spdk_nvme_ns_cmd_reservation_report(&ns, &qpair, payload, size, cb_fn, cb_arg);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REPORT);
        CU_ASSERT(g_request->cmd.nsid == ns.id);

        CU_ASSERT(g_request->cmd.cdw10 == (size / 4));

        spdk_dma_free(g_request->payload.contig_or_cb_arg);
        nvme_free_request(g_request);
        free(payload);
        cleanup_after_test(&qpair);
}

static void
test_nvme_ns_cmd_write_with_md(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        int rc = 0;
        char *buffer = NULL;
        char *metadata = NULL;
        uint32_t block_size, md_size;
        struct nvme_request *child0, *child1;

        block_size = 512;
        md_size = 128;

        buffer = malloc((block_size + md_size) * 384);
        SPDK_CU_ASSERT_FATAL(buffer != NULL);
        metadata = malloc(md_size * 384);
        SPDK_CU_ASSERT_FATAL(metadata != NULL);

        /*
         * 512 byte data + 128 byte metadata
         * Separate metadata buffer
         * Max data transfer size 128 KB
         * No stripe size
         *
         * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);

        rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL, 0, 0,
                                            0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);

        CU_ASSERT(g_request->payload.md == metadata);
        CU_ASSERT(g_request->payload_size == 256 * 512);

        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 128 byte metadata
         * Extended LBA
         * Max data transfer size 128 KB
         * No stripe size
         *
         * 256 blocks * (512 + 128) bytes per block = two I/Os:
         * child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
         * child 1: 52 blocks
         */
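        /* 204 = (128 * 1024) / (512 + 128), rounded down to a whole extended block. */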
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);

        rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
                                            0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload.md == NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 204 * (512 + 128));
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload.md == NULL);
        CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
        CU_ASSERT(child1->payload_size == 52 * (512 + 128));

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Extended LBA
         * Max data transfer size 128 KB
         * No stripe size
         * No protection information
         *
         * 256 blocks * (512 + 8) bytes per block = two I/Os:
         * child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
         * child 1: 4 blocks
         */
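        /* 252 = (128 * 1024) / (512 + 8), rounded down to a whole extended block. */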
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);

        rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
                                            0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload.md == NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 252 * (512 + 8));
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload.md == NULL);
        CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
        CU_ASSERT(child1->payload_size == 4 * (512 + 8));

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Extended LBA
         * Max data transfer size 128 KB
         * No stripe size
         * Protection information enabled + PRACT
         *
         * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
         * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
         * However, the splitting code does not account for PRACT when calculating
         * max sectors per transfer, so we actually get two I/Os:
         * child 0: 252 blocks
         * child 1: 4 blocks
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
        ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

        rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL,
                                            SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload.md == NULL);
        CU_ASSERT(child1->payload_offset == 252 * 512);
        CU_ASSERT(child1->payload_size == 4 * 512);

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Separate metadata buffer
         * Max data transfer size 128 KB
         * No stripe size
         * Protection information enabled + PRACT
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
        ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

        rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL,
                                            SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);

        CU_ASSERT(g_request->payload.md == metadata);
        CU_ASSERT(g_request->payload_size == 256 * 512);

        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Separate metadata buffer
         * Max data transfer size 128 KB
         * No stripe size
         * Protection information enabled + PRACT
         *
         * 384 blocks * 512 bytes = two I/Os:
         * child 0: 256 blocks
         * child 1: 128 blocks
         */
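        /* Child 1's metadata offset: 256 blocks * 8 bytes of metadata per block = 2048. */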
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
        ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

        rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384, NULL, NULL,
                                            SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 256 * 512);
        CU_ASSERT(child0->md_offset == 0);
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload_offset == 256 * 512);
        CU_ASSERT(child1->payload_size == 128 * 512);
        CU_ASSERT(child1->md_offset == 256 * 8);

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        free(buffer);
        free(metadata);
}

static void
test_nvme_ns_cmd_read_with_md(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        int rc = 0;
        char *buffer = NULL;
        char *metadata = NULL;
        uint32_t block_size, md_size;

        block_size = 512;
        md_size = 128;

        buffer = malloc(block_size * 256);
        SPDK_CU_ASSERT_FATAL(buffer != NULL);
        metadata = malloc(md_size * 256);
        SPDK_CU_ASSERT_FATAL(metadata != NULL);

        /*
         * 512 byte data + 128 byte metadata
         * Separate metadata buffer
         * Max data transfer size 128 KB
         * No stripe size
         *
         * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);

        rc = spdk_nvme_ns_cmd_read_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL, 0, 0,
                                           0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);

        CU_ASSERT(g_request->payload.md == metadata);
        CU_ASSERT(g_request->payload_size == 256 * 512);

        nvme_free_request(g_request);
        cleanup_after_test(&qpair);
        free(buffer);
        free(metadata);
}

static void
test_nvme_ns_cmd_compare_with_md(void)
{
        struct spdk_nvme_ns ns;
        struct spdk_nvme_ctrlr ctrlr;
        struct spdk_nvme_qpair qpair;
        int rc = 0;
        char *buffer = NULL;
        char *metadata = NULL;
        uint32_t block_size, md_size;
        struct nvme_request *child0, *child1;

        block_size = 512;
        md_size = 128;

        buffer = malloc((block_size + md_size) * 384);
        SPDK_CU_ASSERT_FATAL(buffer != NULL);
        metadata = malloc(md_size * 384);
        SPDK_CU_ASSERT_FATAL(metadata != NULL);

        /*
         * 512 byte data + 128 byte metadata
         * Separate metadata buffer
         * Max data transfer size 128 KB
         * No stripe size
         *
         * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);

        rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
                                              NULL, NULL, 0, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);

        CU_ASSERT(g_request->payload.md == metadata);
        CU_ASSERT(g_request->payload_size == 256 * 512);

        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 128 byte metadata
         * Extended LBA
         * Max data transfer size 128 KB
         * No stripe size
         *
         * 256 blocks * (512 + 128) bytes per block = two I/Os:
         * child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
         * child 1: 52 blocks
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);

        rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
                                              NULL, NULL, 0, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload.md == NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 204 * (512 + 128));
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload.md == NULL);
        CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
        CU_ASSERT(child1->payload_size == 52 * (512 + 128));

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Extended LBA
         * Max data transfer size 128 KB
         * No stripe size
         * No protection information
         *
         * 256 blocks * (512 + 8) bytes per block = two I/Os:
         * child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
         * child 1: 4 blocks
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);

        rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
                                              NULL, NULL, 0, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload.md == NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 252 * (512 + 8));
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload.md == NULL);
        CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
        CU_ASSERT(child1->payload_size == 4 * (512 + 8));

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Extended LBA
         * Max data transfer size 128 KB
         * No stripe size
         * Protection information enabled + PRACT
         *
         * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
         * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
         * However, the splitting code does not account for PRACT when calculating
         * max sectors per transfer, so we actually get two I/Os:
         * child 0: 252 blocks
         * child 1: 4 blocks
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
        ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

        rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
                                              NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload.md == NULL);
        CU_ASSERT(child1->payload_offset == 252 * 512);
        CU_ASSERT(child1->payload_size == 4 * 512);

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Separate metadata buffer
         * Max data transfer size 128 KB
         * No stripe size
         * Protection information enabled + PRACT
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
        ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

        rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
                                              NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);

        CU_ASSERT(g_request->payload.md == metadata);
        CU_ASSERT(g_request->payload_size == 256 * 512);

        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        /*
         * 512 byte data + 8 byte metadata
         * Separate metadata buffer
         * Max data transfer size 128 KB
         * No stripe size
         * Protection information enabled + PRACT
         *
         * 384 blocks * 512 bytes = two I/Os:
         * child 0: 256 blocks
         * child 1: 128 blocks
         */
        prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
        ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;

        rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384,
                                              NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);

        SPDK_CU_ASSERT_FATAL(rc == 0);
        SPDK_CU_ASSERT_FATAL(g_request != NULL);
        SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
        child0 = TAILQ_FIRST(&g_request->children);

        SPDK_CU_ASSERT_FATAL(child0 != NULL);
        CU_ASSERT(child0->payload_offset == 0);
        CU_ASSERT(child0->payload_size == 256 * 512);
        CU_ASSERT(child0->md_offset == 0);
        child1 = TAILQ_NEXT(child0, child_tailq);

        SPDK_CU_ASSERT_FATAL(child1 != NULL);
        CU_ASSERT(child1->payload_offset == 256 * 512);
        CU_ASSERT(child1->payload_size == 128 * 512);
        CU_ASSERT(child1->md_offset == 256 * 8);

        nvme_request_free_children(g_request);
        nvme_free_request(g_request);
        cleanup_after_test(&qpair);

        free(buffer);
        free(metadata);
}

int main(int argc, char **argv)
{
        CU_pSuite suite = NULL;
        unsigned int num_failures;

        if (CU_initialize_registry() != CUE_SUCCESS) {
                return CU_get_error();
        }

        suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);
        if (suite == NULL) {
                CU_cleanup_registry();
                return CU_get_error();
        }

        if (
                CU_add_test(suite, "split_test", split_test) == NULL
                || CU_add_test(suite, "split_test2", split_test2) == NULL
                || CU_add_test(suite, "split_test3", split_test3) == NULL
                || CU_add_test(suite, "split_test4", split_test4) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_flush", test_nvme_ns_cmd_flush) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_dataset_management",
                               test_nvme_ns_cmd_dataset_management) == NULL
                || CU_add_test(suite, "io_flags", test_io_flags) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_write_zeroes", test_nvme_ns_cmd_write_zeroes) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_reservation_register",
                               test_nvme_ns_cmd_reservation_register) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_reservation_release",
                               test_nvme_ns_cmd_reservation_release) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_reservation_acquire",
                               test_nvme_ns_cmd_reservation_acquire) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_reservation_report", test_nvme_ns_cmd_reservation_report) == NULL
                || CU_add_test(suite, "test_cmd_child_request", test_cmd_child_request) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_readv", test_nvme_ns_cmd_readv) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_read_with_md", test_nvme_ns_cmd_read_with_md) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_writev", test_nvme_ns_cmd_writev) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_write_with_md", test_nvme_ns_cmd_write_with_md) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_comparev", test_nvme_ns_cmd_comparev) == NULL
                || CU_add_test(suite, "nvme_ns_cmd_compare_with_md", test_nvme_ns_cmd_compare_with_md) == NULL
        ) {
                CU_cleanup_registry();
                return CU_get_error();
        }

        g_spdk_nvme_driver = &_g_nvme_driver;

        CU_basic_set_mode(CU_BRM_VERBOSE);
        CU_basic_run_tests();
        num_failures = CU_get_number_of_failures();
        CU_cleanup_registry();
        return num_failures;
}