4 * Copyright (c) Intel Corporation.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * NVMe end-to-end data protection test
#include "spdk/stdinc.h"

#include "spdk/nvme.h"
#include "spdk/env.h"
#include "spdk/crc16.h"
#include "spdk/endian.h"

#include "spdk_internal/memory.h"
49 #define DATA_PATTERN 0x5A
52 struct spdk_nvme_ctrlr
*ctrlr
;
53 char name
[SPDK_NVMF_TRADDR_MAX_LEN
+ 1];
56 static struct dev devs
[MAX_DEVS
];
57 static int num_devs
= 0;
59 #define foreach_dev(iter) \
60 for (iter = devs; iter - devs < num_devs; iter++)
62 static int io_complete_flag
= 0;
67 bool use_extended_lba
;
78 io_complete(void *ctx
, const struct spdk_nvme_cpl
*cpl
)
80 if (spdk_nvme_cpl_is_error(cpl
)) {
88 ns_data_buffer_reset(struct spdk_nvme_ns
*ns
, struct io_request
*req
, uint8_t data_pattern
)
90 uint32_t md_size
, sector_size
;
91 uint32_t i
, offset
= 0;
94 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
95 md_size
= spdk_nvme_ns_get_md_size(ns
);
97 for (i
= 0; i
< req
->lba_count
; i
++) {
98 if (req
->use_extended_lba
) {
99 offset
= (sector_size
+ md_size
) * i
;
101 offset
= sector_size
* i
;
104 buf
= (uint8_t *)req
->contig
+ offset
;
105 memset(buf
, data_pattern
, sector_size
);
109 static void nvme_req_reset_sgl(void *cb_arg
, uint32_t sgl_offset
)
111 struct io_request
*req
= (struct io_request
*)cb_arg
;
113 req
->sgl_offset
= sgl_offset
;
117 static int nvme_req_next_sge(void *cb_arg
, void **address
, uint32_t *length
)
119 struct io_request
*req
= (struct io_request
*)cb_arg
;
122 payload
= req
->contig
+ req
->sgl_offset
;
125 *length
= req
->buf_size
- req
->sgl_offset
;
130 /* CRC-16 Guard checked for extended lba format */
131 static uint32_t dp_guard_check_extended_lba_test(struct spdk_nvme_ns
*ns
, struct io_request
*req
,
134 struct spdk_nvme_protection_info
*pi
;
135 uint32_t md_size
, sector_size
;
139 /* extended LBA only for the test case */
140 if (!(spdk_nvme_ns_supports_extended_lba(ns
))) {
144 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
145 md_size
= spdk_nvme_ns_get_md_size(ns
);
146 req
->contig
= spdk_dma_zmalloc((sector_size
+ md_size
) * req
->lba_count
, 0x1000, NULL
);
151 req
->lba
= VALUE_2MB
;
152 req
->use_extended_lba
= true;
154 req
->buf_size
= (sector_size
+ md_size
) * req
->lba_count
;
155 req
->metadata
= NULL
;
156 ns_data_buffer_reset(ns
, req
, DATA_PATTERN
);
157 pi
= (struct spdk_nvme_protection_info
*)(req
->contig
+ sector_size
+ md_size
- 8);
158 /* big-endian for guard */
159 to_be16(&pi
->guard
, spdk_crc16_t10dif(0, req
->contig
, sector_size
));
161 pi
= (struct spdk_nvme_protection_info
*)(req
->contig
+ (sector_size
+ md_size
) * 2 - 8);
162 to_be16(&pi
->guard
, spdk_crc16_t10dif(0, req
->contig
+ sector_size
+ md_size
, sector_size
));
164 *io_flags
= SPDK_NVME_IO_FLAGS_PRCHK_GUARD
;
166 return req
->lba_count
;
170 * No protection information with PRACT setting to 1,
171 * both extended LBA format and separate metadata can
174 static uint32_t dp_with_pract_test(struct spdk_nvme_ns
*ns
, struct io_request
*req
,
177 uint32_t sector_size
;
181 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
182 /* No additional metadata buffer provided */
183 req
->contig
= spdk_dma_zmalloc(sector_size
* req
->lba_count
, 0x1000, NULL
);
188 switch (spdk_nvme_ns_get_pi_type(ns
)) {
189 case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3
:
190 *io_flags
= SPDK_NVME_IO_FLAGS_PRCHK_GUARD
| SPDK_NVME_IO_FLAGS_PRACT
;
192 case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1
:
193 case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2
:
194 *io_flags
= SPDK_NVME_IO_FLAGS_PRCHK_GUARD
| SPDK_NVME_IO_FLAGS_PRCHK_REFTAG
|
195 SPDK_NVME_IO_FLAGS_PRACT
;
203 req
->use_extended_lba
= false;
204 req
->metadata
= NULL
;
206 return req
->lba_count
;
209 /* Block Reference Tag checked for TYPE1 and TYPE2 with PRACT setting to 0 */
210 static uint32_t dp_without_pract_extended_lba_test(struct spdk_nvme_ns
*ns
, struct io_request
*req
,
213 struct spdk_nvme_protection_info
*pi
;
214 uint32_t md_size
, sector_size
;
218 switch (spdk_nvme_ns_get_pi_type(ns
)) {
219 case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3
:
225 /* extended LBA only for the test case */
226 if (!(spdk_nvme_ns_supports_extended_lba(ns
))) {
230 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
231 md_size
= spdk_nvme_ns_get_md_size(ns
);
232 req
->contig
= spdk_dma_zmalloc((sector_size
+ md_size
) * req
->lba_count
, 0x1000, NULL
);
237 req
->lba
= VALUE_2MB
;
238 req
->use_extended_lba
= true;
239 req
->metadata
= NULL
;
240 pi
= (struct spdk_nvme_protection_info
*)(req
->contig
+ sector_size
+ md_size
- 8);
241 /* big-endian for reference tag */
242 to_be32(&pi
->ref_tag
, (uint32_t)req
->lba
);
244 pi
= (struct spdk_nvme_protection_info
*)(req
->contig
+ (sector_size
+ md_size
) * 2 - 8);
245 /* is incremented for each subsequent logical block */
246 to_be32(&pi
->ref_tag
, (uint32_t)(req
->lba
+ 1));
248 *io_flags
= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG
;
250 return req
->lba_count
;
253 /* LBA + Metadata without data protection bits setting */
254 static uint32_t dp_without_flags_extended_lba_test(struct spdk_nvme_ns
*ns
, struct io_request
*req
,
257 uint32_t md_size
, sector_size
;
261 /* extended LBA only for the test case */
262 if (!(spdk_nvme_ns_supports_extended_lba(ns
))) {
266 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
267 md_size
= spdk_nvme_ns_get_md_size(ns
);
268 req
->contig
= spdk_dma_zmalloc((sector_size
+ md_size
) * req
->lba_count
, 0x1000, NULL
);
274 req
->use_extended_lba
= true;
275 req
->metadata
= NULL
;
278 return req
->lba_count
;
281 /* Block Reference Tag checked for TYPE1 and TYPE2 with PRACT setting to 0 */
282 static uint32_t dp_without_pract_separate_meta_test(struct spdk_nvme_ns
*ns
, struct io_request
*req
,
285 struct spdk_nvme_protection_info
*pi
;
286 uint32_t md_size
, sector_size
;
290 switch (spdk_nvme_ns_get_pi_type(ns
)) {
291 case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3
:
297 /* separate metadata payload for the test case */
298 if (spdk_nvme_ns_supports_extended_lba(ns
)) {
302 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
303 md_size
= spdk_nvme_ns_get_md_size(ns
);
304 req
->contig
= spdk_dma_zmalloc(sector_size
* req
->lba_count
, 0x1000, NULL
);
309 req
->metadata
= spdk_dma_zmalloc(md_size
* req
->lba_count
, 0x1000, NULL
);
310 if (!req
->metadata
) {
311 spdk_dma_free(req
->contig
);
316 req
->use_extended_lba
= false;
318 /* last 8 bytes if the metadata size bigger than 8 */
319 pi
= (struct spdk_nvme_protection_info
*)(req
->metadata
+ md_size
- 8);
320 /* big-endian for reference tag */
321 to_be32(&pi
->ref_tag
, (uint32_t)req
->lba
);
323 pi
= (struct spdk_nvme_protection_info
*)(req
->metadata
+ md_size
* 2 - 8);
324 /* is incremented for each subsequent logical block */
325 to_be32(&pi
->ref_tag
, (uint32_t)(req
->lba
+ 1));
327 *io_flags
= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG
;
329 return req
->lba_count
;
332 /* Application Tag checked with PRACT setting to 0 */
333 static uint32_t dp_without_pract_separate_meta_apptag_test(struct spdk_nvme_ns
*ns
,
334 struct io_request
*req
,
337 struct spdk_nvme_protection_info
*pi
;
338 uint32_t md_size
, sector_size
;
342 /* separate metadata payload for the test case */
343 if (spdk_nvme_ns_supports_extended_lba(ns
)) {
347 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
348 md_size
= spdk_nvme_ns_get_md_size(ns
);
349 req
->contig
= spdk_dma_zmalloc(sector_size
* req
->lba_count
, 0x1000, NULL
);
354 req
->metadata
= spdk_dma_zmalloc(md_size
* req
->lba_count
, 0x1000, NULL
);
355 if (!req
->metadata
) {
356 spdk_dma_free(req
->contig
);
361 req
->use_extended_lba
= false;
362 req
->apptag_mask
= 0xFFFF;
363 req
->apptag
= req
->lba_count
;
365 /* last 8 bytes if the metadata size bigger than 8 */
366 pi
= (struct spdk_nvme_protection_info
*)(req
->metadata
+ md_size
- 8);
367 to_be16(&pi
->app_tag
, req
->lba_count
);
369 *io_flags
= SPDK_NVME_IO_FLAGS_PRCHK_APPTAG
;
371 return req
->lba_count
;
375 * LBA + Metadata without data protection bits setting,
376 * separate metadata payload for the test case.
378 static uint32_t dp_without_flags_separate_meta_test(struct spdk_nvme_ns
*ns
, struct io_request
*req
,
381 uint32_t md_size
, sector_size
;
385 /* separate metadata payload for the test case */
386 if (spdk_nvme_ns_supports_extended_lba(ns
)) {
390 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
391 md_size
= spdk_nvme_ns_get_md_size(ns
);
392 req
->contig
= spdk_dma_zmalloc(sector_size
* req
->lba_count
, 0x1000, NULL
);
397 req
->metadata
= spdk_dma_zmalloc(md_size
* req
->lba_count
, 0x1000, NULL
);
398 if (!req
->metadata
) {
399 spdk_dma_free(req
->contig
);
404 req
->use_extended_lba
= false;
407 return req
->lba_count
;
/*
 * Builder signature shared by all dp_*_test cases: fills *req and
 * *lba_count's flag word, returning the number of LBAs to issue
 * (0 = bypass this case on this namespace).
 */
typedef uint32_t (*nvme_build_io_req_fn_t)(struct spdk_nvme_ns *ns, struct io_request *req,
		uint32_t *lba_count);
414 free_req(struct io_request
*req
)
421 spdk_dma_free(req
->contig
);
425 spdk_dma_free(req
->metadata
);
432 ns_data_buffer_compare(struct spdk_nvme_ns
*ns
, struct io_request
*req
, uint8_t data_pattern
)
434 uint32_t md_size
, sector_size
;
435 uint32_t i
, j
, offset
= 0;
438 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
439 md_size
= spdk_nvme_ns_get_md_size(ns
);
441 for (i
= 0; i
< req
->lba_count
; i
++) {
442 if (req
->use_extended_lba
) {
443 offset
= (sector_size
+ md_size
) * i
;
445 offset
= sector_size
* i
;
448 buf
= (uint8_t *)req
->contig
+ offset
;
449 for (j
= 0; j
< sector_size
; j
++) {
450 if (buf
[j
] != data_pattern
) {
460 write_read_e2e_dp_tests(struct dev
*dev
, nvme_build_io_req_fn_t build_io_fn
, const char *test_name
)
464 uint32_t io_flags
= 0;
466 struct io_request
*req
;
467 struct spdk_nvme_ns
*ns
;
468 struct spdk_nvme_qpair
*qpair
;
469 const struct spdk_nvme_ns_data
*nsdata
;
471 ns
= spdk_nvme_ctrlr_get_ns(dev
->ctrlr
, 1);
473 fprintf(stderr
, "Null namespace\n");
477 if (!(spdk_nvme_ns_get_flags(ns
) & SPDK_NVME_NS_DPS_PI_SUPPORTED
)) {
481 nsdata
= spdk_nvme_ns_get_data(ns
);
482 if (!nsdata
|| !spdk_nvme_ns_get_sector_size(ns
)) {
483 fprintf(stderr
, "Empty nsdata or wrong sector size\n");
487 req
= spdk_dma_zmalloc(sizeof(*req
), 0, NULL
);
489 fprintf(stderr
, "Allocate request failed\n");
493 /* IO parameters setting */
494 lba_count
= build_io_fn(ns
, req
, &io_flags
);
497 fprintf(stderr
, "%s: %s bypass the test case\n", dev
->name
, test_name
);
502 qpair
= spdk_nvme_ctrlr_alloc_io_qpair(dev
->ctrlr
, NULL
, 0);
508 ns_data_buffer_reset(ns
, req
, DATA_PATTERN
);
509 if (req
->use_extended_lba
&& req
->use_sgl
) {
510 rc
= spdk_nvme_ns_cmd_writev(ns
, qpair
, req
->lba
, lba_count
, io_complete
, req
, io_flags
,
511 nvme_req_reset_sgl
, nvme_req_next_sge
);
512 } else if (req
->use_extended_lba
) {
513 rc
= spdk_nvme_ns_cmd_write(ns
, qpair
, req
->contig
, req
->lba
, lba_count
,
514 io_complete
, req
, io_flags
);
516 rc
= spdk_nvme_ns_cmd_write_with_md(ns
, qpair
, req
->contig
, req
->metadata
, req
->lba
, lba_count
,
517 io_complete
, req
, io_flags
, req
->apptag_mask
, req
->apptag
);
521 fprintf(stderr
, "%s: %s write submit failed\n", dev
->name
, test_name
);
522 spdk_nvme_ctrlr_free_io_qpair(qpair
);
527 io_complete_flag
= 0;
529 while (!io_complete_flag
) {
530 spdk_nvme_qpair_process_completions(qpair
, 1);
533 if (io_complete_flag
!= 1) {
534 fprintf(stderr
, "%s: %s write exec failed\n", dev
->name
, test_name
);
535 spdk_nvme_ctrlr_free_io_qpair(qpair
);
540 /* reset completion flag */
541 io_complete_flag
= 0;
543 ns_data_buffer_reset(ns
, req
, 0);
544 if (req
->use_extended_lba
&& req
->use_sgl
) {
545 rc
= spdk_nvme_ns_cmd_readv(ns
, qpair
, req
->lba
, lba_count
, io_complete
, req
, io_flags
,
546 nvme_req_reset_sgl
, nvme_req_next_sge
);
548 } else if (req
->use_extended_lba
) {
549 rc
= spdk_nvme_ns_cmd_read(ns
, qpair
, req
->contig
, req
->lba
, lba_count
,
550 io_complete
, req
, io_flags
);
552 rc
= spdk_nvme_ns_cmd_read_with_md(ns
, qpair
, req
->contig
, req
->metadata
, req
->lba
, lba_count
,
553 io_complete
, req
, io_flags
, req
->apptag_mask
, req
->apptag
);
557 fprintf(stderr
, "%s: %s read failed\n", dev
->name
, test_name
);
558 spdk_nvme_ctrlr_free_io_qpair(qpair
);
563 while (!io_complete_flag
) {
564 spdk_nvme_qpair_process_completions(qpair
, 1);
567 if (io_complete_flag
!= 1) {
568 fprintf(stderr
, "%s: %s read failed\n", dev
->name
, test_name
);
569 spdk_nvme_ctrlr_free_io_qpair(qpair
);
574 rc
= ns_data_buffer_compare(ns
, req
, DATA_PATTERN
);
576 fprintf(stderr
, "%s: %s write/read success, but memcmp Failed\n", dev
->name
, test_name
);
577 spdk_nvme_ctrlr_free_io_qpair(qpair
);
582 fprintf(stdout
, "%s: %s test passed\n", dev
->name
, test_name
);
583 spdk_nvme_ctrlr_free_io_qpair(qpair
);
589 probe_cb(void *cb_ctx
, const struct spdk_nvme_transport_id
*trid
,
590 struct spdk_nvme_ctrlr_opts
*opts
)
592 printf("Attaching to %s\n", trid
->traddr
);
598 attach_cb(void *cb_ctx
, const struct spdk_nvme_transport_id
*trid
,
599 struct spdk_nvme_ctrlr
*ctrlr
, const struct spdk_nvme_ctrlr_opts
*opts
)
603 /* add to dev list */
604 dev
= &devs
[num_devs
++];
608 snprintf(dev
->name
, sizeof(dev
->name
), "%s",
611 printf("Attached to %s\n", dev
->name
);
614 int main(int argc
, char **argv
)
618 struct spdk_env_opts opts
;
620 spdk_env_opts_init(&opts
);
621 opts
.name
= "nvme_dp";
622 opts
.core_mask
= "0x1";
624 if (spdk_env_init(&opts
) < 0) {
625 fprintf(stderr
, "Unable to initialize SPDK env\n");
629 printf("NVMe Write/Read with End-to-End data protection test\n");
631 if (spdk_nvme_probe(NULL
, NULL
, probe_cb
, attach_cb
, NULL
) != 0) {
632 fprintf(stderr
, "nvme_probe() failed\n");
638 #define TEST(x) write_read_e2e_dp_tests(iter, x, #x)
639 if (TEST(dp_with_pract_test
)
640 || TEST(dp_guard_check_extended_lba_test
)
641 || TEST(dp_without_pract_extended_lba_test
)
642 || TEST(dp_without_flags_extended_lba_test
)
643 || TEST(dp_without_pract_separate_meta_test
)
644 || TEST(dp_without_pract_separate_meta_apptag_test
)
645 || TEST(dp_without_flags_separate_meta_test
)) {
648 printf("%s: failed End-to-End data protection tests\n", iter
->name
);
652 printf("Cleaning up...\n");
654 for (i
= 0; i
< num_devs
; i
++) {
655 struct dev
*dev
= &devs
[i
];
657 spdk_nvme_detach(dev
->ctrlr
);