/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NVMe end-to-end data protection test
 */
38 #include "spdk/stdinc.h"
40 #include "spdk/nvme.h"
42 #include "spdk/crc16.h"
43 #include "spdk/endian.h"
47 #define DATA_PATTERN 0x5A
50 struct spdk_nvme_ctrlr
*ctrlr
;
51 char name
[SPDK_NVMF_TRADDR_MAX_LEN
+ 1];
54 static struct dev devs
[MAX_DEVS
];
55 static int num_devs
= 0;
/* Iterate over every controller discovered during probe. */
#define foreach_dev(iter) \
	for (iter = devs; iter - devs < num_devs; iter++)

/* Completion state polled by write_read_e2e_dp_tests():
 * 0 = pending, 1 = success, anything else = error (set by io_complete()). */
static int io_complete_flag = 0;

/*
 * Per-I/O state shared between the dp_* request builders and
 * write_read_e2e_dp_tests().  Fields reconstructed from their uses in this
 * file; the original struct definition was truncated in this view.
 */
struct io_request {
	void		*contig;		/* data payload (may interleave metadata) */
	void		*metadata;		/* separate metadata payload, NULL for extended LBA */
	bool		use_extended_lba;	/* metadata interleaved after each sector */
	bool		use_sgl;		/* submit via SGL callbacks instead of contig */
	uint32_t	sgl_offset;		/* cursor for the SGL iterator */
	uint32_t	buf_size;		/* total size of contig in bytes */
	uint64_t	lba;			/* starting LBA of the I/O */
	uint32_t	lba_count;		/* number of logical blocks */
	uint16_t	apptag_mask;		/* application tag mask for PRCHK_APPTAG */
	uint16_t	apptag;			/* expected application tag */
};
76 io_complete(void *ctx
, const struct spdk_nvme_cpl
*cpl
)
78 if (spdk_nvme_cpl_is_error(cpl
)) {
86 ns_data_buffer_reset(struct spdk_nvme_ns
*ns
, struct io_request
*req
, uint8_t data_pattern
)
88 uint32_t md_size
, sector_size
;
89 uint32_t i
, offset
= 0;
92 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
93 md_size
= spdk_nvme_ns_get_md_size(ns
);
95 for (i
= 0; i
< req
->lba_count
; i
++) {
96 if (req
->use_extended_lba
) {
97 offset
= (sector_size
+ md_size
) * i
;
99 offset
= sector_size
* i
;
102 buf
= (uint8_t *)req
->contig
+ offset
;
103 memset(buf
, data_pattern
, sector_size
);
107 static void nvme_req_reset_sgl(void *cb_arg
, uint32_t sgl_offset
)
109 struct io_request
*req
= (struct io_request
*)cb_arg
;
111 req
->sgl_offset
= sgl_offset
;
115 static int nvme_req_next_sge(void *cb_arg
, void **address
, uint32_t *length
)
117 struct io_request
*req
= (struct io_request
*)cb_arg
;
120 payload
= req
->contig
+ req
->sgl_offset
;
123 *length
= req
->buf_size
- req
->sgl_offset
;
128 /* CRC-16 Guard checked for extended lba format */
129 static uint32_t dp_guard_check_extended_lba_test(struct spdk_nvme_ns
*ns
, struct io_request
*req
,
132 struct spdk_nvme_protection_info
*pi
;
133 uint32_t md_size
, sector_size
;
137 /* extended LBA only for the test case */
138 if (!(spdk_nvme_ns_supports_extended_lba(ns
))) {
142 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
143 md_size
= spdk_nvme_ns_get_md_size(ns
);
144 req
->contig
= spdk_dma_zmalloc((sector_size
+ md_size
) * req
->lba_count
, 0x1000, NULL
);
150 req
->use_extended_lba
= true;
152 req
->buf_size
= (sector_size
+ md_size
) * req
->lba_count
;
153 req
->metadata
= NULL
;
154 ns_data_buffer_reset(ns
, req
, DATA_PATTERN
);
155 pi
= (struct spdk_nvme_protection_info
*)(req
->contig
+ sector_size
+ md_size
- 8);
156 /* big-endian for guard */
157 to_be16(&pi
->guard
, spdk_crc16_t10dif(req
->contig
, sector_size
));
159 pi
= (struct spdk_nvme_protection_info
*)(req
->contig
+ (sector_size
+ md_size
) * 2 - 8);
160 to_be16(&pi
->guard
, spdk_crc16_t10dif(req
->contig
+ sector_size
+ md_size
, sector_size
));
162 *io_flags
= SPDK_NVME_IO_FLAGS_PRCHK_GUARD
;
164 return req
->lba_count
;
168 * No protection information with PRACT setting to 1,
169 * both extended LBA format and separate metadata can
172 static uint32_t dp_with_pract_test(struct spdk_nvme_ns
*ns
, struct io_request
*req
,
175 uint32_t sector_size
;
179 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
180 /* No additional metadata buffer provided */
181 req
->contig
= spdk_dma_zmalloc(sector_size
* req
->lba_count
, 0x1000, NULL
);
186 switch (spdk_nvme_ns_get_pi_type(ns
)) {
187 case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3
:
188 *io_flags
= SPDK_NVME_IO_FLAGS_PRCHK_GUARD
| SPDK_NVME_IO_FLAGS_PRACT
;
190 case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1
:
191 case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2
:
192 *io_flags
= SPDK_NVME_IO_FLAGS_PRCHK_GUARD
| SPDK_NVME_IO_FLAGS_PRCHK_REFTAG
|
193 SPDK_NVME_IO_FLAGS_PRACT
;
201 req
->use_extended_lba
= false;
202 req
->metadata
= NULL
;
204 return req
->lba_count
;
207 /* Block Reference Tag checked for TYPE1 and TYPE2 with PRACT setting to 0 */
208 static uint32_t dp_without_pract_extended_lba_test(struct spdk_nvme_ns
*ns
, struct io_request
*req
,
211 struct spdk_nvme_protection_info
*pi
;
212 uint32_t md_size
, sector_size
;
216 switch (spdk_nvme_ns_get_pi_type(ns
)) {
217 case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3
:
223 /* extended LBA only for the test case */
224 if (!(spdk_nvme_ns_supports_extended_lba(ns
))) {
228 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
229 md_size
= spdk_nvme_ns_get_md_size(ns
);
230 req
->contig
= spdk_dma_zmalloc((sector_size
+ md_size
) * req
->lba_count
, 0x1000, NULL
);
236 req
->use_extended_lba
= true;
237 req
->metadata
= NULL
;
238 pi
= (struct spdk_nvme_protection_info
*)(req
->contig
+ sector_size
+ md_size
- 8);
239 /* big-endian for reference tag */
240 to_be32(&pi
->ref_tag
, (uint32_t)req
->lba
);
242 pi
= (struct spdk_nvme_protection_info
*)(req
->contig
+ (sector_size
+ md_size
) * 2 - 8);
243 /* is incremented for each subsequent logical block */
244 to_be32(&pi
->ref_tag
, (uint32_t)(req
->lba
+ 1));
246 *io_flags
= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG
;
248 return req
->lba_count
;
251 /* LBA + Metadata without data protection bits setting */
252 static uint32_t dp_without_flags_extended_lba_test(struct spdk_nvme_ns
*ns
, struct io_request
*req
,
255 uint32_t md_size
, sector_size
;
259 /* extended LBA only for the test case */
260 if (!(spdk_nvme_ns_supports_extended_lba(ns
))) {
264 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
265 md_size
= spdk_nvme_ns_get_md_size(ns
);
266 req
->contig
= spdk_dma_zmalloc((sector_size
+ md_size
) * req
->lba_count
, 0x1000, NULL
);
272 req
->use_extended_lba
= true;
273 req
->metadata
= NULL
;
276 return req
->lba_count
;
279 /* Block Reference Tag checked for TYPE1 and TYPE2 with PRACT setting to 0 */
280 static uint32_t dp_without_pract_separate_meta_test(struct spdk_nvme_ns
*ns
, struct io_request
*req
,
283 struct spdk_nvme_protection_info
*pi
;
284 uint32_t md_size
, sector_size
;
288 switch (spdk_nvme_ns_get_pi_type(ns
)) {
289 case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3
:
295 /* separate metadata payload for the test case */
296 if (spdk_nvme_ns_supports_extended_lba(ns
)) {
300 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
301 md_size
= spdk_nvme_ns_get_md_size(ns
);
302 req
->contig
= spdk_dma_zmalloc(sector_size
* req
->lba_count
, 0x1000, NULL
);
307 req
->metadata
= spdk_dma_zmalloc(md_size
* req
->lba_count
, 0x1000, NULL
);
308 if (!req
->metadata
) {
309 spdk_dma_free(req
->contig
);
314 req
->use_extended_lba
= false;
316 /* last 8 bytes if the metadata size bigger than 8 */
317 pi
= (struct spdk_nvme_protection_info
*)(req
->metadata
+ md_size
- 8);
318 /* big-endian for reference tag */
319 to_be32(&pi
->ref_tag
, (uint32_t)req
->lba
);
321 pi
= (struct spdk_nvme_protection_info
*)(req
->metadata
+ md_size
* 2 - 8);
322 /* is incremented for each subsequent logical block */
323 to_be32(&pi
->ref_tag
, (uint32_t)(req
->lba
+ 1));
325 *io_flags
= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG
;
327 return req
->lba_count
;
330 /* Application Tag checked with PRACT setting to 0 */
331 static uint32_t dp_without_pract_separate_meta_apptag_test(struct spdk_nvme_ns
*ns
,
332 struct io_request
*req
,
335 struct spdk_nvme_protection_info
*pi
;
336 uint32_t md_size
, sector_size
;
340 /* separate metadata payload for the test case */
341 if (spdk_nvme_ns_supports_extended_lba(ns
)) {
345 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
346 md_size
= spdk_nvme_ns_get_md_size(ns
);
347 req
->contig
= spdk_dma_zmalloc(sector_size
* req
->lba_count
, 0x1000, NULL
);
352 req
->metadata
= spdk_dma_zmalloc(md_size
* req
->lba_count
, 0x1000, NULL
);
353 if (!req
->metadata
) {
354 spdk_dma_free(req
->contig
);
359 req
->use_extended_lba
= false;
360 req
->apptag_mask
= 0xFFFF;
361 req
->apptag
= req
->lba_count
;
363 /* last 8 bytes if the metadata size bigger than 8 */
364 pi
= (struct spdk_nvme_protection_info
*)(req
->metadata
+ md_size
- 8);
365 to_be16(&pi
->app_tag
, req
->lba_count
);
367 *io_flags
= SPDK_NVME_IO_FLAGS_PRCHK_APPTAG
;
369 return req
->lba_count
;
373 * LBA + Metadata without data protection bits setting,
374 * separate metadata payload for the test case.
376 static uint32_t dp_without_flags_separate_meta_test(struct spdk_nvme_ns
*ns
, struct io_request
*req
,
379 uint32_t md_size
, sector_size
;
383 /* separate metadata payload for the test case */
384 if (spdk_nvme_ns_supports_extended_lba(ns
)) {
388 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
389 md_size
= spdk_nvme_ns_get_md_size(ns
);
390 req
->contig
= spdk_dma_zmalloc(sector_size
* req
->lba_count
, 0x1000, NULL
);
395 req
->metadata
= spdk_dma_zmalloc(md_size
* req
->lba_count
, 0x1000, NULL
);
396 if (!req
->metadata
) {
397 spdk_dma_free(req
->contig
);
402 req
->use_extended_lba
= false;
405 return req
->lba_count
;
/*
 * Builder for one e2e data-protection request: fills *req and *lba_count
 * (the PRCHK/PRACT flags word) and returns the LBA count, or 0 to bypass.
 */
typedef uint32_t (*nvme_build_io_req_fn_t)(struct spdk_nvme_ns *ns, struct io_request *req,
		uint32_t *lba_count);
412 free_req(struct io_request
*req
)
419 spdk_dma_free(req
->contig
);
423 spdk_dma_free(req
->metadata
);
430 ns_data_buffer_compare(struct spdk_nvme_ns
*ns
, struct io_request
*req
, uint8_t data_pattern
)
432 uint32_t md_size
, sector_size
;
433 uint32_t i
, j
, offset
= 0;
436 sector_size
= spdk_nvme_ns_get_sector_size(ns
);
437 md_size
= spdk_nvme_ns_get_md_size(ns
);
439 for (i
= 0; i
< req
->lba_count
; i
++) {
440 if (req
->use_extended_lba
) {
441 offset
= (sector_size
+ md_size
) * i
;
443 offset
= sector_size
* i
;
446 buf
= (uint8_t *)req
->contig
+ offset
;
447 for (j
= 0; j
< sector_size
; j
++) {
448 if (buf
[j
] != data_pattern
) {
458 write_read_e2e_dp_tests(struct dev
*dev
, nvme_build_io_req_fn_t build_io_fn
, const char *test_name
)
462 uint32_t io_flags
= 0;
464 struct io_request
*req
;
465 struct spdk_nvme_ns
*ns
;
466 struct spdk_nvme_qpair
*qpair
;
467 const struct spdk_nvme_ns_data
*nsdata
;
469 ns
= spdk_nvme_ctrlr_get_ns(dev
->ctrlr
, 1);
471 fprintf(stderr
, "Null namespace\n");
475 if (!(spdk_nvme_ns_get_flags(ns
) & SPDK_NVME_NS_DPS_PI_SUPPORTED
)) {
479 nsdata
= spdk_nvme_ns_get_data(ns
);
480 if (!nsdata
|| !spdk_nvme_ns_get_sector_size(ns
)) {
481 fprintf(stderr
, "Empty nsdata or wrong sector size\n");
485 req
= spdk_dma_zmalloc(sizeof(*req
), 0, NULL
);
487 fprintf(stderr
, "Allocate request failed\n");
491 /* IO parameters setting */
492 lba_count
= build_io_fn(ns
, req
, &io_flags
);
495 fprintf(stderr
, "%s: %s bypass the test case\n", dev
->name
, test_name
);
500 qpair
= spdk_nvme_ctrlr_alloc_io_qpair(dev
->ctrlr
, NULL
, 0);
506 ns_data_buffer_reset(ns
, req
, DATA_PATTERN
);
507 if (req
->use_extended_lba
&& req
->use_sgl
) {
508 rc
= spdk_nvme_ns_cmd_writev(ns
, qpair
, req
->lba
, lba_count
, io_complete
, req
, io_flags
,
509 nvme_req_reset_sgl
, nvme_req_next_sge
);
510 } else if (req
->use_extended_lba
) {
511 rc
= spdk_nvme_ns_cmd_write(ns
, qpair
, req
->contig
, req
->lba
, lba_count
,
512 io_complete
, req
, io_flags
);
514 rc
= spdk_nvme_ns_cmd_write_with_md(ns
, qpair
, req
->contig
, req
->metadata
, req
->lba
, lba_count
,
515 io_complete
, req
, io_flags
, req
->apptag_mask
, req
->apptag
);
519 fprintf(stderr
, "%s: %s write submit failed\n", dev
->name
, test_name
);
520 spdk_nvme_ctrlr_free_io_qpair(qpair
);
525 io_complete_flag
= 0;
527 while (!io_complete_flag
) {
528 spdk_nvme_qpair_process_completions(qpair
, 1);
531 if (io_complete_flag
!= 1) {
532 fprintf(stderr
, "%s: %s write exec failed\n", dev
->name
, test_name
);
533 spdk_nvme_ctrlr_free_io_qpair(qpair
);
538 /* reset completion flag */
539 io_complete_flag
= 0;
541 ns_data_buffer_reset(ns
, req
, 0);
542 if (req
->use_extended_lba
&& req
->use_sgl
) {
543 rc
= spdk_nvme_ns_cmd_readv(ns
, qpair
, req
->lba
, lba_count
, io_complete
, req
, io_flags
,
544 nvme_req_reset_sgl
, nvme_req_next_sge
);
546 } else if (req
->use_extended_lba
) {
547 rc
= spdk_nvme_ns_cmd_read(ns
, qpair
, req
->contig
, req
->lba
, lba_count
,
548 io_complete
, req
, io_flags
);
550 rc
= spdk_nvme_ns_cmd_read_with_md(ns
, qpair
, req
->contig
, req
->metadata
, req
->lba
, lba_count
,
551 io_complete
, req
, io_flags
, req
->apptag_mask
, req
->apptag
);
555 fprintf(stderr
, "%s: %s read failed\n", dev
->name
, test_name
);
556 spdk_nvme_ctrlr_free_io_qpair(qpair
);
561 while (!io_complete_flag
) {
562 spdk_nvme_qpair_process_completions(qpair
, 1);
565 if (io_complete_flag
!= 1) {
566 fprintf(stderr
, "%s: %s read failed\n", dev
->name
, test_name
);
567 spdk_nvme_ctrlr_free_io_qpair(qpair
);
572 rc
= ns_data_buffer_compare(ns
, req
, DATA_PATTERN
);
574 fprintf(stderr
, "%s: %s write/read success, but memcmp Failed\n", dev
->name
, test_name
);
575 spdk_nvme_ctrlr_free_io_qpair(qpair
);
580 fprintf(stdout
, "%s: %s test passed\n", dev
->name
, test_name
);
581 spdk_nvme_ctrlr_free_io_qpair(qpair
);
587 probe_cb(void *cb_ctx
, const struct spdk_nvme_transport_id
*trid
,
588 struct spdk_nvme_ctrlr_opts
*opts
)
590 printf("Attaching to %s\n", trid
->traddr
);
596 attach_cb(void *cb_ctx
, const struct spdk_nvme_transport_id
*trid
,
597 struct spdk_nvme_ctrlr
*ctrlr
, const struct spdk_nvme_ctrlr_opts
*opts
)
601 /* add to dev list */
602 dev
= &devs
[num_devs
++];
606 snprintf(dev
->name
, sizeof(dev
->name
), "%s",
609 printf("Attached to %s\n", dev
->name
);
612 int main(int argc
, char **argv
)
616 struct spdk_env_opts opts
;
618 spdk_env_opts_init(&opts
);
619 opts
.name
= "nvme_dp";
620 opts
.core_mask
= "0x1";
622 if (spdk_env_init(&opts
) < 0) {
623 fprintf(stderr
, "Unable to initialize SPDK env\n");
627 printf("NVMe Write/Read with End-to-End data protection test\n");
629 if (spdk_nvme_probe(NULL
, NULL
, probe_cb
, attach_cb
, NULL
) != 0) {
630 fprintf(stderr
, "nvme_probe() failed\n");
636 #define TEST(x) write_read_e2e_dp_tests(iter, x, #x)
637 if (TEST(dp_with_pract_test
)
638 || TEST(dp_guard_check_extended_lba_test
)
639 || TEST(dp_without_pract_extended_lba_test
)
640 || TEST(dp_without_flags_extended_lba_test
)
641 || TEST(dp_without_pract_separate_meta_test
)
642 || TEST(dp_without_pract_separate_meta_apptag_test
)
643 || TEST(dp_without_flags_separate_meta_test
)) {
646 printf("%s: failed End-to-End data protection tests\n", iter
->name
);
650 printf("Cleaning up...\n");
652 for (i
= 0; i
< num_devs
; i
++) {
653 struct dev
*dev
= &devs
[i
];
655 spdk_nvme_detach(dev
->ctrlr
);