4 * Copyright (c) Intel Corporation.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include "spdk/stdinc.h"
36 #include "spdk/bdev.h"
37 #include "spdk/accel_engine.h"
40 #include "spdk/thread.h"
41 #include "spdk/event.h"
43 #include "spdk/util.h"
44 #include "spdk/string.h"
46 #include "CUnit/Basic.h"
/* Maximum number of iovec entries a request may be chopped into. */
#define BUFFER_IOVS		1024
/* Size of the largest test buffer; parenthesized so the macro is safe
 * inside any expression (original expanded to a bare `260 * 1024`). */
#define BUFFER_SIZE		(260 * 1024)
#define BDEV_TASK_ARRAY_SIZE	2048
/* Handshake primitives: the calling thread blocks on g_test_cond while the
 * I/O thread runs the requested function, then signals completion. */
pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

static struct spdk_thread *g_thread_init;	/* app thread that started us */
static struct spdk_thread *g_thread_ut;		/* thread running the CUnit suites */
static struct spdk_thread *g_thread_io;		/* thread that submits bdev I/O */
static bool g_wait_for_tests = false;		/* when true, wait for RPC before testing */
static int g_num_failures = 0;
/* One test target: an opened leaf bdev, its descriptor and I/O channel,
 * linked into the g_io_targets singly-linked list. */
struct io_target {
	struct spdk_bdev	*bdev;
	struct spdk_bdev_desc	*bdev_desc;
	struct spdk_io_channel	*ch;
	struct io_target	*next;
};
68 struct bdevio_request
{
73 struct iovec iov
[BUFFER_IOVS
];
75 struct iovec fused_iov
[BUFFER_IOVS
];
77 struct io_target
*target
;
80 struct io_target
*g_io_targets
= NULL
;
81 struct io_target
*g_current_io_target
= NULL
;
/* Forward declaration: completion callback for the perform_tests RPC. */
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);
85 execute_spdk_function(spdk_msg_fn fn
, void *arg
)
87 pthread_mutex_lock(&g_test_mutex
);
88 spdk_thread_send_msg(g_thread_io
, fn
, arg
);
89 pthread_cond_wait(&g_test_cond
, &g_test_mutex
);
90 pthread_mutex_unlock(&g_test_mutex
);
96 pthread_mutex_lock(&g_test_mutex
);
97 pthread_cond_signal(&g_test_cond
);
98 pthread_mutex_unlock(&g_test_mutex
);
102 __get_io_channel(void *arg
)
104 struct io_target
*target
= arg
;
106 target
->ch
= spdk_bdev_get_io_channel(target
->bdev_desc
);
112 bdevio_construct_target(struct spdk_bdev
*bdev
)
114 struct io_target
*target
;
116 uint64_t num_blocks
= spdk_bdev_get_num_blocks(bdev
);
117 uint32_t block_size
= spdk_bdev_get_block_size(bdev
);
119 target
= malloc(sizeof(struct io_target
));
120 if (target
== NULL
) {
124 rc
= spdk_bdev_open(bdev
, true, NULL
, NULL
, &target
->bdev_desc
);
127 SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev
), rc
);
131 printf(" %s: %" PRIu64
" blocks of %" PRIu32
" bytes (%" PRIu64
" MiB)\n",
132 spdk_bdev_get_name(bdev
),
133 num_blocks
, block_size
,
134 (num_blocks
* block_size
+ 1024 * 1024 - 1) / (1024 * 1024));
137 target
->next
= g_io_targets
;
138 execute_spdk_function(__get_io_channel
, target
);
139 g_io_targets
= target
;
145 bdevio_construct_targets(void)
147 struct spdk_bdev
*bdev
;
150 printf("I/O targets:\n");
152 bdev
= spdk_bdev_first_leaf();
153 while (bdev
!= NULL
) {
154 rc
= bdevio_construct_target(bdev
);
156 SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev
), rc
);
159 bdev
= spdk_bdev_next_leaf(bdev
);
162 if (g_io_targets
== NULL
) {
163 SPDK_ERRLOG("No bdevs to perform tests on\n");
171 __put_io_channel(void *arg
)
173 struct io_target
*target
= arg
;
175 spdk_put_io_channel(target
->ch
);
180 bdevio_cleanup_targets(void)
182 struct io_target
*target
;
184 target
= g_io_targets
;
185 while (target
!= NULL
) {
186 execute_spdk_function(__put_io_channel
, target
);
187 spdk_bdev_close(target
->bdev_desc
);
188 g_io_targets
= target
->next
;
190 target
= g_io_targets
;
/* Set by the I/O completion callback; read by the test thread after
 * execute_spdk_function() returns. */
static bool g_completion_success;
197 initialize_buffer(char **buf
, int pattern
, int size
)
199 *buf
= spdk_zmalloc(size
, 0x1000, NULL
, SPDK_ENV_LCORE_ID_ANY
, SPDK_MALLOC_DMA
);
200 memset(*buf
, pattern
, size
);
204 quick_test_complete(struct spdk_bdev_io
*bdev_io
, bool success
, void *arg
)
206 g_completion_success
= success
;
207 spdk_bdev_free_io(bdev_io
);
212 __blockdev_write(void *arg
)
214 struct bdevio_request
*req
= arg
;
215 struct io_target
*target
= req
->target
;
219 rc
= spdk_bdev_writev(target
->bdev_desc
, target
->ch
, req
->iov
, req
->iovcnt
, req
->offset
,
220 req
->data_len
, quick_test_complete
, NULL
);
222 rc
= spdk_bdev_write(target
->bdev_desc
, target
->ch
, req
->buf
, req
->offset
,
223 req
->data_len
, quick_test_complete
, NULL
);
227 g_completion_success
= false;
233 __blockdev_write_zeroes(void *arg
)
235 struct bdevio_request
*req
= arg
;
236 struct io_target
*target
= req
->target
;
239 rc
= spdk_bdev_write_zeroes(target
->bdev_desc
, target
->ch
, req
->offset
,
240 req
->data_len
, quick_test_complete
, NULL
);
242 g_completion_success
= false;
248 __blockdev_compare_and_write(void *arg
)
250 struct bdevio_request
*req
= arg
;
251 struct io_target
*target
= req
->target
;
254 rc
= spdk_bdev_comparev_and_writev_blocks(target
->bdev_desc
, target
->ch
, req
->iov
, req
->iovcnt
,
255 req
->fused_iov
, req
->fused_iovcnt
, req
->offset
, req
->data_len
, quick_test_complete
, NULL
);
258 g_completion_success
= false;
264 sgl_chop_buffer(struct bdevio_request
*req
, int iov_len
)
266 int data_len
= req
->data_len
;
267 char *buf
= req
->buf
;
274 for (; data_len
> 0 && req
->iovcnt
< BUFFER_IOVS
; req
->iovcnt
++) {
275 if (data_len
< iov_len
) {
279 req
->iov
[req
->iovcnt
].iov_base
= buf
;
280 req
->iov
[req
->iovcnt
].iov_len
= iov_len
;
286 CU_ASSERT_EQUAL_FATAL(data_len
, 0);
290 sgl_chop_fused_buffer(struct bdevio_request
*req
, int iov_len
)
292 int data_len
= req
->data_len
;
293 char *buf
= req
->fused_buf
;
295 req
->fused_iovcnt
= 0;
300 for (; data_len
> 0 && req
->fused_iovcnt
< BUFFER_IOVS
; req
->fused_iovcnt
++) {
301 if (data_len
< iov_len
) {
305 req
->fused_iov
[req
->fused_iovcnt
].iov_base
= buf
;
306 req
->fused_iov
[req
->fused_iovcnt
].iov_len
= iov_len
;
312 CU_ASSERT_EQUAL_FATAL(data_len
, 0);
316 blockdev_write(struct io_target
*target
, char *tx_buf
,
317 uint64_t offset
, int data_len
, int iov_len
)
319 struct bdevio_request req
;
323 req
.data_len
= data_len
;
325 sgl_chop_buffer(&req
, iov_len
);
327 g_completion_success
= false;
329 execute_spdk_function(__blockdev_write
, &req
);
333 _blockdev_compare_and_write(struct io_target
*target
, char *cmp_buf
, char *write_buf
,
334 uint64_t offset
, int data_len
, int iov_len
)
336 struct bdevio_request req
;
340 req
.fused_buf
= write_buf
;
341 req
.data_len
= data_len
;
343 sgl_chop_buffer(&req
, iov_len
);
344 sgl_chop_fused_buffer(&req
, iov_len
);
346 g_completion_success
= false;
348 execute_spdk_function(__blockdev_compare_and_write
, &req
);
352 blockdev_write_zeroes(struct io_target
*target
, char *tx_buf
,
353 uint64_t offset
, int data_len
)
355 struct bdevio_request req
;
359 req
.data_len
= data_len
;
362 g_completion_success
= false;
364 execute_spdk_function(__blockdev_write_zeroes
, &req
);
368 __blockdev_read(void *arg
)
370 struct bdevio_request
*req
= arg
;
371 struct io_target
*target
= req
->target
;
375 rc
= spdk_bdev_readv(target
->bdev_desc
, target
->ch
, req
->iov
, req
->iovcnt
, req
->offset
,
376 req
->data_len
, quick_test_complete
, NULL
);
378 rc
= spdk_bdev_read(target
->bdev_desc
, target
->ch
, req
->buf
, req
->offset
,
379 req
->data_len
, quick_test_complete
, NULL
);
383 g_completion_success
= false;
389 blockdev_read(struct io_target
*target
, char *rx_buf
,
390 uint64_t offset
, int data_len
, int iov_len
)
392 struct bdevio_request req
;
396 req
.data_len
= data_len
;
399 sgl_chop_buffer(&req
, iov_len
);
401 g_completion_success
= false;
403 execute_spdk_function(__blockdev_read
, &req
);
/* Compare read-back data against what was written, then free BOTH buffers
 * (ownership of rx_buf and tx_buf transfers to this function).
 * Returns memcmp()'s result: 0 on match. */
static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int rc;

	rc = memcmp(rx_buf, tx_buf, data_length);

	spdk_free(rx_buf);
	spdk_free(tx_buf);

	return rc;
}
/* True when data_length is a positive whole number of blocks that fits
 * within the bdev. */
static bool
blockdev_io_valid_blocks(struct spdk_bdev *bdev, uint64_t data_length)
{
	if (data_length < spdk_bdev_get_block_size(bdev) ||
	    data_length % spdk_bdev_get_block_size(bdev) ||
	    data_length / spdk_bdev_get_block_size(bdev) > spdk_bdev_get_num_blocks(bdev)) {
		return false;
	}

	return true;
}
431 blockdev_write_read(uint32_t data_length
, uint32_t iov_len
, int pattern
, uint64_t offset
,
432 int expected_rc
, bool write_zeroes
)
434 struct io_target
*target
;
439 target
= g_current_io_target
;
441 if (!blockdev_io_valid_blocks(target
->bdev
, data_length
)) {
446 initialize_buffer(&tx_buf
, pattern
, data_length
);
447 initialize_buffer(&rx_buf
, 0, data_length
);
449 blockdev_write(target
, tx_buf
, offset
, data_length
, iov_len
);
451 initialize_buffer(&tx_buf
, 0, data_length
);
452 initialize_buffer(&rx_buf
, pattern
, data_length
);
454 blockdev_write_zeroes(target
, tx_buf
, offset
, data_length
);
458 if (expected_rc
== 0) {
459 CU_ASSERT_EQUAL(g_completion_success
, true);
461 CU_ASSERT_EQUAL(g_completion_success
, false);
463 blockdev_read(target
, rx_buf
, offset
, data_length
, iov_len
);
465 if (expected_rc
== 0) {
466 CU_ASSERT_EQUAL(g_completion_success
, true);
468 CU_ASSERT_EQUAL(g_completion_success
, false);
471 if (g_completion_success
) {
472 rc
= blockdev_write_read_data_match(rx_buf
, tx_buf
, data_length
);
473 /* Assert the write by comparing it with values read
474 * from each blockdev */
475 CU_ASSERT_EQUAL(rc
, 0);
480 blockdev_compare_and_write(uint32_t data_length
, uint32_t iov_len
, uint64_t offset
)
482 struct io_target
*target
;
484 char *write_buf
= NULL
;
488 target
= g_current_io_target
;
490 if (!blockdev_io_valid_blocks(target
->bdev
, data_length
)) {
494 initialize_buffer(&tx_buf
, 0xAA, data_length
);
495 initialize_buffer(&rx_buf
, 0, data_length
);
496 initialize_buffer(&write_buf
, 0xBB, data_length
);
498 blockdev_write(target
, tx_buf
, offset
, data_length
, iov_len
);
499 CU_ASSERT_EQUAL(g_completion_success
, true);
501 _blockdev_compare_and_write(target
, tx_buf
, write_buf
, offset
, data_length
, iov_len
);
502 CU_ASSERT_EQUAL(g_completion_success
, true);
504 _blockdev_compare_and_write(target
, tx_buf
, write_buf
, offset
, data_length
, iov_len
);
505 CU_ASSERT_EQUAL(g_completion_success
, false);
507 blockdev_read(target
, rx_buf
, offset
, data_length
, iov_len
);
508 CU_ASSERT_EQUAL(g_completion_success
, true);
509 rc
= blockdev_write_read_data_match(rx_buf
, write_buf
, data_length
);
510 /* Assert the write by comparing it with values read
511 * from each blockdev */
512 CU_ASSERT_EQUAL(rc
, 0);
516 blockdev_write_read_4k(void)
518 uint32_t data_length
;
525 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
528 /* Params are valid, hence the expected return value
529 * of write and read for all blockdevs is 0. */
532 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 0);
/* CUnit case: 4K write-zeroes then read must succeed and return zeroes.
 * NOTE(review): pattern/offset constants were dropped by extraction;
 * values below follow upstream SPDK bdevio — confirm. */
static void
blockdev_write_zeroes_read_4k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 4K */
	data_length = 4096;
	pattern = 0x05;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;
	offset = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}
/*
 * This i/o will not have to split at the bdev layer.
 */
static void
blockdev_write_zeroes_read_1m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 1M */
	data_length = 1048576;
	pattern = 0x05;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;
	offset = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}
/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev.
 */
static void
blockdev_write_zeroes_read_3m(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3M */
	data_length = 3145728;
	pattern = 0x05;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;
	offset = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}
/*
 * This i/o will have to split at the bdev layer if
 * write-zeroes is not supported by the bdev. It also
 * tests a write size that is not an even multiple of
 * the bdev layer zero buffer size.
 */
static void
blockdev_write_zeroes_read_3m_500k(void)
{
	uint32_t data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 3.5M */
	data_length = 3670016;
	pattern = 0x05;
	/* Params are valid, hence the expected return value
	 * of write_zeroes and read for all blockdevs is 0. */
	expected_rc = 0;
	offset = 0;

	blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
}
625 blockdev_writev_readv_4k(void)
627 uint32_t data_length
, iov_len
;
635 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
638 /* Params are valid, hence the expected return value
639 * of write and read for all blockdevs is 0. */
642 blockdev_write_read(data_length
, iov_len
, pattern
, offset
, expected_rc
, 0);
646 blockdev_comparev_and_writev(void)
648 uint32_t data_length
, iov_len
;
653 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
656 blockdev_compare_and_write(data_length
, iov_len
, offset
);
660 blockdev_writev_readv_30x4k(void)
662 uint32_t data_length
, iov_len
;
668 data_length
= 4096 * 30;
670 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
673 /* Params are valid, hence the expected return value
674 * of write and read for all blockdevs is 0. */
677 blockdev_write_read(data_length
, iov_len
, pattern
, offset
, expected_rc
, 0);
681 blockdev_write_read_512Bytes(void)
683 uint32_t data_length
;
688 /* Data size = 512 */
690 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
693 /* Params are valid, hence the expected return value
694 * of write and read for all blockdevs is 0. */
697 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 0);
701 blockdev_writev_readv_512Bytes(void)
703 uint32_t data_length
, iov_len
;
708 /* Data size = 512 */
711 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
714 /* Params are valid, hence the expected return value
715 * of write and read for all blockdevs is 0. */
718 blockdev_write_read(data_length
, iov_len
, pattern
, offset
, expected_rc
, 0);
722 blockdev_write_read_size_gt_128k(void)
724 uint32_t data_length
;
729 /* Data size = 132K */
730 data_length
= 135168;
731 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
734 /* Params are valid, hence the expected return value
735 * of write and read for all blockdevs is 0. */
738 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 0);
742 blockdev_writev_readv_size_gt_128k(void)
744 uint32_t data_length
, iov_len
;
749 /* Data size = 132K */
750 data_length
= 135168;
752 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
755 /* Params are valid, hence the expected return value
756 * of write and read for all blockdevs is 0. */
759 blockdev_write_read(data_length
, iov_len
, pattern
, offset
, expected_rc
, 0);
763 blockdev_writev_readv_size_gt_128k_two_iov(void)
765 uint32_t data_length
, iov_len
;
770 /* Data size = 132K */
771 data_length
= 135168;
772 iov_len
= 128 * 1024;
773 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
776 /* Params are valid, hence the expected return value
777 * of write and read for all blockdevs is 0. */
780 blockdev_write_read(data_length
, iov_len
, pattern
, offset
, expected_rc
, 0);
784 blockdev_write_read_invalid_size(void)
786 uint32_t data_length
;
791 /* Data size is not a multiple of the block size */
792 data_length
= 0x1015;
793 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
796 /* Params are invalid, hence the expected return value
797 * of write and read for all blockdevs is < 0 */
800 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 0);
804 blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
806 struct io_target
*target
;
807 struct spdk_bdev
*bdev
;
814 target
= g_current_io_target
;
817 block_size
= spdk_bdev_get_block_size(bdev
);
819 /* The start offset has been set to a marginal value
820 * such that offset + nbytes == Total size of
822 offset
= ((spdk_bdev_get_num_blocks(bdev
) - 1) * block_size
);
824 initialize_buffer(&tx_buf
, 0xA3, block_size
);
825 initialize_buffer(&rx_buf
, 0, block_size
);
827 blockdev_write(target
, tx_buf
, offset
, block_size
, 0);
828 CU_ASSERT_EQUAL(g_completion_success
, true);
830 blockdev_read(target
, rx_buf
, offset
, block_size
, 0);
831 CU_ASSERT_EQUAL(g_completion_success
, true);
833 rc
= blockdev_write_read_data_match(rx_buf
, tx_buf
, block_size
);
834 /* Assert the write by comparing it with values read
835 * from each blockdev */
836 CU_ASSERT_EQUAL(rc
, 0);
840 blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
842 struct io_target
*target
;
843 struct spdk_bdev
*bdev
;
850 /* Tests the overflow condition of the blockdevs. */
852 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
855 target
= g_current_io_target
;
858 /* The start offset has been set to a valid value
859 * but offset + nbytes is greater than the Total size
860 * of the blockdev. The test should fail. */
861 offset
= ((spdk_bdev_get_num_blocks(bdev
) * spdk_bdev_get_block_size(bdev
)) - 1024);
863 initialize_buffer(&tx_buf
, pattern
, data_length
);
864 initialize_buffer(&rx_buf
, 0, data_length
);
866 blockdev_write(target
, tx_buf
, offset
, data_length
, 0);
867 CU_ASSERT_EQUAL(g_completion_success
, false);
869 blockdev_read(target
, rx_buf
, offset
, data_length
, 0);
870 CU_ASSERT_EQUAL(g_completion_success
, false);
874 blockdev_write_read_max_offset(void)
882 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
883 /* The start offset has been set to UINT64_MAX such that
884 * adding nbytes wraps around and points to an invalid address. */
887 /* Params are invalid, hence the expected return value
888 * of write and read for all blockdevs is < 0 */
891 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 0);
895 blockdev_overlapped_write_read_8k(void)
904 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
907 /* Params are valid, hence the expected return value
908 * of write and read for all blockdevs is 0. */
910 /* Assert the write by comparing it with values read
911 * from the same offset for each blockdev */
912 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 0);
914 /* Overwrite the pattern 0xbb of size 8K on an address offset overlapping
915 * with the address written above and assert the new value in
916 * the overlapped address range */
917 /* Populate 8k with value 0xBB */
919 /* Offset = 6144; Overlap offset addresses and write value 0xbb */
921 /* Assert the write by comparing it with values read
922 * from the overlapped offset for each blockdev */
923 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 0);
927 __blockdev_reset(void *arg
)
929 struct bdevio_request
*req
= arg
;
930 struct io_target
*target
= req
->target
;
933 rc
= spdk_bdev_reset(target
->bdev_desc
, target
->ch
, quick_test_complete
, NULL
);
935 g_completion_success
= false;
941 blockdev_test_reset(void)
943 struct bdevio_request req
;
944 struct io_target
*target
;
946 target
= g_current_io_target
;
949 g_completion_success
= false;
951 execute_spdk_function(__blockdev_reset
, &req
);
953 /* Workaround: NVMe-oF target doesn't support reset yet - so for now
954 * don't fail the test if it's an NVMe bdev.
956 if (!spdk_bdev_io_type_supported(target
->bdev
, SPDK_BDEV_IO_TYPE_NVME_IO
)) {
957 CU_ASSERT_EQUAL(g_completion_success
, true);
961 struct bdevio_passthrough_request
{
962 struct spdk_nvme_cmd cmd
;
965 struct io_target
*target
;
972 nvme_pt_test_complete(struct spdk_bdev_io
*bdev_io
, bool success
, void *arg
)
974 struct bdevio_passthrough_request
*pt_req
= arg
;
976 spdk_bdev_io_get_nvme_status(bdev_io
, &pt_req
->cdw0
, &pt_req
->sct
, &pt_req
->sc
);
977 spdk_bdev_free_io(bdev_io
);
982 __blockdev_nvme_passthru(void *arg
)
984 struct bdevio_passthrough_request
*pt_req
= arg
;
985 struct io_target
*target
= pt_req
->target
;
988 rc
= spdk_bdev_nvme_io_passthru(target
->bdev_desc
, target
->ch
,
989 &pt_req
->cmd
, pt_req
->buf
, pt_req
->len
,
990 nvme_pt_test_complete
, pt_req
);
997 blockdev_test_nvme_passthru_rw(void)
999 struct bdevio_passthrough_request pt_req
;
1000 void *write_buf
, *read_buf
;
1001 struct io_target
*target
;
1003 target
= g_current_io_target
;
1005 if (!spdk_bdev_io_type_supported(target
->bdev
, SPDK_BDEV_IO_TYPE_NVME_IO
)) {
1009 memset(&pt_req
, 0, sizeof(pt_req
));
1010 pt_req
.target
= target
;
1011 pt_req
.cmd
.opc
= SPDK_NVME_OPC_WRITE
;
1012 pt_req
.cmd
.nsid
= 1;
1013 *(uint64_t *)&pt_req
.cmd
.cdw10
= 4;
1014 pt_req
.cmd
.cdw12
= 0;
1016 pt_req
.len
= spdk_bdev_get_block_size(target
->bdev
);
1017 write_buf
= spdk_malloc(pt_req
.len
, 0, NULL
, SPDK_ENV_LCORE_ID_ANY
, SPDK_MALLOC_DMA
);
1018 memset(write_buf
, 0xA5, pt_req
.len
);
1019 pt_req
.buf
= write_buf
;
1021 pt_req
.sct
= SPDK_NVME_SCT_VENDOR_SPECIFIC
;
1022 pt_req
.sc
= SPDK_NVME_SC_INVALID_FIELD
;
1023 execute_spdk_function(__blockdev_nvme_passthru
, &pt_req
);
1024 CU_ASSERT(pt_req
.sct
== SPDK_NVME_SCT_GENERIC
);
1025 CU_ASSERT(pt_req
.sc
== SPDK_NVME_SC_SUCCESS
);
1027 pt_req
.cmd
.opc
= SPDK_NVME_OPC_READ
;
1028 read_buf
= spdk_zmalloc(pt_req
.len
, 0, NULL
, SPDK_ENV_LCORE_ID_ANY
, SPDK_MALLOC_DMA
);
1029 pt_req
.buf
= read_buf
;
1031 pt_req
.sct
= SPDK_NVME_SCT_VENDOR_SPECIFIC
;
1032 pt_req
.sc
= SPDK_NVME_SC_INVALID_FIELD
;
1033 execute_spdk_function(__blockdev_nvme_passthru
, &pt_req
);
1034 CU_ASSERT(pt_req
.sct
== SPDK_NVME_SCT_GENERIC
);
1035 CU_ASSERT(pt_req
.sc
== SPDK_NVME_SC_SUCCESS
);
1037 CU_ASSERT(!memcmp(read_buf
, write_buf
, pt_req
.len
));
1038 spdk_free(read_buf
);
1039 spdk_free(write_buf
);
1043 blockdev_test_nvme_passthru_vendor_specific(void)
1045 struct bdevio_passthrough_request pt_req
;
1046 struct io_target
*target
;
1048 target
= g_current_io_target
;
1050 if (!spdk_bdev_io_type_supported(target
->bdev
, SPDK_BDEV_IO_TYPE_NVME_IO
)) {
1054 memset(&pt_req
, 0, sizeof(pt_req
));
1055 pt_req
.target
= target
;
1056 pt_req
.cmd
.opc
= 0x7F; /* choose known invalid opcode */
1057 pt_req
.cmd
.nsid
= 1;
1059 pt_req
.sct
= SPDK_NVME_SCT_VENDOR_SPECIFIC
;
1060 pt_req
.sc
= SPDK_NVME_SC_SUCCESS
;
1061 pt_req
.cdw0
= 0xbeef;
1062 execute_spdk_function(__blockdev_nvme_passthru
, &pt_req
);
1063 CU_ASSERT(pt_req
.sct
== SPDK_NVME_SCT_GENERIC
);
1064 CU_ASSERT(pt_req
.sc
== SPDK_NVME_SC_INVALID_OPCODE
);
1065 CU_ASSERT(pt_req
.cdw0
== 0x0);
1069 __blockdev_nvme_admin_passthru(void *arg
)
1071 struct bdevio_passthrough_request
*pt_req
= arg
;
1072 struct io_target
*target
= pt_req
->target
;
1075 rc
= spdk_bdev_nvme_admin_passthru(target
->bdev_desc
, target
->ch
,
1076 &pt_req
->cmd
, pt_req
->buf
, pt_req
->len
,
1077 nvme_pt_test_complete
, pt_req
);
1084 blockdev_test_nvme_admin_passthru(void)
1086 struct io_target
*target
;
1087 struct bdevio_passthrough_request pt_req
;
1089 target
= g_current_io_target
;
1091 if (!spdk_bdev_io_type_supported(target
->bdev
, SPDK_BDEV_IO_TYPE_NVME_ADMIN
)) {
1095 memset(&pt_req
, 0, sizeof(pt_req
));
1096 pt_req
.target
= target
;
1097 pt_req
.cmd
.opc
= SPDK_NVME_OPC_IDENTIFY
;
1098 pt_req
.cmd
.nsid
= 0;
1099 *(uint64_t *)&pt_req
.cmd
.cdw10
= SPDK_NVME_IDENTIFY_CTRLR
;
1101 pt_req
.len
= sizeof(struct spdk_nvme_ctrlr_data
);
1102 pt_req
.buf
= spdk_malloc(pt_req
.len
, 0, NULL
, SPDK_ENV_LCORE_ID_ANY
, SPDK_MALLOC_DMA
);
1104 pt_req
.sct
= SPDK_NVME_SCT_GENERIC
;
1105 pt_req
.sc
= SPDK_NVME_SC_SUCCESS
;
1106 execute_spdk_function(__blockdev_nvme_admin_passthru
, &pt_req
);
1107 CU_ASSERT(pt_req
.sct
== SPDK_NVME_SCT_GENERIC
);
1108 CU_ASSERT(pt_req
.sc
== SPDK_NVME_SC_SUCCESS
);
1112 __stop_init_thread(void *arg
)
1114 unsigned num_failures
= g_num_failures
;
1115 struct spdk_jsonrpc_request
*request
= arg
;
1119 bdevio_cleanup_targets();
1120 if (g_wait_for_tests
) {
1121 /* Do not stop the app yet, wait for another RPC */
1122 rpc_perform_tests_cb(num_failures
, request
);
1125 spdk_app_stop(num_failures
);
1129 stop_init_thread(unsigned num_failures
, struct spdk_jsonrpc_request
*request
)
1131 g_num_failures
= num_failures
;
1133 spdk_thread_send_msg(g_thread_init
, __stop_init_thread
, request
);
1139 if (g_current_io_target
== NULL
) {
1140 g_current_io_target
= g_io_targets
;
1148 g_current_io_target
= g_current_io_target
->next
;
/* Upper bound on a generated CUnit suite name, including the bdev name. */
#define SUITE_NAME_MAX 64
1155 __setup_ut_on_single_target(struct io_target
*target
)
1158 CU_pSuite suite
= NULL
;
1159 char name
[SUITE_NAME_MAX
];
1161 snprintf(name
, sizeof(name
), "bdevio tests on: %s", spdk_bdev_get_name(target
->bdev
));
1162 suite
= CU_add_suite(name
, suite_init
, suite_fini
);
1163 if (suite
== NULL
) {
1164 CU_cleanup_registry();
1165 rc
= CU_get_error();
1170 CU_add_test(suite
, "blockdev write read 4k", blockdev_write_read_4k
) == NULL
1171 || CU_add_test(suite
, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k
) == NULL
1172 || CU_add_test(suite
, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m
) == NULL
1173 || CU_add_test(suite
, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m
) == NULL
1174 || CU_add_test(suite
, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k
) == NULL
1175 || CU_add_test(suite
, "blockdev reset",
1176 blockdev_test_reset
) == NULL
1177 || CU_add_test(suite
, "blockdev write read 512 bytes",
1178 blockdev_write_read_512Bytes
) == NULL
1179 || CU_add_test(suite
, "blockdev write read size > 128k",
1180 blockdev_write_read_size_gt_128k
) == NULL
1181 || CU_add_test(suite
, "blockdev write read invalid size",
1182 blockdev_write_read_invalid_size
) == NULL
1183 || CU_add_test(suite
, "blockdev write read offset + nbytes == size of blockdev",
1184 blockdev_write_read_offset_plus_nbytes_equals_bdev_size
) == NULL
1185 || CU_add_test(suite
, "blockdev write read offset + nbytes > size of blockdev",
1186 blockdev_write_read_offset_plus_nbytes_gt_bdev_size
) == NULL
1187 || CU_add_test(suite
, "blockdev write read max offset",
1188 blockdev_write_read_max_offset
) == NULL
1189 || CU_add_test(suite
, "blockdev write read 8k on overlapped address offset",
1190 blockdev_overlapped_write_read_8k
) == NULL
1191 || CU_add_test(suite
, "blockdev writev readv 4k", blockdev_writev_readv_4k
) == NULL
1192 || CU_add_test(suite
, "blockdev writev readv 30 x 4k",
1193 blockdev_writev_readv_30x4k
) == NULL
1194 || CU_add_test(suite
, "blockdev writev readv 512 bytes",
1195 blockdev_writev_readv_512Bytes
) == NULL
1196 || CU_add_test(suite
, "blockdev writev readv size > 128k",
1197 blockdev_writev_readv_size_gt_128k
) == NULL
1198 || CU_add_test(suite
, "blockdev writev readv size > 128k in two iovs",
1199 blockdev_writev_readv_size_gt_128k_two_iov
) == NULL
1200 || CU_add_test(suite
, "blockdev comparev and writev", blockdev_comparev_and_writev
) == NULL
1201 || CU_add_test(suite
, "blockdev nvme passthru rw",
1202 blockdev_test_nvme_passthru_rw
) == NULL
1203 || CU_add_test(suite
, "blockdev nvme passthru vendor specific",
1204 blockdev_test_nvme_passthru_vendor_specific
) == NULL
1205 || CU_add_test(suite
, "blockdev nvme admin passthru",
1206 blockdev_test_nvme_admin_passthru
) == NULL
1208 CU_cleanup_registry();
1209 rc
= CU_get_error();
1216 __run_ut_thread(void *arg
)
1218 struct spdk_jsonrpc_request
*request
= arg
;
1220 struct io_target
*target
;
1221 unsigned num_failures
;
1223 if (CU_initialize_registry() != CUE_SUCCESS
) {
1224 /* CUnit error, probably won't recover */
1225 rc
= CU_get_error();
1226 stop_init_thread(-rc
, request
);
1229 target
= g_io_targets
;
1230 while (target
!= NULL
) {
1231 rc
= __setup_ut_on_single_target(target
);
1233 /* CUnit error, probably won't recover */
1234 stop_init_thread(-rc
, request
);
1236 target
= target
->next
;
1238 CU_basic_set_mode(CU_BRM_VERBOSE
);
1239 CU_basic_run_tests();
1240 num_failures
= CU_get_number_of_failures();
1241 CU_cleanup_registry();
1243 stop_init_thread(num_failures
, request
);
1247 __construct_targets(void *arg
)
1249 if (bdevio_construct_targets() < 0) {
1254 spdk_thread_send_msg(g_thread_ut
, __run_ut_thread
, NULL
);
1258 test_main(void *arg1
)
1260 struct spdk_cpuset tmpmask
= {}, *appmask
;
1261 uint32_t cpu
, init_cpu
;
1263 pthread_mutex_init(&g_test_mutex
, NULL
);
1264 pthread_cond_init(&g_test_cond
, NULL
);
1266 appmask
= spdk_app_get_core_mask();
1268 if (spdk_cpuset_count(appmask
) < 3) {
1273 init_cpu
= spdk_env_get_current_core();
1274 g_thread_init
= spdk_get_thread();
1276 for (cpu
= 0; cpu
< SPDK_ENV_LCORE_ID_ANY
; cpu
++) {
1277 if (cpu
!= init_cpu
&& spdk_cpuset_get_cpu(appmask
, cpu
)) {
1278 spdk_cpuset_zero(&tmpmask
);
1279 spdk_cpuset_set_cpu(&tmpmask
, cpu
, true);
1280 g_thread_ut
= spdk_thread_create("ut_thread", &tmpmask
);
1285 if (cpu
== SPDK_ENV_LCORE_ID_ANY
) {
1290 for (cpu
++; cpu
< SPDK_ENV_LCORE_ID_ANY
; cpu
++) {
1291 if (cpu
!= init_cpu
&& spdk_cpuset_get_cpu(appmask
, cpu
)) {
1292 spdk_cpuset_zero(&tmpmask
);
1293 spdk_cpuset_set_cpu(&tmpmask
, cpu
, true);
1294 g_thread_io
= spdk_thread_create("io_thread", &tmpmask
);
1299 if (cpu
== SPDK_ENV_LCORE_ID_ANY
) {
1304 if (g_wait_for_tests
) {
1305 /* Do not perform any tests until RPC is received */
1309 spdk_thread_send_msg(g_thread_init
, __construct_targets
, NULL
);
1315 printf(" -w start bdevio app and wait for RPC to start the tests\n");
1319 bdevio_parse_arg(int ch
, char *arg
)
1323 g_wait_for_tests
= true;
/* Decoded parameters of the perform_tests RPC: an optional bdev name. */
struct rpc_perform_tests {
	char *name;
};

static void
free_rpc_perform_tests(struct rpc_perform_tests *r)
{
	free(r->name);
}
[] = {
1342 {"name", offsetof(struct rpc_perform_tests
, name
), spdk_json_decode_string
, true},
1346 rpc_perform_tests_cb(unsigned num_failures
, struct spdk_jsonrpc_request
*request
)
1348 struct spdk_json_write_ctx
*w
;
1350 if (num_failures
== 0) {
1351 w
= spdk_jsonrpc_begin_result(request
);
1352 spdk_json_write_uint32(w
, num_failures
);
1353 spdk_jsonrpc_end_result(request
, w
);
1355 spdk_jsonrpc_send_error_response_fmt(request
, SPDK_JSONRPC_ERROR_INTERNAL_ERROR
,
1356 "%d test cases failed", num_failures
);
1361 rpc_perform_tests(struct spdk_jsonrpc_request
*request
, const struct spdk_json_val
*params
)
1363 struct rpc_perform_tests req
= {NULL
};
1364 struct spdk_bdev
*bdev
;
1367 if (params
&& spdk_json_decode_object(params
, rpc_perform_tests_decoders
,
1368 SPDK_COUNTOF(rpc_perform_tests_decoders
),
1370 SPDK_ERRLOG("spdk_json_decode_object failed\n");
1371 spdk_jsonrpc_send_error_response(request
, SPDK_JSONRPC_ERROR_INVALID_PARAMS
, "Invalid parameters");
1376 bdev
= spdk_bdev_get_by_name(req
.name
);
1378 SPDK_ERRLOG("Bdev '%s' does not exist\n", req
.name
);
1379 spdk_jsonrpc_send_error_response_fmt(request
, SPDK_JSONRPC_ERROR_INTERNAL_ERROR
,
1380 "Bdev '%s' does not exist: %s",
1381 req
.name
, spdk_strerror(ENODEV
));
1384 rc
= bdevio_construct_target(bdev
);
1386 SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev
));
1387 spdk_jsonrpc_send_error_response_fmt(request
, SPDK_JSONRPC_ERROR_INTERNAL_ERROR
,
1388 "Could not construct target for bdev '%s': %s",
1389 spdk_bdev_get_name(bdev
), spdk_strerror(-rc
));
1393 rc
= bdevio_construct_targets();
1395 SPDK_ERRLOG("Could not construct targets for all bdevs\n");
1396 spdk_jsonrpc_send_error_response_fmt(request
, SPDK_JSONRPC_ERROR_INTERNAL_ERROR
,
1397 "Could not construct targets for all bdevs: %s",
1398 spdk_strerror(-rc
));
1402 free_rpc_perform_tests(&req
);
1404 spdk_thread_send_msg(g_thread_ut
, __run_ut_thread
, request
);
1409 free_rpc_perform_tests(&req
);
1411 SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests
, SPDK_RPC_RUNTIME
)
1414 main(int argc
, char **argv
)
1417 struct spdk_app_opts opts
= {};
1419 spdk_app_opts_init(&opts
);
1420 opts
.name
= "bdevio";
1421 opts
.reactor_mask
= "0x7";
1423 if ((rc
= spdk_app_parse_args(argc
, argv
, &opts
, "w", NULL
,
1424 bdevio_parse_arg
, bdevio_usage
)) !=
1425 SPDK_APP_PARSE_ARGS_SUCCESS
) {
1429 rc
= spdk_app_start(&opts
, test_main
, NULL
);