4 * Copyright (c) Intel Corporation.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include "spdk/stdinc.h"
36 #include "spdk/bdev.h"
37 #include "spdk/copy_engine.h"
40 #include "spdk/thread.h"
41 #include "spdk/event.h"
43 #include "spdk/util.h"
44 #include "spdk/string.h"
46 #include "CUnit/Basic.h"
/* Maximum number of iovec entries sgl_chop_buffer() may build per request
 * (bounds the iov[] array in struct bdevio_request). */
48 #define BUFFER_IOVS 1024
/* Upper bound (260 KiB) on per-test transfer sizes; test cases assert
 * data_length < BUFFER_SIZE. NOTE(review): the expansion is unparenthesized
 * (260 * 1024) -- harmless for the plain comparisons used in this file, but
 * (260 * 1024) would be more robust against future operator-precedence bugs. */
49 #define BUFFER_SIZE 260 * 1024
50 #define BDEV_TASK_ARRAY_SIZE 2048
/* Synchronization pair used by execute_spdk_function(): the caller blocks on
 * g_test_cond under g_test_mutex until the event run on the I/O lcore signals
 * completion. */
52 pthread_mutex_t g_test_mutex
;
53 pthread_cond_t g_test_cond
;
/* Lcore assignments established in test_main(): init (app/first core),
 * ut (CUnit runner), io (bdev I/O submissions). */
55 static uint32_t g_lcore_id_init
;
56 static uint32_t g_lcore_id_ut
;
57 static uint32_t g_lcore_id_io
;
/* Set via the -w option (bdevio_parse_arg): defer all tests until a
 * "perform_tests" RPC arrives instead of running them at startup. */
58 static bool g_wait_for_tests
= false;
/* Members of struct io_target: one instance per leaf bdev under test, holding
 * the bdev, its open descriptor and I/O channel; targets form a singly-linked
 * list via 'next'. (NOTE(review): the struct's opening/closing lines are
 * elided from this extract.) */
61 struct spdk_bdev
*bdev
;
62 struct spdk_bdev_desc
*bdev_desc
;
63 struct spdk_io_channel
*ch
;
64 struct io_target
*next
;
/* One outstanding test I/O request. (NOTE(review): members referenced
 * elsewhere in this file -- buf, data_len, offset, iovcnt -- are elided from
 * this extract; only iov[] and target are visible here.) */
67 struct bdevio_request
{
71 struct iovec iov
[BUFFER_IOVS
];
73 struct io_target
*target
;
/* Head of the constructed-target list, and the target the current CUnit
 * suite is exercising. */
76 struct io_target
*g_io_targets
= NULL
;
77 struct io_target
*g_current_io_target
= NULL
;
/* Forward declaration: completion hook used when tests are driven by the
 * "perform_tests" JSON-RPC (see rpc_perform_tests). */
78 static void rpc_perform_tests_cb(unsigned num_failures
, struct spdk_jsonrpc_request
*request
);
/* Run fn(arg1, arg2) as an SPDK event on the I/O lcore and block the calling
 * thread on g_test_cond until the handler's waker signals completion.
 * (NOTE(review): the return type, braces and any trailing lines of this
 * definition are elided from this extract.) */
81 execute_spdk_function(spdk_event_fn fn
, void *arg1
, void *arg2
)
83 struct spdk_event
*event
;
85 event
= spdk_event_allocate(g_lcore_id_io
, fn
, arg1
, arg2
);
/* Take the mutex before firing the event so the wake-up signal cannot be
 * lost between spdk_event_call() and pthread_cond_wait(). */
86 pthread_mutex_lock(&g_test_mutex
);
87 spdk_event_call(event
);
88 pthread_cond_wait(&g_test_cond
, &g_test_mutex
);
89 pthread_mutex_unlock(&g_test_mutex
);
/* Fragment of the companion waker function (its header is elided from this
 * extract): signals g_test_cond under g_test_mutex to release the thread
 * blocked in execute_spdk_function(). */
95 pthread_mutex_lock(&g_test_mutex
);
96 pthread_cond_signal(&g_test_cond
);
97 pthread_mutex_unlock(&g_test_mutex
);
/* Event handler run on the I/O lcore: acquires an I/O channel for the
 * target's open bdev descriptor and stores it in target->ch. (NOTE(review):
 * braces and any trailing statements -- presumably including the wake of the
 * blocked caller -- are elided from this extract.) */
101 __get_io_channel(void *arg1
, void *arg2
)
103 struct io_target
*target
= arg1
;
105 target
->ch
= spdk_bdev_get_io_channel(target
->bdev_desc
);
/* Allocate an io_target for 'bdev', open the bdev read/write, print its
 * geometry, fetch an I/O channel on the I/O lcore, and push the target onto
 * the global g_io_targets list. (NOTE(review): the 'rc' declaration, the
 * error-branch bodies and the return statements are elided from this
 * extract.) */
111 bdevio_construct_target(struct spdk_bdev
*bdev
)
113 struct io_target
*target
;
115 uint64_t num_blocks
= spdk_bdev_get_num_blocks(bdev
);
116 uint32_t block_size
= spdk_bdev_get_block_size(bdev
);
118 target
= malloc(sizeof(struct io_target
));
119 if (target
== NULL
) {
/* Open with write access (second argument true); on failure the error path
 * below logs the bdev name and rc. */
123 rc
= spdk_bdev_open(bdev
, true, NULL
, NULL
, &target
->bdev_desc
);
126 SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev
), rc
);
130 printf(" %s: %" PRIu64
" blocks of %" PRIu32
" bytes (%" PRIu64
" MiB)\n",
131 spdk_bdev_get_name(bdev
),
132 num_blocks
, block_size
,
133 (num_blocks
* block_size
+ 1024 * 1024 - 1) / (1024 * 1024));
/* Link at the head of the list, then acquire the channel on the I/O lcore. */
136 target
->next
= g_io_targets
;
137 execute_spdk_function(__get_io_channel
, target
, NULL
);
138 g_io_targets
= target
;
/* Walk every leaf bdev and construct an io_target for each; logs failures per
 * bdev and reports an error when no targets could be built at all.
 * (NOTE(review): the 'rc' declaration, branch bodies, braces and return
 * statements are elided from this extract.) */
144 bdevio_construct_targets(void)
146 struct spdk_bdev
*bdev
;
149 printf("I/O targets:\n");
151 bdev
= spdk_bdev_first_leaf();
152 while (bdev
!= NULL
) {
153 rc
= bdevio_construct_target(bdev
);
155 SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev
), rc
);
158 bdev
= spdk_bdev_next_leaf(bdev
);
161 if (g_io_targets
== NULL
) {
162 SPDK_ERRLOG("No bdevs to perform tests on\n");
/* Event handler run on the I/O lcore: releases the target's I/O channel
 * (counterpart of __get_io_channel). (NOTE(review): braces and trailing
 * statements are elided from this extract.) */
170 __put_io_channel(void *arg1
, void *arg2
)
172 struct io_target
*target
= arg1
;
174 spdk_put_io_channel(target
->ch
);
/* Tear down every constructed target: release its channel on the I/O lcore,
 * close its bdev descriptor and unlink it from g_io_targets. (NOTE(review):
 * the line one would expect between the unlink and the next iteration --
 * presumably free(target) -- is elided from this extract; confirm against
 * the full file that the target memory is released.) */
179 bdevio_cleanup_targets(void)
181 struct io_target
*target
;
183 target
= g_io_targets
;
184 while (target
!= NULL
) {
185 execute_spdk_function(__put_io_channel
, target
, NULL
);
186 spdk_bdev_close(target
->bdev_desc
);
187 g_io_targets
= target
->next
;
189 target
= g_io_targets
;
/* Result of the most recent asynchronous I/O, recorded by
 * quick_test_complete() and inspected by the test cases. */
193 static bool g_completion_success
;
/* Allocate a 4 KiB-aligned, zeroed DMA buffer of 'size' bytes into *buf and
 * fill it with 'pattern'. NOTE(review): the spdk_dma_zmalloc() result is
 * used unchecked -- on allocation failure memset() would be handed NULL. */
196 initialize_buffer(char **buf
, int pattern
, int size
)
198 *buf
= spdk_dma_zmalloc(size
, 0x1000, NULL
);
199 memset(*buf
, pattern
, size
);
/* bdev I/O completion callback shared by the read/write/reset helpers:
 * records success in g_completion_success and frees the bdev_io.
 * (NOTE(review): braces and trailing statements -- presumably the wake of
 * the thread blocked in execute_spdk_function() -- are elided from this
 * extract.) */
203 quick_test_complete(struct spdk_bdev_io
*bdev_io
, bool success
, void *arg
)
205 g_completion_success
= success
;
206 spdk_bdev_free_io(bdev_io
);
/* Event handler on the I/O lcore: submits the request either as a vectored
 * write (spdk_bdev_writev with req->iov/req->iovcnt) or as a contiguous
 * write (spdk_bdev_write with req->buf); the selecting condition --
 * presumably on req->iovcnt -- is elided from this extract. On submission
 * failure g_completion_success is cleared. (NOTE(review): the 'rc'
 * declaration, braces and the wake call are also elided.) */
211 __blockdev_write(void *arg1
, void *arg2
)
213 struct bdevio_request
*req
= arg1
;
214 struct io_target
*target
= req
->target
;
218 rc
= spdk_bdev_writev(target
->bdev_desc
, target
->ch
, req
->iov
, req
->iovcnt
, req
->offset
,
219 req
->data_len
, quick_test_complete
, NULL
);
221 rc
= spdk_bdev_write(target
->bdev_desc
, target
->ch
, req
->buf
, req
->offset
,
222 req
->data_len
, quick_test_complete
, NULL
);
226 g_completion_success
= false;
232 __blockdev_write_zeroes(void *arg1
, void *arg2
)
234 struct bdevio_request
*req
= arg1
;
235 struct io_target
*target
= req
->target
;
238 rc
= spdk_bdev_write_zeroes(target
->bdev_desc
, target
->ch
, req
->offset
,
239 req
->data_len
, quick_test_complete
, NULL
);
241 g_completion_success
= false;
/* Split req->buf into iovec segments of at most 'iov_len' bytes each,
 * populating req->iov[] and incrementing req->iovcnt, bounded by BUFFER_IOVS
 * entries; the final CU assert requires that the entire data_len was
 * consumed. (NOTE(review): the short-final-segment branch body and the
 * statements advancing 'buf' and decrementing 'data_len' are elided from
 * this extract.) */
247 sgl_chop_buffer(struct bdevio_request
*req
, int iov_len
)
249 int data_len
= req
->data_len
;
250 char *buf
= req
->buf
;
257 for (; data_len
> 0 && req
->iovcnt
< BUFFER_IOVS
; req
->iovcnt
++) {
258 if (data_len
< iov_len
) {
262 req
->iov
[req
->iovcnt
].iov_base
= buf
;
263 req
->iov
[req
->iovcnt
].iov_len
= iov_len
;
/* Fatal assert: a non-zero remainder means the buffer did not fit in
 * BUFFER_IOVS segments of iov_len bytes. */
269 CU_ASSERT_EQUAL_FATAL(data_len
, 0);
/* Build a bdevio_request for 'target' around tx_buf/offset/data_len, chop it
 * into iovecs via sgl_chop_buffer() (iov_len selects segment size), then run
 * __blockdev_write on the I/O lcore and wait for completion; the result
 * lands in g_completion_success. (NOTE(review): the assignments of
 * req.target/req.buf/req.offset and surrounding braces are elided from this
 * extract.) */
273 blockdev_write(struct io_target
*target
, char *tx_buf
,
274 uint64_t offset
, int data_len
, int iov_len
)
276 struct bdevio_request req
;
280 req
.data_len
= data_len
;
282 sgl_chop_buffer(&req
, iov_len
);
284 g_completion_success
= false;
286 execute_spdk_function(__blockdev_write
, &req
, NULL
);
290 blockdev_write_zeroes(struct io_target
*target
, char *tx_buf
,
291 uint64_t offset
, int data_len
)
293 struct bdevio_request req
;
297 req
.data_len
= data_len
;
300 g_completion_success
= false;
302 execute_spdk_function(__blockdev_write_zeroes
, &req
, NULL
);
306 __blockdev_read(void *arg1
, void *arg2
)
308 struct bdevio_request
*req
= arg1
;
309 struct io_target
*target
= req
->target
;
313 rc
= spdk_bdev_readv(target
->bdev_desc
, target
->ch
, req
->iov
, req
->iovcnt
, req
->offset
,
314 req
->data_len
, quick_test_complete
, NULL
);
316 rc
= spdk_bdev_read(target
->bdev_desc
, target
->ch
, req
->buf
, req
->offset
,
317 req
->data_len
, quick_test_complete
, NULL
);
321 g_completion_success
= false;
327 blockdev_read(struct io_target
*target
, char *rx_buf
,
328 uint64_t offset
, int data_len
, int iov_len
)
330 struct bdevio_request req
;
334 req
.data_len
= data_len
;
337 sgl_chop_buffer(&req
, iov_len
);
339 g_completion_success
= false;
341 execute_spdk_function(__blockdev_read
, &req
, NULL
);
/* Compare rx_buf against tx_buf over data_length bytes with memcmp, free
 * both DMA buffers, and return the comparison result (0 on match).
 * (NOTE(review): the 'rc' declaration, braces and the return statement are
 * elided from this extract.) */
345 blockdev_write_read_data_match(char *rx_buf
, char *tx_buf
, int data_length
)
348 rc
= memcmp(rx_buf
, tx_buf
, data_length
);
350 spdk_dma_free(rx_buf
);
351 spdk_dma_free(tx_buf
);
/* Core write-then-read verification helper shared by all the test cases:
 * writes 'pattern' (or zeroes, when write_zeroes is set) of data_length
 * bytes at 'offset' on g_current_io_target, reads the range back, and
 * compares. expected_rc == 0 means the I/O is expected to succeed, otherwise
 * completion is expected to fail; iov_len > 0 selects the vectored path.
 * (NOTE(review): the tx_buf/rx_buf declarations, the write/write_zeroes
 * if/else structure, the early-return body for out-of-range sizes and the
 * closing braces are elided from this extract.) */
357 blockdev_write_read(uint32_t data_length
, uint32_t iov_len
, int pattern
, uint64_t offset
,
358 int expected_rc
, bool write_zeroes
)
360 struct io_target
*target
;
365 target
= g_current_io_target
;
/* Reject sizes below one block or exceeding the device capacity. */
367 if (data_length
< spdk_bdev_get_block_size(target
->bdev
) ||
368 data_length
/ spdk_bdev_get_block_size(target
->bdev
) > spdk_bdev_get_num_blocks(target
->bdev
)) {
/* Plain-write path: tx holds the pattern, rx starts zeroed. */
373 initialize_buffer(&tx_buf
, pattern
, data_length
);
374 initialize_buffer(&rx_buf
, 0, data_length
);
376 blockdev_write(target
, tx_buf
, offset
, data_length
, iov_len
);
/* Write-zeroes path: tx is zeroed (the expected result), rx is pre-filled
 * with the pattern so a no-op read would be detected. */
378 initialize_buffer(&tx_buf
, 0, data_length
);
379 initialize_buffer(&rx_buf
, pattern
, data_length
);
381 blockdev_write_zeroes(target
, tx_buf
, offset
, data_length
);
385 if (expected_rc
== 0) {
386 CU_ASSERT_EQUAL(g_completion_success
, true);
388 CU_ASSERT_EQUAL(g_completion_success
, false);
390 blockdev_read(target
, rx_buf
, offset
, data_length
, iov_len
);
392 if (expected_rc
== 0) {
393 CU_ASSERT_EQUAL(g_completion_success
, true);
395 CU_ASSERT_EQUAL(g_completion_success
, false);
/* Only compare buffers when the read actually completed successfully. */
398 if (g_completion_success
) {
399 rc
= blockdev_write_read_data_match(rx_buf
, tx_buf
, data_length
);
400 /* Assert the write by comparing it with values read
401 * from each blockdev */
402 CU_ASSERT_EQUAL(rc
, 0);
407 blockdev_write_read_4k(void)
409 uint32_t data_length
;
416 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
419 /* Params are valid, hence the expected return value
420 * of write and read for all blockdevs is 0. */
423 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 0);
427 blockdev_write_zeroes_read_4k(void)
429 uint32_t data_length
;
438 /* Params are valid, hence the expected return value
439 * of write_zeroes and read for all blockdevs is 0. */
442 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 1);
446 * This i/o will not have to split at the bdev layer.
449 blockdev_write_zeroes_read_1m(void)
451 uint32_t data_length
;
457 data_length
= 1048576;
460 /* Params are valid, hence the expected return value
461 * of write_zeroes and read for all blockdevs is 0. */
464 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 1);
468 * This i/o will have to split at the bdev layer if
469 * write-zeroes is not supported by the bdev.
472 blockdev_write_zeroes_read_3m(void)
474 uint32_t data_length
;
480 data_length
= 3145728;
483 /* Params are valid, hence the expected return value
484 * of write_zeroes and read for all blockdevs is 0. */
487 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 1);
491 * This i/o will have to split at the bdev layer if
492 * write-zeroes is not supported by the bdev. It also
493 * tests a write size that is not an even multiple of
494 * the bdev layer zero buffer size.
497 blockdev_write_zeroes_read_3m_500k(void)
499 uint32_t data_length
;
504 /* Data size = 3.5M */
505 data_length
= 3670016;
508 /* Params are valid, hence the expected return value
509 * of write_zeroes and read for all blockdevs is 0. */
512 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 1);
516 blockdev_writev_readv_4k(void)
518 uint32_t data_length
, iov_len
;
526 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
529 /* Params are valid, hence the expected return value
530 * of write and read for all blockdevs is 0. */
533 blockdev_write_read(data_length
, iov_len
, pattern
, offset
, expected_rc
, 0);
537 blockdev_writev_readv_30x4k(void)
539 uint32_t data_length
, iov_len
;
545 data_length
= 4096 * 30;
547 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
550 /* Params are valid, hence the expected return value
551 * of write and read for all blockdevs is 0. */
554 blockdev_write_read(data_length
, iov_len
, pattern
, offset
, expected_rc
, 0);
558 blockdev_write_read_512Bytes(void)
560 uint32_t data_length
;
565 /* Data size = 512 */
567 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
570 /* Params are valid, hence the expected return value
571 * of write and read for all blockdevs is 0. */
574 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 0);
578 blockdev_writev_readv_512Bytes(void)
580 uint32_t data_length
, iov_len
;
585 /* Data size = 512 */
588 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
591 /* Params are valid, hence the expected return value
592 * of write and read for all blockdevs is 0. */
595 blockdev_write_read(data_length
, iov_len
, pattern
, offset
, expected_rc
, 0);
599 blockdev_write_read_size_gt_128k(void)
601 uint32_t data_length
;
606 /* Data size = 132K */
607 data_length
= 135168;
608 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
611 /* Params are valid, hence the expected return value
612 * of write and read for all blockdevs is 0. */
615 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 0);
619 blockdev_writev_readv_size_gt_128k(void)
621 uint32_t data_length
, iov_len
;
626 /* Data size = 132K */
627 data_length
= 135168;
629 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
632 /* Params are valid, hence the expected return value
633 * of write and read for all blockdevs is 0. */
636 blockdev_write_read(data_length
, iov_len
, pattern
, offset
, expected_rc
, 0);
640 blockdev_writev_readv_size_gt_128k_two_iov(void)
642 uint32_t data_length
, iov_len
;
647 /* Data size = 132K */
648 data_length
= 135168;
649 iov_len
= 128 * 1024;
650 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
653 /* Params are valid, hence the expected return value
654 * of write and read for all blockdevs is 0. */
657 blockdev_write_read(data_length
, iov_len
, pattern
, offset
, expected_rc
, 0);
661 blockdev_write_read_invalid_size(void)
663 uint32_t data_length
;
668 /* Data size is not a multiple of the block size */
669 data_length
= 0x1015;
670 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
673 /* Params are invalid, hence the expected return value
674 * of write and read for all blockdevs is < 0 */
677 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 0);
681 blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
683 struct io_target
*target
;
684 struct spdk_bdev
*bdev
;
691 target
= g_current_io_target
;
694 block_size
= spdk_bdev_get_block_size(bdev
);
696 /* The start offset has been set to a marginal value
697 * such that offset + nbytes == Total size of
699 offset
= ((spdk_bdev_get_num_blocks(bdev
) - 1) * block_size
);
701 initialize_buffer(&tx_buf
, 0xA3, block_size
);
702 initialize_buffer(&rx_buf
, 0, block_size
);
704 blockdev_write(target
, tx_buf
, offset
, block_size
, 0);
705 CU_ASSERT_EQUAL(g_completion_success
, true);
707 blockdev_read(target
, rx_buf
, offset
, block_size
, 0);
708 CU_ASSERT_EQUAL(g_completion_success
, true);
710 rc
= blockdev_write_read_data_match(rx_buf
, tx_buf
, block_size
);
711 /* Assert the write by comparing it with values read
712 * from each blockdev */
713 CU_ASSERT_EQUAL(rc
, 0);
717 blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
719 struct io_target
*target
;
720 struct spdk_bdev
*bdev
;
727 /* Tests the overflow condition of the blockdevs. */
729 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
732 target
= g_current_io_target
;
735 /* The start offset has been set to a valid value
736 * but offset + nbytes is greater than the Total size
737 * of the blockdev. The test should fail. */
738 offset
= ((spdk_bdev_get_num_blocks(bdev
) * spdk_bdev_get_block_size(bdev
)) - 1024);
740 initialize_buffer(&tx_buf
, pattern
, data_length
);
741 initialize_buffer(&rx_buf
, 0, data_length
);
743 blockdev_write(target
, tx_buf
, offset
, data_length
, 0);
744 CU_ASSERT_EQUAL(g_completion_success
, false);
746 blockdev_read(target
, rx_buf
, offset
, data_length
, 0);
747 CU_ASSERT_EQUAL(g_completion_success
, false);
751 blockdev_write_read_max_offset(void)
759 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
760 /* The start offset has been set to UINT64_MAX such that
761 * adding nbytes wraps around and points to an invalid address. */
764 /* Params are invalid, hence the expected return value
765 * of write and read for all blockdevs is < 0 */
768 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 0);
772 blockdev_overlapped_write_read_8k(void)
781 CU_ASSERT_TRUE(data_length
< BUFFER_SIZE
);
784 /* Params are valid, hence the expected return value
785 * of write and read for all blockdevs is 0. */
787 /* Assert the write by comparing it with values read
788 * from the same offset for each blockdev */
789 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 0);
791 /* Overwrite the pattern 0xbb of size 8K on an address offset overlapping
792 * with the address written above and assert the new value in
793 * the overlapped address range */
794 /* Populate 8k with value 0xBB */
796 /* Offset = 6144; Overlap offset addresses and write value 0xbb */
798 /* Assert the write by comparing it with values read
799 * from the overlapped offset for each blockdev */
800 blockdev_write_read(data_length
, 0, pattern
, offset
, expected_rc
, 0);
804 __blockdev_reset(void *arg1
, void *arg2
)
806 struct bdevio_request
*req
= arg1
;
807 struct io_target
*target
= req
->target
;
810 rc
= spdk_bdev_reset(target
->bdev_desc
, target
->ch
, quick_test_complete
, NULL
);
812 g_completion_success
= false;
818 blockdev_test_reset(void)
820 struct bdevio_request req
;
821 struct io_target
*target
;
823 target
= g_current_io_target
;
826 g_completion_success
= false;
828 execute_spdk_function(__blockdev_reset
, &req
, NULL
);
830 /* Workaround: NVMe-oF target doesn't support reset yet - so for now
831 * don't fail the test if it's an NVMe bdev.
833 if (!spdk_bdev_io_type_supported(target
->bdev
, SPDK_BDEV_IO_TYPE_NVME_IO
)) {
834 CU_ASSERT_EQUAL(g_completion_success
, true);
838 struct bdevio_passthrough_request
{
839 struct spdk_nvme_cmd cmd
;
842 struct io_target
*target
;
848 nvme_pt_test_complete(struct spdk_bdev_io
*bdev_io
, bool success
, void *arg
)
850 struct bdevio_passthrough_request
*pt_req
= arg
;
852 spdk_bdev_io_get_nvme_status(bdev_io
, &pt_req
->sct
, &pt_req
->sc
);
853 spdk_bdev_free_io(bdev_io
);
858 __blockdev_nvme_passthru(void *arg1
, void *arg2
)
860 struct bdevio_passthrough_request
*pt_req
= arg1
;
861 struct io_target
*target
= pt_req
->target
;
864 rc
= spdk_bdev_nvme_io_passthru(target
->bdev_desc
, target
->ch
,
865 &pt_req
->cmd
, pt_req
->buf
, pt_req
->len
,
866 nvme_pt_test_complete
, pt_req
);
873 blockdev_test_nvme_passthru_rw(void)
875 struct bdevio_passthrough_request pt_req
;
876 void *write_buf
, *read_buf
;
877 struct io_target
*target
;
879 target
= g_current_io_target
;
881 if (!spdk_bdev_io_type_supported(target
->bdev
, SPDK_BDEV_IO_TYPE_NVME_IO
)) {
885 memset(&pt_req
, 0, sizeof(pt_req
));
886 pt_req
.target
= target
;
887 pt_req
.cmd
.opc
= SPDK_NVME_OPC_WRITE
;
889 *(uint64_t *)&pt_req
.cmd
.cdw10
= 4;
890 pt_req
.cmd
.cdw12
= 0;
892 pt_req
.len
= spdk_bdev_get_block_size(target
->bdev
);
893 write_buf
= spdk_dma_malloc(pt_req
.len
, 0, NULL
);
894 memset(write_buf
, 0xA5, pt_req
.len
);
895 pt_req
.buf
= write_buf
;
897 pt_req
.sct
= SPDK_NVME_SCT_VENDOR_SPECIFIC
;
898 pt_req
.sc
= SPDK_NVME_SC_INVALID_FIELD
;
899 execute_spdk_function(__blockdev_nvme_passthru
, &pt_req
, NULL
);
900 CU_ASSERT(pt_req
.sct
== SPDK_NVME_SCT_GENERIC
);
901 CU_ASSERT(pt_req
.sc
== SPDK_NVME_SC_SUCCESS
);
903 pt_req
.cmd
.opc
= SPDK_NVME_OPC_READ
;
904 read_buf
= spdk_dma_zmalloc(pt_req
.len
, 0, NULL
);
905 pt_req
.buf
= read_buf
;
907 pt_req
.sct
= SPDK_NVME_SCT_VENDOR_SPECIFIC
;
908 pt_req
.sc
= SPDK_NVME_SC_INVALID_FIELD
;
909 execute_spdk_function(__blockdev_nvme_passthru
, &pt_req
, NULL
);
910 CU_ASSERT(pt_req
.sct
== SPDK_NVME_SCT_GENERIC
);
911 CU_ASSERT(pt_req
.sc
== SPDK_NVME_SC_SUCCESS
);
913 CU_ASSERT(!memcmp(read_buf
, write_buf
, pt_req
.len
));
914 spdk_dma_free(read_buf
);
915 spdk_dma_free(write_buf
);
919 blockdev_test_nvme_passthru_vendor_specific(void)
921 struct bdevio_passthrough_request pt_req
;
922 struct io_target
*target
;
924 target
= g_current_io_target
;
926 if (!spdk_bdev_io_type_supported(target
->bdev
, SPDK_BDEV_IO_TYPE_NVME_IO
)) {
930 memset(&pt_req
, 0, sizeof(pt_req
));
931 pt_req
.target
= target
;
932 pt_req
.cmd
.opc
= 0x7F; /* choose known invalid opcode */
935 pt_req
.sct
= SPDK_NVME_SCT_VENDOR_SPECIFIC
;
936 pt_req
.sc
= SPDK_NVME_SC_SUCCESS
;
937 execute_spdk_function(__blockdev_nvme_passthru
, &pt_req
, NULL
);
938 CU_ASSERT(pt_req
.sct
== SPDK_NVME_SCT_GENERIC
);
939 CU_ASSERT(pt_req
.sc
== SPDK_NVME_SC_INVALID_OPCODE
);
943 __blockdev_nvme_admin_passthru(void *arg1
, void *arg2
)
945 struct bdevio_passthrough_request
*pt_req
= arg1
;
946 struct io_target
*target
= pt_req
->target
;
949 rc
= spdk_bdev_nvme_admin_passthru(target
->bdev_desc
, target
->ch
,
950 &pt_req
->cmd
, pt_req
->buf
, pt_req
->len
,
951 nvme_pt_test_complete
, pt_req
);
958 blockdev_test_nvme_admin_passthru(void)
960 struct io_target
*target
;
961 struct bdevio_passthrough_request pt_req
;
963 target
= g_current_io_target
;
965 if (!spdk_bdev_io_type_supported(target
->bdev
, SPDK_BDEV_IO_TYPE_NVME_ADMIN
)) {
969 memset(&pt_req
, 0, sizeof(pt_req
));
970 pt_req
.target
= target
;
971 pt_req
.cmd
.opc
= SPDK_NVME_OPC_IDENTIFY
;
973 *(uint64_t *)&pt_req
.cmd
.cdw10
= SPDK_NVME_IDENTIFY_CTRLR
;
975 pt_req
.len
= sizeof(struct spdk_nvme_ctrlr_data
);
976 pt_req
.buf
= spdk_dma_malloc(pt_req
.len
, 0, NULL
);
978 pt_req
.sct
= SPDK_NVME_SCT_GENERIC
;
979 pt_req
.sc
= SPDK_NVME_SC_SUCCESS
;
980 execute_spdk_function(__blockdev_nvme_admin_passthru
, &pt_req
, NULL
);
981 CU_ASSERT(pt_req
.sct
== SPDK_NVME_SCT_GENERIC
);
982 CU_ASSERT(pt_req
.sc
== SPDK_NVME_SC_SUCCESS
);
986 __stop_init_thread(void *arg1
, void *arg2
)
988 unsigned num_failures
= (unsigned)(uintptr_t)arg1
;
989 struct spdk_jsonrpc_request
*request
= arg2
;
991 bdevio_cleanup_targets();
992 if (g_wait_for_tests
) {
993 /* Do not stop the app yet, wait for another RPC */
994 rpc_perform_tests_cb(num_failures
, request
);
997 spdk_app_stop(num_failures
);
1001 stop_init_thread(unsigned num_failures
, struct spdk_jsonrpc_request
*request
)
1003 struct spdk_event
*event
;
1005 event
= spdk_event_allocate(g_lcore_id_init
, __stop_init_thread
,
1006 (void *)(uintptr_t)num_failures
, request
);
1007 spdk_event_call(event
);
1013 if (g_current_io_target
== NULL
) {
1014 g_current_io_target
= g_io_targets
;
1022 g_current_io_target
= g_current_io_target
->next
;
1026 #define SUITE_NAME_MAX 64
1029 __setup_ut_on_single_target(struct io_target
*target
)
1032 CU_pSuite suite
= NULL
;
1033 char name
[SUITE_NAME_MAX
];
1035 snprintf(name
, sizeof(name
), "bdevio tests on: %s", spdk_bdev_get_name(target
->bdev
));
1036 suite
= CU_add_suite(name
, suite_init
, suite_fini
);
1037 if (suite
== NULL
) {
1038 CU_cleanup_registry();
1039 rc
= CU_get_error();
1044 CU_add_test(suite
, "blockdev write read 4k", blockdev_write_read_4k
) == NULL
1045 || CU_add_test(suite
, "blockdev write zeroes read 4k", blockdev_write_zeroes_read_4k
) == NULL
1046 || CU_add_test(suite
, "blockdev write zeroes read 1m", blockdev_write_zeroes_read_1m
) == NULL
1047 || CU_add_test(suite
, "blockdev write zeroes read 3m", blockdev_write_zeroes_read_3m
) == NULL
1048 || CU_add_test(suite
, "blockdev write zeroes read 3.5m", blockdev_write_zeroes_read_3m_500k
) == NULL
1049 || CU_add_test(suite
, "blockdev reset",
1050 blockdev_test_reset
) == NULL
1051 || CU_add_test(suite
, "blockdev write read 512 bytes",
1052 blockdev_write_read_512Bytes
) == NULL
1053 || CU_add_test(suite
, "blockdev write read size > 128k",
1054 blockdev_write_read_size_gt_128k
) == NULL
1055 || CU_add_test(suite
, "blockdev write read invalid size",
1056 blockdev_write_read_invalid_size
) == NULL
1057 || CU_add_test(suite
, "blockdev write read offset + nbytes == size of blockdev",
1058 blockdev_write_read_offset_plus_nbytes_equals_bdev_size
) == NULL
1059 || CU_add_test(suite
, "blockdev write read offset + nbytes > size of blockdev",
1060 blockdev_write_read_offset_plus_nbytes_gt_bdev_size
) == NULL
1061 || CU_add_test(suite
, "blockdev write read max offset",
1062 blockdev_write_read_max_offset
) == NULL
1063 || CU_add_test(suite
, "blockdev write read 8k on overlapped address offset",
1064 blockdev_overlapped_write_read_8k
) == NULL
1065 || CU_add_test(suite
, "blockdev writev readv 4k", blockdev_writev_readv_4k
) == NULL
1066 || CU_add_test(suite
, "blockdev writev readv 30 x 4k",
1067 blockdev_writev_readv_30x4k
) == NULL
1068 || CU_add_test(suite
, "blockdev writev readv 512 bytes",
1069 blockdev_writev_readv_512Bytes
) == NULL
1070 || CU_add_test(suite
, "blockdev writev readv size > 128k",
1071 blockdev_writev_readv_size_gt_128k
) == NULL
1072 || CU_add_test(suite
, "blockdev writev readv size > 128k in two iovs",
1073 blockdev_writev_readv_size_gt_128k_two_iov
) == NULL
1074 || CU_add_test(suite
, "blockdev nvme passthru rw",
1075 blockdev_test_nvme_passthru_rw
) == NULL
1076 || CU_add_test(suite
, "blockdev nvme passthru vendor specific",
1077 blockdev_test_nvme_passthru_vendor_specific
) == NULL
1078 || CU_add_test(suite
, "blockdev nvme admin passthru",
1079 blockdev_test_nvme_admin_passthru
) == NULL
1081 CU_cleanup_registry();
1082 rc
= CU_get_error();
1089 __run_ut_thread(void *arg1
, void *arg2
)
1091 struct spdk_jsonrpc_request
*request
= arg2
;
1093 struct io_target
*target
;
1094 unsigned num_failures
;
1096 if (CU_initialize_registry() != CUE_SUCCESS
) {
1097 /* CUnit error, probably won't recover */
1098 rc
= CU_get_error();
1099 stop_init_thread(-rc
, request
);
1102 target
= g_io_targets
;
1103 while (target
!= NULL
) {
1104 rc
= __setup_ut_on_single_target(target
);
1106 /* CUnit error, probably won't recover */
1107 stop_init_thread(-rc
, request
);
1109 target
= target
->next
;
1111 CU_basic_set_mode(CU_BRM_VERBOSE
);
1112 CU_basic_run_tests();
1113 num_failures
= CU_get_number_of_failures();
1114 CU_cleanup_registry();
1116 stop_init_thread(num_failures
, request
);
1120 test_main(void *arg1
)
1122 struct spdk_event
*event
;
1124 pthread_mutex_init(&g_test_mutex
, NULL
);
1125 pthread_cond_init(&g_test_cond
, NULL
);
1127 g_lcore_id_init
= spdk_env_get_first_core();
1128 g_lcore_id_ut
= spdk_env_get_next_core(g_lcore_id_init
);
1129 g_lcore_id_io
= spdk_env_get_next_core(g_lcore_id_ut
);
1131 if (g_lcore_id_init
== SPDK_ENV_LCORE_ID_ANY
||
1132 g_lcore_id_ut
== SPDK_ENV_LCORE_ID_ANY
||
1133 g_lcore_id_io
== SPDK_ENV_LCORE_ID_ANY
) {
1134 SPDK_ERRLOG("Could not reserve 3 separate threads.\n");
1138 if (g_wait_for_tests
) {
1139 /* Do not perform any tests until RPC is received */
1143 if (bdevio_construct_targets() < 0) {
1148 event
= spdk_event_allocate(g_lcore_id_ut
, __run_ut_thread
, NULL
, NULL
);
1149 spdk_event_call(event
);
1155 printf(" -w start bdevio app and wait for RPC to start the tests\n");
1159 bdevio_parse_arg(int ch
, char *arg
)
1163 g_wait_for_tests
= true;
1171 struct rpc_perform_tests
{
1176 free_rpc_perform_tests(struct rpc_perform_tests
*r
)
1181 static const struct spdk_json_object_decoder rpc_perform_tests_decoders
[] = {
1182 {"name", offsetof(struct rpc_perform_tests
, name
), spdk_json_decode_string
, true},
1186 rpc_perform_tests_cb(unsigned num_failures
, struct spdk_jsonrpc_request
*request
)
1188 struct spdk_json_write_ctx
*w
;
1190 w
= spdk_jsonrpc_begin_result(request
);
1194 spdk_json_write_uint32(w
, num_failures
);
1195 spdk_jsonrpc_end_result(request
, w
);
1199 rpc_perform_tests(struct spdk_jsonrpc_request
*request
, const struct spdk_json_val
*params
)
1201 struct rpc_perform_tests req
= {NULL
};
1202 struct spdk_event
*event
;
1203 struct spdk_bdev
*bdev
;
1206 if (params
&& spdk_json_decode_object(params
, rpc_perform_tests_decoders
,
1207 SPDK_COUNTOF(rpc_perform_tests_decoders
),
1209 SPDK_ERRLOG("spdk_json_decode_object failed\n");
1210 spdk_jsonrpc_send_error_response(request
, SPDK_JSONRPC_ERROR_INVALID_PARAMS
, "Invalid parameters");
1215 bdev
= spdk_bdev_get_by_name(req
.name
);
1217 SPDK_ERRLOG("Bdev '%s' does not exist\n", req
.name
);
1218 spdk_jsonrpc_send_error_response_fmt(request
, SPDK_JSONRPC_ERROR_INTERNAL_ERROR
,
1219 "Bdev '%s' does not exist: %s",
1220 req
.name
, spdk_strerror(ENODEV
));
1223 rc
= bdevio_construct_target(bdev
);
1225 SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev
));
1226 spdk_jsonrpc_send_error_response_fmt(request
, SPDK_JSONRPC_ERROR_INTERNAL_ERROR
,
1227 "Could not construct target for bdev '%s': %s",
1228 spdk_bdev_get_name(bdev
), spdk_strerror(-rc
));
1232 rc
= bdevio_construct_targets();
1234 SPDK_ERRLOG("Could not construct targets for all bdevs\n");
1235 spdk_jsonrpc_send_error_response_fmt(request
, SPDK_JSONRPC_ERROR_INTERNAL_ERROR
,
1236 "Could not construct targets for all bdevs: %s",
1237 spdk_strerror(-rc
));
1241 free_rpc_perform_tests(&req
);
1243 event
= spdk_event_allocate(g_lcore_id_ut
, __run_ut_thread
, NULL
, request
);
1244 spdk_event_call(event
);
1249 free_rpc_perform_tests(&req
);
1251 SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests
, SPDK_RPC_RUNTIME
)
1254 main(int argc
, char **argv
)
1257 struct spdk_app_opts opts
= {};
1259 spdk_app_opts_init(&opts
);
1260 opts
.name
= "bdevio";
1261 opts
.reactor_mask
= "0x7";
1263 if ((rc
= spdk_app_parse_args(argc
, argv
, &opts
, "w", NULL
,
1264 bdevio_parse_arg
, bdevio_usage
)) !=
1265 SPDK_APP_PARSE_ARGS_SUCCESS
) {
1269 rc
= spdk_app_start(&opts
, test_main
, NULL
);