]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/test/lib/bdev/bdevio/bdevio.c
add subtree-ish sources for 12.0.3
[ceph.git] / ceph / src / spdk / test / lib / bdev / bdevio / bdevio.c
1 /*-
2 * BSD LICENSE
3 *
4 * Copyright (C) 2008-2012 Daisuke Aoyama <aoyama@peach.ne.jp>.
5 * Copyright (c) Intel Corporation.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
17 * distribution.
18 * * Neither the name of Intel Corporation nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <pthread.h>
36 #include <stdint.h>
37 #include <stdlib.h>
38 #include <string.h>
39
40 #include "spdk/bdev.h"
41 #include "spdk/copy_engine.h"
42 #include "spdk/env.h"
43 #include "spdk/log.h"
44 #include "spdk/io_channel.h"
45
46 #include "CUnit/Basic.h"
47
48 #define BUFFER_IOVS 1024
49 #define BUFFER_SIZE 260 * 1024
50 #define BDEV_TASK_ARRAY_SIZE 2048
51
52
53 #include "../common.c"
54
/* Mutex/condvar pair used to park the CUnit (UT) thread while an
 * operation executes on the SPDK event thread; see
 * execute_spdk_function() and wake_ut_thread(). */
pthread_mutex_t g_test_mutex;
pthread_cond_t g_test_cond;

/* One claimed bdev under test plus the I/O channel opened on it.
 * Targets form a singly linked list headed by g_io_targets. */
struct io_target {
	struct spdk_bdev *bdev;
	struct spdk_io_channel *ch;
	struct io_target *next;
};

/* Parameters of a single read/write/reset request, handed from the UT
 * thread to the SPDK event thread by execute_spdk_function(). */
struct bdevio_request {
	char *buf;		/* contiguous data buffer */
	int data_len;		/* total transfer length in bytes */
	uint64_t offset;	/* byte offset into the bdev */
	struct iovec iov[BUFFER_IOVS];	/* SG list built by sgl_chop_buffer() */
	int iovcnt;		/* 0 => use buf; >0 => use iov[] */
	struct io_target *target;
};

/* Head of the list of claimed I/O targets. */
struct io_target *g_io_targets = NULL;
74
/*
 * Run fn(arg1, arg2) on the SPDK event thread (core 1) and block the
 * calling UT thread until the operation calls wake_ut_thread().
 *
 * The mutex is taken before the event is dispatched, so the event
 * thread cannot deliver its signal until this thread has released the
 * mutex inside pthread_cond_wait() - the wakeup cannot be lost.
 *
 * NOTE(review): there is no predicate guarding the wait, so a spurious
 * wakeup (permitted by POSIX) would return early - confirm whether a
 * completion flag should be added.
 */
static void
execute_spdk_function(spdk_event_fn fn, void *arg1, void *arg2)
{
	struct spdk_event *event;

	event = spdk_event_allocate(1, fn, arg1, arg2);
	pthread_mutex_lock(&g_test_mutex);
	spdk_event_call(event);
	pthread_cond_wait(&g_test_cond, &g_test_mutex);
	pthread_mutex_unlock(&g_test_mutex);
}
86
/*
 * Unblock the UT thread parked in execute_spdk_function(). Called from
 * the event thread once the dispatched operation has completed. Taking
 * the mutex first guarantees the waiter is already inside
 * pthread_cond_wait() before the signal is sent.
 */
static void
wake_ut_thread(void)
{
	pthread_mutex_lock(&g_test_mutex);
	pthread_cond_signal(&g_test_cond);
	pthread_mutex_unlock(&g_test_mutex);
}
94
/*
 * Event-thread callback: open an I/O channel (default priority) on the
 * target's bdev, store it in target->ch, and wake the UT thread.
 * arg2 is unused.
 */
static void
__get_io_channel(void *arg1, void *arg2)
{
	struct io_target *target = arg1;

	target->ch = spdk_bdev_get_io_channel(target->bdev, SPDK_IO_PRIORITY_DEFAULT);
	wake_ut_thread();
}
103
104 static int
105 bdevio_construct_targets(void)
106 {
107 struct spdk_bdev *bdev;
108 struct io_target *target;
109
110 printf("I/O targets:\n");
111
112 bdev = spdk_bdev_first();
113 while (bdev != NULL) {
114
115 if (!spdk_bdev_claim(bdev, NULL, NULL)) {
116 bdev = spdk_bdev_next(bdev);
117 continue;
118 }
119
120 printf(" %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
121 bdev->name,
122 bdev->blockcnt, bdev->blocklen,
123 (bdev->blockcnt * bdev->blocklen + 1024 * 1024 - 1) / (1024 * 1024));
124
125 target = malloc(sizeof(struct io_target));
126 if (target == NULL) {
127 return -ENOMEM;
128 }
129 target->bdev = bdev;
130 target->next = g_io_targets;
131 execute_spdk_function(__get_io_channel, target, NULL);
132 g_io_targets = target;
133
134 bdev = spdk_bdev_next(bdev);
135 }
136
137 return 0;
138 }
139
/*
 * Event-thread callback: release the target's I/O channel, then wake
 * the UT thread blocked in execute_spdk_function(). arg2 is unused.
 */
static void
__put_io_channel(void *arg1, void *arg2)
{
	struct io_target *target = arg1;

	spdk_put_io_channel(target->ch);
	wake_ut_thread();
}
148
149 static void
150 bdevio_cleanup_targets(void)
151 {
152 struct io_target *target;
153
154 target = g_io_targets;
155 while (target != NULL) {
156 execute_spdk_function(__put_io_channel, target, NULL);
157 spdk_bdev_unclaim(target->bdev);
158 g_io_targets = target->next;
159 free(target);
160 target = g_io_targets;
161 }
162 }
163
/* Completion status of the most recent I/O, recorded by
 * quick_test_complete() (or forced to FAILED when submission fails). */
static enum spdk_bdev_io_status g_completion_status;
165
/*
 * Allocate a 4 KiB-aligned buffer of `size` bytes via spdk_zmalloc()
 * and fill it with `pattern`, returning it through *buf. Caller owns
 * the buffer and must release it with spdk_free().
 *
 * Fix: the original memset() ran unconditionally, dereferencing NULL
 * if the allocation failed; fail the test fatally instead.
 */
static void
initialize_buffer(char **buf, int pattern, int size)
{
	*buf = spdk_zmalloc(size, 0x1000, NULL);
	CU_ASSERT_PTR_NOT_NULL_FATAL(*buf);
	memset(*buf, pattern, size);
}
172
/*
 * Generic bdev I/O completion callback: record the completion status in
 * g_completion_status, free the bdev_io, and unblock the UT thread.
 * The arg pointer is unused (all submitters pass NULL).
 */
static void
quick_test_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status, void *arg)
{
	g_completion_status = status;
	spdk_bdev_free_io(bdev_io);
	wake_ut_thread();
}
180
181 static void
182 __blockdev_write(void *arg1, void *arg2)
183 {
184 struct bdevio_request *req = arg1;
185 struct io_target *target = req->target;
186 struct spdk_bdev_io *bdev_io;
187
188 if (req->iovcnt) {
189 bdev_io = spdk_bdev_writev(target->bdev, target->ch, req->iov, req->iovcnt, req->offset,
190 req->data_len, quick_test_complete, NULL);
191 } else {
192 bdev_io = spdk_bdev_write(target->bdev, target->ch, req->buf, req->offset,
193 req->data_len, quick_test_complete, NULL);
194 }
195
196 if (!bdev_io) {
197 g_completion_status = SPDK_BDEV_IO_STATUS_FAILED;
198 wake_ut_thread();
199 }
200 }
201
202 static void
203 sgl_chop_buffer(struct bdevio_request *req, int iov_len)
204 {
205 int data_len = req->data_len;
206 char *buf = req->buf;
207
208 req->iovcnt = 0;
209 if (!iov_len)
210 return;
211
212 for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
213 if (data_len < iov_len)
214 iov_len = data_len;
215
216 req->iov[req->iovcnt].iov_base = buf;
217 req->iov[req->iovcnt].iov_len = iov_len;
218
219 buf += iov_len;
220 data_len -= iov_len;
221 }
222
223 CU_ASSERT_EQUAL_FATAL(data_len, 0);
224 }
225
226 static void
227 blockdev_write(struct io_target *target, char *tx_buf,
228 uint64_t offset, int data_len, int iov_len)
229 {
230 struct bdevio_request req;
231
232 req.target = target;
233 req.buf = tx_buf;
234 req.data_len = data_len;
235 req.offset = offset;
236 sgl_chop_buffer(&req, iov_len);
237
238 g_completion_status = SPDK_BDEV_IO_STATUS_FAILED;
239
240 execute_spdk_function(__blockdev_write, &req, NULL);
241 }
242
243 static void
244 __blockdev_read(void *arg1, void *arg2)
245 {
246 struct bdevio_request *req = arg1;
247 struct io_target *target = req->target;
248 struct spdk_bdev_io *bdev_io;
249
250 if (req->iovcnt) {
251 bdev_io = spdk_bdev_readv(target->bdev, target->ch, req->iov, req->iovcnt, req->offset,
252 req->data_len, quick_test_complete, NULL);
253 } else {
254 bdev_io = spdk_bdev_read(target->bdev, target->ch, req->buf, req->offset,
255 req->data_len, quick_test_complete, NULL);
256 }
257
258 if (!bdev_io) {
259 g_completion_status = SPDK_BDEV_IO_STATUS_FAILED;
260 wake_ut_thread();
261 }
262 }
263
264 static void
265 blockdev_read(struct io_target *target, char *rx_buf,
266 uint64_t offset, int data_len, int iov_len)
267 {
268 struct bdevio_request req;
269
270 req.target = target;
271 req.buf = rx_buf;
272 req.data_len = data_len;
273 req.offset = offset;
274 req.iovcnt = 0;
275 sgl_chop_buffer(&req, iov_len);
276
277 g_completion_status = SPDK_BDEV_IO_STATUS_FAILED;
278
279 execute_spdk_function(__blockdev_read, &req, NULL);
280 }
281
/*
 * Compare the first data_length bytes of rx_buf against tx_buf and
 * release BOTH buffers with spdk_free() - ownership transfers to this
 * function. Returns the memcmp() result (0 on match).
 */
static int
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
{
	int result = memcmp(rx_buf, tx_buf, data_length);

	spdk_free(rx_buf);
	spdk_free(tx_buf);

	return result;
}
293
294 static void
295 blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
296 int expected_rc)
297 {
298 struct io_target *target;
299 char *tx_buf = NULL;
300 char *rx_buf = NULL;
301 int rc;
302
303 target = g_io_targets;
304 while (target != NULL) {
305 if (data_length < target->bdev->blocklen) {
306 target = target->next;
307 continue;
308 }
309
310 initialize_buffer(&tx_buf, pattern, data_length);
311 initialize_buffer(&rx_buf, 0, data_length);
312
313 blockdev_write(target, tx_buf, offset, data_length, iov_len);
314
315 if (expected_rc == 0) {
316 CU_ASSERT_EQUAL(g_completion_status, SPDK_BDEV_IO_STATUS_SUCCESS);
317 } else {
318 CU_ASSERT_EQUAL(g_completion_status, SPDK_BDEV_IO_STATUS_FAILED);
319 }
320
321 blockdev_read(target, rx_buf, offset, data_length, iov_len);
322
323 if (expected_rc == 0) {
324 CU_ASSERT_EQUAL(g_completion_status, SPDK_BDEV_IO_STATUS_SUCCESS);
325 } else {
326 CU_ASSERT_EQUAL(g_completion_status, SPDK_BDEV_IO_STATUS_FAILED);
327 }
328
329 if (g_completion_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
330 rc = blockdev_write_read_data_match(rx_buf, tx_buf, data_length);
331 /* Assert the write by comparing it with values read
332 * from each blockdev */
333 CU_ASSERT_EQUAL(rc, 0);
334 }
335
336 target = target->next;
337 }
338 }
339
340 static void
341 blockdev_write_read_4k(void)
342 {
343 uint32_t data_length;
344 uint64_t offset;
345 int pattern;
346 int expected_rc;
347
348 /* Data size = 4K */
349 data_length = 4096;
350 CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
351 offset = 0;
352 pattern = 0xA3;
353 /* Params are valid, hence the expected return value
354 * of write and read for all blockdevs is 0. */
355 expected_rc = 0;
356
357 blockdev_write_read(data_length, 0, pattern, offset, expected_rc);
358 }
359
360 static void
361 blockdev_writev_readv_4k(void)
362 {
363 uint32_t data_length, iov_len;
364 uint64_t offset;
365 int pattern;
366 int expected_rc;
367
368 /* Data size = 4K */
369 data_length = 4096;
370 iov_len = 4096;
371 CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
372 offset = 0;
373 pattern = 0xA3;
374 /* Params are valid, hence the expected return value
375 * of write and read for all blockdevs is 0. */
376 expected_rc = 0;
377
378 blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc);
379 }
380
/*
 * Vectored 120 KiB write/read split into 30 iovecs of 4 KiB each.
 */
static void
blockdev_writev_readv_30x4k(void)
{
	uint32_t data_length, iov_len;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 120K (30 x 4K), one 4K iovec per chunk */
	data_length = 4096 * 30;
	iov_len = 4096;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;

	blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc);
}
401
402 static void
403 blockdev_write_read_512Bytes(void)
404 {
405 uint32_t data_length;
406 uint64_t offset;
407 int pattern;
408 int expected_rc;
409
410 /* Data size = 512 */
411 data_length = 512;
412 CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
413 offset = 2048;
414 pattern = 0xA3;
415 /* Params are valid, hence the expected return value
416 * of write and read for all blockdevs is 0. */
417 expected_rc = 0;
418
419 blockdev_write_read(data_length, 0, pattern, offset, expected_rc);
420 }
421
422 static void
423 blockdev_writev_readv_512Bytes(void)
424 {
425 uint32_t data_length, iov_len;
426 uint64_t offset;
427 int pattern;
428 int expected_rc;
429
430 /* Data size = 512 */
431 data_length = 512;
432 iov_len = 512;
433 CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
434 offset = 2048;
435 pattern = 0xA3;
436 /* Params are valid, hence the expected return value
437 * of write and read for all blockdevs is 0. */
438 expected_rc = 0;
439
440 blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc);
441 }
442
443 static void
444 blockdev_write_read_size_gt_128k(void)
445 {
446 uint32_t data_length;
447 uint64_t offset;
448 int pattern;
449 int expected_rc;
450
451 /* Data size = 132K */
452 data_length = 135168;
453 CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
454 offset = 2048;
455 pattern = 0xA3;
456 /* Params are valid, hence the expected return value
457 * of write and read for all blockdevs is 0. */
458 expected_rc = 0;
459
460 blockdev_write_read(data_length, 0, pattern, offset, expected_rc);
461 }
462
463 static void
464 blockdev_writev_readv_size_gt_128k(void)
465 {
466 uint32_t data_length, iov_len;
467 uint64_t offset;
468 int pattern;
469 int expected_rc;
470
471 /* Data size = 132K */
472 data_length = 135168;
473 iov_len = 135168;
474 CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
475 offset = 2048;
476 pattern = 0xA3;
477 /* Params are valid, hence the expected return value
478 * of write and read for all blockdevs is 0. */
479 expected_rc = 0;
480
481 blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc);
482 }
483
484 static void
485 blockdev_writev_readv_size_gt_128k_two_iov(void)
486 {
487 uint32_t data_length, iov_len;
488 uint64_t offset;
489 int pattern;
490 int expected_rc;
491
492 /* Data size = 132K */
493 data_length = 135168;
494 iov_len = 128 * 1024;
495 CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
496 offset = 2048;
497 pattern = 0xA3;
498 /* Params are valid, hence the expected return value
499 * of write and read for all blockdevs is 0. */
500 expected_rc = 0;
501
502 blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc);
503 }
504
505 static void
506 blockdev_write_read_invalid_size(void)
507 {
508 uint32_t data_length;
509 uint64_t offset;
510 int pattern;
511 int expected_rc;
512
513 /* Data size is not a multiple of the block size */
514 data_length = 0x1015;
515 CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
516 offset = 2048;
517 pattern = 0xA3;
518 /* Params are invalid, hence the expected return value
519 * of write and read for all blockdevs is < 0 */
520 expected_rc = -1;
521
522 blockdev_write_read(data_length, 0, pattern, offset, expected_rc);
523 }
524
/*
 * Boundary test: write and read the very last block of each target,
 * i.e. offset + nbytes lands exactly on the end of the blockdev.
 * Both I/Os must succeed and the data must round-trip intact.
 */
static void
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
{
	struct io_target *target;
	struct spdk_bdev *bdev;
	char *tx_buf = NULL;
	char *rx_buf = NULL;
	uint64_t offset;
	int rc;

	target = g_io_targets;
	while (target != NULL) {
		bdev = target->bdev;

		/* The start offset has been set to a marginal value
		 * such that offset + nbytes == Total size of
		 * blockdev. */
		offset = ((bdev->blockcnt - 1) * bdev->blocklen);

		initialize_buffer(&tx_buf, 0xA3, bdev->blocklen);
		initialize_buffer(&rx_buf, 0, bdev->blocklen);

		blockdev_write(target, tx_buf, offset, bdev->blocklen, 0);
		CU_ASSERT_EQUAL(g_completion_status, SPDK_BDEV_IO_STATUS_SUCCESS);

		blockdev_read(target, rx_buf, offset, bdev->blocklen, 0);
		CU_ASSERT_EQUAL(g_completion_status, SPDK_BDEV_IO_STATUS_SUCCESS);

		/* Assert the write by comparing it with values read
		 * from each blockdev; this also frees both buffers. */
		rc = blockdev_write_read_data_match(rx_buf, tx_buf, bdev->blocklen);
		CU_ASSERT_EQUAL(rc, 0);

		target = target->next;
	}
}
561
562 static void
563 blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
564 {
565 struct io_target *target;
566 struct spdk_bdev *bdev;
567 char *tx_buf = NULL;
568 char *rx_buf = NULL;
569 int data_length;
570 uint64_t offset;
571 int pattern;
572
573 /* Tests the overflow condition of the blockdevs. */
574 data_length = 4096;
575 CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
576 pattern = 0xA3;
577
578 target = g_io_targets;
579 while (target != NULL) {
580 bdev = target->bdev;
581
582 /* The start offset has been set to a valid value
583 * but offset + nbytes is greater than the Total size
584 * of the blockdev. The test should fail. */
585 offset = ((bdev->blockcnt * bdev->blocklen) - 1024);
586
587 initialize_buffer(&tx_buf, pattern, data_length);
588 initialize_buffer(&rx_buf, 0, data_length);
589
590 blockdev_write(target, tx_buf, offset, data_length, 0);
591 CU_ASSERT_EQUAL(g_completion_status, SPDK_BDEV_IO_STATUS_FAILED);
592
593 blockdev_read(target, rx_buf, offset, data_length, 0);
594 CU_ASSERT_EQUAL(g_completion_status, SPDK_BDEV_IO_STATUS_FAILED);
595
596 target = target->next;
597 }
598 }
599
600 static void
601 blockdev_write_read_max_offset(void)
602 {
603 int data_length;
604 uint64_t offset;
605 int pattern;
606 int expected_rc;
607
608 data_length = 4096;
609 CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
610 /* The start offset has been set to UINT64_MAX such that
611 * adding nbytes wraps around and points to an invalid address. */
612 offset = UINT64_MAX;
613 pattern = 0xA3;
614 /* Params are invalid, hence the expected return value
615 * of write and read for all blockdevs is < 0 */
616 expected_rc = -1;
617
618 blockdev_write_read(data_length, 0, pattern, offset, expected_rc);
619 }
620
/*
 * Write 8 KiB at offset 0, then overwrite an overlapping 8 KiB region
 * starting at offset 4096 with a different pattern; each write is
 * verified by reading back and comparing.
 */
static void
blockdev_overlapped_write_read_8k(void)
{
	int data_length;
	uint64_t offset;
	int pattern;
	int expected_rc;

	/* Data size = 8K */
	data_length = 8192;
	CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
	offset = 0;
	pattern = 0xA3;
	/* Params are valid, hence the expected return value
	 * of write and read for all blockdevs is 0. */
	expected_rc = 0;
	/* Assert the write by comparing it with values read
	 * from the same offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc);

	/* Overwrite the pattern 0xbb of size 8K on an address offset overlapping
	 * with the address written above and assert the new value in
	 * the overlapped address range */
	/* Populate 8k with value 0xBB */
	pattern = 0xBB;
	/* Offset = 4096; overlaps the second half of the first write */
	offset = 4096;
	/* Assert the write by comparing it with values read
	 * from the overlapped offset for each blockdev */
	blockdev_write_read(data_length, 0, pattern, offset, expected_rc);
}
652
/*
 * Event-thread callback: submit a bdev reset of the type pointed to by
 * arg2. If submission fails (rc < 0), record FAILED and wake the UT
 * thread here; otherwise quick_test_complete() does so at completion.
 */
static void
__blockdev_reset(void *arg1, void *arg2)
{
	struct bdevio_request *req = arg1;
	enum spdk_bdev_reset_type *reset_type = arg2;
	struct io_target *target = req->target;
	int rc;

	rc = spdk_bdev_reset(target->bdev, *reset_type, quick_test_complete, NULL);
	if (rc < 0) {
		g_completion_status = SPDK_BDEV_IO_STATUS_FAILED;
		wake_ut_thread();
	}
}
667
/*
 * Synchronously reset the target's bdev with the given reset type,
 * leaving the outcome in g_completion_status (pre-set to FAILED so a
 * lost submission reads as failure). Passing &reset_type - a stack
 * variable - is safe because execute_spdk_function() blocks until the
 * event has run and signalled completion.
 */
static void
blockdev_reset(struct io_target *target, enum spdk_bdev_reset_type reset_type)
{
	struct bdevio_request req;

	req.target = target;

	g_completion_status = SPDK_BDEV_IO_STATUS_FAILED;

	execute_spdk_function(__blockdev_reset, &req, &reset_type);
}
679
/*
 * For every target: issue a hard reset and check it succeeds and bumps
 * the bdev's generation counter (gencnt 0 -> 1), then issue a soft
 * reset and check it succeeds without touching gencnt (stays 0).
 */
static void
blockdev_test_reset(void)
{
	struct io_target *target;

	target = g_io_targets;
	while (target != NULL) {
		/* Hard reset: expected to increment gencnt. */
		target->bdev->gencnt = 0;
		blockdev_reset(target, SPDK_BDEV_RESET_HARD);
		CU_ASSERT_EQUAL(g_completion_status, SPDK_BDEV_IO_STATUS_SUCCESS);
		CU_ASSERT_EQUAL(target->bdev->gencnt, 1);

		/* Soft reset: expected to leave gencnt untouched. */
		target->bdev->gencnt = 0;
		blockdev_reset(target, SPDK_BDEV_RESET_SOFT);
		CU_ASSERT_EQUAL(g_completion_status, SPDK_BDEV_IO_STATUS_SUCCESS);
		CU_ASSERT_EQUAL(target->bdev->gencnt, 0);

		target = target->next;
	}
}
700
/*
 * SPDK application entry point (invoked by spdk_app_start() from
 * main()). Sets up the UT synchronization primitives, claims all
 * bdevs as I/O targets, registers and runs the CUnit suite, then
 * stops the app with the number of test failures as the exit status.
 * arg1/arg2 are unused.
 */
static void
test_main(void *arg1, void *arg2)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	/* Claim every claimable bdev and open an I/O channel on each. */
	if (bdevio_construct_targets() < 0) {
		spdk_app_stop(-1);
		return;
	}

	if (CU_initialize_registry() != CUE_SUCCESS) {
		spdk_app_stop(CU_get_error());
		return;
	}

	suite = CU_add_suite("components_suite", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		spdk_app_stop(CU_get_error());
		return;
	}

	/* Register every test; bail out if any registration fails. */
	if (
		CU_add_test(suite, "blockdev write read 4k", blockdev_write_read_4k) == NULL
		|| CU_add_test(suite, "blockdev write read 512 bytes",
			       blockdev_write_read_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev write read size > 128k",
			       blockdev_write_read_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev write read invalid size",
			       blockdev_write_read_invalid_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
			       blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
		|| CU_add_test(suite, "blockdev write read max offset",
			       blockdev_write_read_max_offset) == NULL
		|| CU_add_test(suite, "blockdev write read 8k on overlapped address offset",
			       blockdev_overlapped_write_read_8k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 4k", blockdev_writev_readv_4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 30 x 4k",
			       blockdev_writev_readv_30x4k) == NULL
		|| CU_add_test(suite, "blockdev writev readv 512 bytes",
			       blockdev_writev_readv_512Bytes) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k",
			       blockdev_writev_readv_size_gt_128k) == NULL
		|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
			       blockdev_writev_readv_size_gt_128k_two_iov) == NULL
		|| CU_add_test(suite, "blockdev reset",
			       blockdev_test_reset) == NULL
	) {
		CU_cleanup_registry();
		spdk_app_stop(CU_get_error());
		return;
	}

	/* Run the suite and report the failure count via app exit status. */
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	bdevio_cleanup_targets();
	spdk_app_stop(num_failures);
}
767
768 int
769 main(int argc, char **argv)
770 {
771 const char *config_file;
772 int num_failures;
773
774 if (argc == 1) {
775 config_file = "/usr/local/etc/spdk/iscsi.conf";
776 } else {
777 config_file = argv[1];
778 }
779 bdevtest_init(config_file, "0x3");
780
781 num_failures = spdk_app_start(test_main, NULL, NULL);
782 spdk_app_fini();
783
784 return num_failures;
785 }