/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

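/*
 * bdev.c is compiled directly into this test binary (white-box testing), so
 * the DEFINE_STUB()/DEFINE_STUB_V() macros below satisfy its external
 * dependencies (config parsing, tracing, notifications) with no-op
 * implementations.
 */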
DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);


int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t type;
	uint64_t offset;
	uint64_t length;
	int iovcnt;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];
	void *md_buf;
	TAILQ_ENTRY(ut_expected_io) link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
	uint32_t outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io) expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

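/*
 * Typical usage of the expected-I/O helpers (taken from bdev_io_split_test
 * below): queue an expectation, submit through the public API, and let
 * stub_submit_request() verify type, offset, length and iovecs.
 *
 *	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
 *	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
 *	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 */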
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		expected_iov = &expected_io->iov[i];
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

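/*
 * Complete up to num_to_complete outstanding I/Os on the stub channel in FIFO
 * order, using g_io_exp_status as the completion status. Returns the number
 * actually completed (fewer if the outstanding queue runs dry first).
 */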
static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ] = true,
	[SPDK_BDEV_IO_TYPE_WRITE] = true,
	[SPDK_BDEV_IO_TYPE_COMPARE] = true,
	[SPDK_BDEV_IO_TYPE_UNMAP] = true,
	[SPDK_BDEV_IO_TYPE_FLUSH] = true,
	[SPDK_BDEV_IO_TYPE_RESET] = true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
	[SPDK_BDEV_IO_TYPE_ZCOPY] = true,
	[SPDK_BDEV_IO_TYPE_ABORT] = true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

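/*
 * Helpers that register a minimal bdev or vbdev backed by the stub fn_table
 * above.  allocate_bdev() creates a 1024-block device with 512-byte blocks
 * (512 KiB total).
 */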
static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);
	free_bdev(bdev);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) { poll_threads(); }
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7).  This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6.  This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                 bdev8
	 *                   |
	 *             +----------+
	 *             |          |
	 *           bdev4      bdev5   bdev6   bdev7
	 *             |          |       |       |
	 *         +---+---+    +-+      +    +--+---+
	 *         |       |     \       |   /       \
	 *       bdev0   bdev1    bdev2            bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_bdev_desc *desc_ext = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	/* In case bdev opened */
	rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	/* In case bdev opened with ext API */
	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc_ext, &desc_ext);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc_ext != NULL);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_close(desc_ext);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the name.
	 * The alias is identical to the name, so it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try to add an empty alias;
	 * this one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* The alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* The alias was already added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* The alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing an alias from registered bdevs */

	/* The alias is not on the bdev's aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* The alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* The alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try to remove the name instead of an alias: this should fail, since the name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try to delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try to delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry entry;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_desc *desc;
	bool submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

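/*
 * Note for the test below: both WRITE and WRITE_ZEROES are disabled in the
 * stub's io_type_supported table.  Disabling WRITE as well matters because
 * the generic bdev layer can emulate WRITE_ZEROES with plain WRITEs, so
 * spdk_bdev_write_zeroes_blocks() only returns -ENOTSUP once WRITE is
 * unavailable too.
 */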
static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

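/*
 * bdev_io_wait_test: with a bdev_io pool of only 4 entries, the fifth
 * submission fails with -ENOMEM.  spdk_bdev_queue_io_wait() then invokes
 * io_wait_cb() (above) as completions return bdev_ios to the pool; this is
 * the standard retry pattern for -ENOMEM from the submission APIs.
 */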
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 4,
		.bdev_io_cache_size = 2,
	};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_spans_boundary_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;

	memset(&bdev, 0, sizeof(bdev));

	bdev.optimal_io_boundary = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {
		.bdev_io_pool_size = 512,
		.bdev_io_cache_size = 64,
	};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs. In this case, the length of
	 * the rest of the iovec array within an I/O boundary is a multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary. The iovec count for this boundary
	 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs. The child request offset should
	 * be rewound to the last aligned offset, and the I/O should then succeed without error.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected I/O spans offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected I/O spans offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected I/O spans offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split due to the IO boundary and
	 * the capacity of child iovs. Especially test the case when the command is
	 * split due to the capacity of child iovs, the tail address is not aligned with
	 * block size and is rewound to the aligned address.
	 *
	 * The iovecs used in the read request are complex but are based on the data
	 * collected in the real issue. We change the base addresses but keep the lengths
	 * so as not to lose the credibility of the test.
	 */
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;

	/* The 1st child IO must be from iov[0] to iov[31], split by the capacity
	 * of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33],
	 * split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 6th child IO must be from the remaining 7328 bytes of iov[57]
	 * to the first 3936 bytes of iov[59], split by the capacity of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 61, 0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test a WRITE_ZEROES that would span an I/O boundary.  WRITE_ZEROES should not be
	 * split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH.  This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Children requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test that a multi vector command is terminated with failure, without continuing
	 * the splitting process, when one of its child I/Os fails.
	 * The multi vector command is the same as the one above that needs to be split by strip
	 * and then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

1583 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1584
1585 /* For this test we will create the following conditions to hit the code path where
1586 * we are trying to send an I/O following a split that has no iovs left because we
1587 * had to trim them for alignment reasons.
1588 *
1589 * - 16K boundary, our I/O will start at offset 0 with a length of 0x4200
1590 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
1591 * position 30 and overshoot by 0x2e.
1592 * - That means we'll send the I/O and loop back to pick up the remaining bytes at
1593 * child IOV index 31. When we do, we find that we have to shorten index 30 by 0x2e,
1594 * which eliminates the overshoot, so we send the first split I/O with 31 vectors
1595 * and let the completion pick up the last 2 vectors.
1596 */
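/* Worked arithmetic for the scenario above (it follows directly from the
 * values in the comment): 31 iovs * 0x212 bytes = 0x402e, overshooting the
 * 16K (0x4000) boundary by 0x2e. Trimming iov 30 to 0x212 - 0x2e = 0x1e4
 * makes the first child end exactly on the boundary:
 * 30 * 0x212 + 0x1e4 = 0x4000. The second child then carries the trimmed
 * 0x2e remainder plus 0x4200 - 0x4000 - 0x2e = 0x1d2 from iov 31, i.e. the
 * two vectors checked below.
 */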
1597 bdev->optimal_io_boundary = 32;
1598 bdev->split_on_optimal_io_boundary = true;
1599 g_io_done = false;
1600
1601 /* Init all parent IOVs to 0x212 */
1602 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
1603 iov[i].iov_base = (void *)((i + 1) * 0x10000);
1604 iov[i].iov_len = 0x212;
1605 }
1606
1607 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
1608 BDEV_IO_NUM_CHILD_IOV - 1);
1609 /* expect 0-29 to be 1:1 with the parent iov */
1610 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1611 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
1612 }
1613
1614 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment,
1615 * where 0x2e is the amount we overshot the 16K boundary
1616 */
1617 ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
1618 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
1619 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1620
1621 /* The 2nd child I/O will have 2 remaining vectors: one picking up from the vector
1622 * that was shortened, taking it to the next boundary, and then a final one to get
1623 * us to 0x4200 bytes for the I/O.
1624 */
1625 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
1626 BDEV_IO_NUM_CHILD_IOV, 2);
1627 /* position 30 picked up the remaining bytes to the next boundary */
1628 ut_expected_io_set_iov(expected_io, 0,
1629 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
1630
1631 /* position 31 picked up the rest of the transfer to get us to 0x4200 */
1632 ut_expected_io_set_iov(expected_io, 1,
1633 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
1634 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1635
1636 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0,
1637 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1638 CU_ASSERT(rc == 0);
1639 CU_ASSERT(g_io_done == false);
1640
1641 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1642 stub_complete_io(1);
1643 CU_ASSERT(g_io_done == false);
1644
1645 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1646 stub_complete_io(1);
1647 CU_ASSERT(g_io_done == true);
1648 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1649
1650 spdk_put_io_channel(io_ch);
1651 spdk_bdev_close(desc);
1652 free_bdev(bdev);
1653 spdk_bdev_finish(bdev_fini_cb, NULL);
1654 poll_threads();
1655}
1656
1657static void
1658bdev_io_split_with_io_wait(void)
1659{
1660 struct spdk_bdev *bdev;
1661 struct spdk_bdev_desc *desc = NULL;
1662 struct spdk_io_channel *io_ch;
1663 struct spdk_bdev_channel *channel;
1664 struct spdk_bdev_mgmt_channel *mgmt_ch;
1665 struct spdk_bdev_opts bdev_opts = {
1666 .bdev_io_pool_size = 2,
1667 .bdev_io_cache_size = 1,
1668 };
1669 struct iovec iov[3];
1670 struct ut_expected_io *expected_io;
1671 int rc;
1672
1673 rc = spdk_bdev_set_opts(&bdev_opts);
1674 CU_ASSERT(rc == 0);
1675 spdk_bdev_initialize(bdev_init_cb, NULL);
1676
1677 bdev = allocate_bdev("bdev0");
1678
1679 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
1680 CU_ASSERT(rc == 0);
1681 CU_ASSERT(desc != NULL);
1682 io_ch = spdk_bdev_get_io_channel(desc);
1683 CU_ASSERT(io_ch != NULL);
1684 channel = spdk_io_channel_get_ctx(io_ch);
1685 mgmt_ch = channel->shared_resource->mgmt_ch;
1686
1687 bdev->optimal_io_boundary = 16;
1688 bdev->split_on_optimal_io_boundary = true;
1689
1690 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
1691 CU_ASSERT(rc == 0);
1692
1693 /* Now test that a single-vector command is split correctly.
1694 * Offset 14, length 8, payload 0xF000
1695 * Child - Offset 14, length 2, payload 0xF000
1696 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
1697 *
1698 * Set up the expected values before calling spdk_bdev_read_blocks
1699 */
1700 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
1701 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
1702 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1703
1704 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
1705 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
1706 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1707
1708 /* The following children will be submitted sequentially due to the capacity of
1709 * spdk_bdev_io.
1710 */
1711
1712 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
1713 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
1714 CU_ASSERT(rc == 0);
1715 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
1716 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1717
1718 /* Completing the first read I/O will submit the first child */
1719 stub_complete_io(1);
1720 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
1721 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1722
1723 /* Completing the first child will submit the second child */
1724 stub_complete_io(1);
1725 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1726
1727 /* Complete the second child I/O. This should result in our callback getting
1728 * invoked since the parent I/O is now complete.
1729 */
1730 stub_complete_io(1);
1731 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1732
1733 /* Now set up a more complex, multi-vector command that needs to be split,
1734 * including splitting iovecs.
1735 */
1736 iov[0].iov_base = (void *)0x10000;
1737 iov[0].iov_len = 512;
1738 iov[1].iov_base = (void *)0x20000;
1739 iov[1].iov_len = 20 * 512;
1740 iov[2].iov_base = (void *)0x30000;
1741 iov[2].iov_len = 11 * 512;
1742
1743 g_io_done = false;
1744 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
1745 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
1746 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
1747 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1748
1749 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
1750 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
1751 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1752
1753 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
1754 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
1755 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
1756 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1757
1758 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
1759 CU_ASSERT(rc == 0);
1760 CU_ASSERT(g_io_done == false);
1761
1762 /* The following children will be submitted sequentially due to the capacity of
1763 * spdk_bdev_io.
1764 */
1765
1766 /* Completing the first child will submit the second child */
1767 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1768 stub_complete_io(1);
1769 CU_ASSERT(g_io_done == false);
1770
1771 /* Completing the second child will submit the third child */
1772 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1773 stub_complete_io(1);
1774 CU_ASSERT(g_io_done == false);
1775
1776 /* Completing the third child will result in our callback getting invoked
1777 * since the parent I/O is now complete.
1778 */
1779 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1780 stub_complete_io(1);
1781 CU_ASSERT(g_io_done == true);
1782
1783 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1784
1785 spdk_put_io_channel(io_ch);
1786 spdk_bdev_close(desc);
1787 free_bdev(bdev);
1788 spdk_bdev_finish(bdev_fini_cb, NULL);
1789 poll_threads();
1790}
1791
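/* A minimal sketch of how a caller would normally park a request on the
 * io_wait_queue drained above, using the public spdk_bdev_queue_io_wait()
 * API. The struct my_io_ctx and its fields are hypothetical; the snippet
 * is kept under #if 0 because it is illustrative only.
 */
#if 0
static void
retry_read(void *cb_arg)
{
	struct my_io_ctx *ctx = cb_arg;		/* hypothetical caller context */

	/* An spdk_bdev_io is free again; re-submit the read. */
	spdk_bdev_read_blocks(ctx->desc, ctx->ch, ctx->buf, ctx->offset,
			      ctx->num_blocks, ctx->cb_fn, ctx->cb_arg);
}

static void
submit_with_retry(struct my_io_ctx *ctx, struct spdk_bdev *bdev)
{
	int rc;

	rc = spdk_bdev_read_blocks(ctx->desc, ctx->ch, ctx->buf, ctx->offset,
				   ctx->num_blocks, ctx->cb_fn, ctx->cb_arg);
	if (rc == -ENOMEM) {
		/* bdev_io pool exhausted; queue an entry to retry later. */
		ctx->wait_entry.bdev = bdev;
		ctx->wait_entry.cb_fn = retry_read;
		ctx->wait_entry.cb_arg = ctx;
		spdk_bdev_queue_io_wait(bdev, ctx->ch, &ctx->wait_entry);
	}
}
#endif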
1792static void
1793bdev_io_alignment(void)
1794{
1795 struct spdk_bdev *bdev;
1796 struct spdk_bdev_desc *desc = NULL;
1797 struct spdk_io_channel *io_ch;
1798 struct spdk_bdev_opts bdev_opts = {
1799 .bdev_io_pool_size = 20,
1800 .bdev_io_cache_size = 2,
1801 };
1802 int rc;
1803 void *buf;
1804 struct iovec iovs[2];
1805 int iovcnt;
1806 uint64_t alignment;
1807
1808 rc = spdk_bdev_set_opts(&bdev_opts);
1809 CU_ASSERT(rc == 0);
1810 spdk_bdev_initialize(bdev_init_cb, NULL);
1811
1812 fn_table.submit_request = stub_submit_request_get_buf;
1813 bdev = allocate_bdev("bdev0");
1814
1815 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
1816 CU_ASSERT(rc == 0);
1817 CU_ASSERT(desc != NULL);
1818 io_ch = spdk_bdev_get_io_channel(desc);
1819 CU_ASSERT(io_ch != NULL);
1820
1821 /* Create aligned buffer */
1822 rc = posix_memalign(&buf, 4096, 8192);
1823 SPDK_CU_ASSERT_FATAL(rc == 0);
1824
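/* Note: required_alignment stores the log2 of the byte alignment (hence
 * spdk_u32log2() below), so alignment == 1 encodes to 0, i.e. no alignment
 * requirement. When a caller's buffer violates the requirement, the bdev
 * layer copies it through internal.bounce_iov, which the orig_iovcnt
 * assertions below observe.
 */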
1825 /* Pass aligned single buffer with no alignment required */
1826 alignment = 1;
1827 bdev->required_alignment = spdk_u32log2(alignment);
1828
1829 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
1830 CU_ASSERT(rc == 0);
1831 stub_complete_io(1);
1832 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1833 alignment));
1834
1835 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
1836 CU_ASSERT(rc == 0);
1837 stub_complete_io(1);
1838 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1839 alignment));
1840
1841 /* Pass unaligned single buffer with no alignment required */
1842 alignment = 1;
1843 bdev->required_alignment = spdk_u32log2(alignment);
1844
1845 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
1846 CU_ASSERT(rc == 0);
1847 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1848 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
1849 stub_complete_io(1);
1850
1851 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
1852 CU_ASSERT(rc == 0);
1853 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1854 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
1855 stub_complete_io(1);
1856
1857 /* Pass unaligned single buffer with 512 alignment required */
1858 alignment = 512;
1859 bdev->required_alignment = spdk_u32log2(alignment);
1860
1861 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
1862 CU_ASSERT(rc == 0);
1863 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
1864 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
1865 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1866 alignment));
1867 stub_complete_io(1);
1868 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1869
1870 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
1871 CU_ASSERT(rc == 0);
1872 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
1873 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
1874 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1875 alignment));
1876 stub_complete_io(1);
1877 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1878
1879 /* Pass unaligned single buffer with 4096 alignment required */
1880 alignment = 4096;
1881 bdev->required_alignment = spdk_u32log2(alignment);
1882
1883 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
1884 CU_ASSERT(rc == 0);
1885 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
1886 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
1887 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1888 alignment));
1889 stub_complete_io(1);
1890 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1891
1892 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
1893 CU_ASSERT(rc == 0);
1894 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
1895 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
1896 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1897 alignment));
1898 stub_complete_io(1);
1899 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1900
1901 /* Pass aligned iovs with no alignment required */
1902 alignment = 1;
1903 bdev->required_alignment = spdk_u32log2(alignment);
1904
1905 iovcnt = 1;
1906 iovs[0].iov_base = buf;
1907 iovs[0].iov_len = 512;
1908
1909 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1910 CU_ASSERT(rc == 0);
1911 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1912 stub_complete_io(1);
1913 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
1914
1915 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1916 CU_ASSERT(rc == 0);
1917 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1918 stub_complete_io(1);
1919 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
1920
1921 /* Pass unaligned iovs with no alignment required */
1922 alignment = 1;
1923 bdev->required_alignment = spdk_u32log2(alignment);
1924
1925 iovcnt = 2;
1926 iovs[0].iov_base = buf + 16;
1927 iovs[0].iov_len = 256;
1928 iovs[1].iov_base = buf + 16 + 256 + 32;
1929 iovs[1].iov_len = 256;
1930
1931 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1932 CU_ASSERT(rc == 0);
1933 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1934 stub_complete_io(1);
1935 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
1936
1937 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1938 CU_ASSERT(rc == 0);
1939 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1940 stub_complete_io(1);
1941 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);
1942
1943 /* Pass unaligned iov with 2048 alignment required */
1944 alignment = 2048;
1945 bdev->required_alignment = spdk_u32log2(alignment);
1946
1947 iovcnt = 2;
1948 iovs[0].iov_base = buf + 16;
1949 iovs[0].iov_len = 256;
1950 iovs[1].iov_base = buf + 16 + 256 + 32;
1951 iovs[1].iov_len = 256;
1952
1953 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1954 CU_ASSERT(rc == 0);
1955 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
1956 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
1957 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1958 alignment));
1959 stub_complete_io(1);
1960 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1961
1962 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1963 CU_ASSERT(rc == 0);
1964 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
1965 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
1966 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1967 alignment));
1968 stub_complete_io(1);
1969 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1970
1971 /* Pass iov without allocated buffer without alignment required */
1972 alignment = 1;
1973 bdev->required_alignment = spdk_u32log2(alignment);
1974
1975 iovcnt = 1;
1976 iovs[0].iov_base = NULL;
1977 iovs[0].iov_len = 0;
1978
1979 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1980 CU_ASSERT(rc == 0);
1981 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1982 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1983 alignment));
1984 stub_complete_io(1);
1985
1986 /* Pass iov without allocated buffer with 1024 alignment required */
1987 alignment = 1024;
1988 bdev->required_alignment = spdk_u32log2(alignment);
1989
1990 iovcnt = 1;
1991 iovs[0].iov_base = NULL;
1992 iovs[0].iov_len = 0;
1993
1994 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
1995 CU_ASSERT(rc == 0);
1996 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
1997 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
1998 alignment));
1999 stub_complete_io(1);
2000
2001 spdk_put_io_channel(io_ch);
2002 spdk_bdev_close(desc);
2003 free_bdev(bdev);
2004 fn_table.submit_request = stub_submit_request;
2005 spdk_bdev_finish(bdev_fini_cb, NULL);
2006 poll_threads();
2007
2008 free(buf);
2009}
2010
2011static void
2012 bdev_io_alignment_with_boundary(void)
2013{
2014 struct spdk_bdev *bdev;
2015 struct spdk_bdev_desc *desc = NULL;
2016 struct spdk_io_channel *io_ch;
2017 struct spdk_bdev_opts bdev_opts = {
2018 .bdev_io_pool_size = 20,
2019 .bdev_io_cache_size = 2,
2020 };
2021 int rc;
2022 void *buf;
2023 struct iovec iovs[2];
2024 int iovcnt;
2025 uint64_t alignment;
2026
2027 rc = spdk_bdev_set_opts(&bdev_opts);
2028 CU_ASSERT(rc == 0);
2029 spdk_bdev_initialize(bdev_init_cb, NULL);
2030
2031 fn_table.submit_request = stub_submit_request_get_buf;
2032 bdev = allocate_bdev("bdev0");
2033
2034 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
2035 CU_ASSERT(rc == 0);
2036 CU_ASSERT(desc != NULL);
2037 io_ch = spdk_bdev_get_io_channel(desc);
2038 CU_ASSERT(io_ch != NULL);
2039
2040 /* Create aligned buffer */
2041 rc = posix_memalign(&buf, 4096, 131072);
2042 SPDK_CU_ASSERT_FATAL(rc == 0);
2043 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
2044
2045 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
2046 alignment = 512;
2047 bdev->required_alignment = spdk_u32log2(alignment);
2048 bdev->optimal_io_boundary = 2;
2049 bdev->split_on_optimal_io_boundary = true;
2050
2051 iovcnt = 1;
2052 iovs[0].iov_base = NULL;
2053 iovs[0].iov_len = 512 * 3;
2054
2055 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
2056 CU_ASSERT(rc == 0);
2057 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2058 stub_complete_io(2);
2059
2060 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
2061 alignment = 512;
2062 bdev->required_alignment = spdk_u32log2(alignment);
2063 bdev->optimal_io_boundary = 16;
2064 bdev->split_on_optimal_io_boundary = true;
2065
2066 iovcnt = 1;
2067 iovs[0].iov_base = NULL;
2068 iovs[0].iov_len = 512 * 16;
2069
2070 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
2071 CU_ASSERT(rc == 0);
2072 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2073 stub_complete_io(2);
2074
2075 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */
2076 alignment = 512;
2077 bdev->required_alignment = spdk_u32log2(alignment);
2078 bdev->optimal_io_boundary = 128;
2079 bdev->split_on_optimal_io_boundary = true;
2080
2081 iovcnt = 1;
2082 iovs[0].iov_base = buf + 16;
2083 iovs[0].iov_len = 512 * 160;
2084 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
2085 CU_ASSERT(rc == 0);
2086 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2087 stub_complete_io(2);
2088
2089 /* 512 * 3 with 2 IO boundary */
2090 alignment = 512;
2091 bdev->required_alignment = spdk_u32log2(alignment);
2092 bdev->optimal_io_boundary = 2;
2093 bdev->split_on_optimal_io_boundary = true;
2094
2095 iovcnt = 2;
2096 iovs[0].iov_base = buf + 16;
2097 iovs[0].iov_len = 512;
2098 iovs[1].iov_base = buf + 16 + 512 + 32;
2099 iovs[1].iov_len = 1024;
2100
2101 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
2102 CU_ASSERT(rc == 0);
2103 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2104 stub_complete_io(2);
2105
2106 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
2107 CU_ASSERT(rc == 0);
2108 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2109 stub_complete_io(2);
2110
2111 /* 512 * 64 with 32 IO boundary */
2112 bdev->optimal_io_boundary = 32;
2113 iovcnt = 2;
2114 iovs[0].iov_base = buf + 16;
2115 iovs[0].iov_len = 16384;
2116 iovs[1].iov_base = buf + 16 + 16384 + 32;
2117 iovs[1].iov_len = 16384;
2118
2119 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
2120 CU_ASSERT(rc == 0);
2121 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2122 stub_complete_io(3);
2123
2124 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
2125 CU_ASSERT(rc == 0);
2126 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2127 stub_complete_io(3);
2128
2129 /* 512 * 160 with 32 IO boundary */
2130 iovcnt = 1;
2131 iovs[0].iov_base = buf + 16;
2132 iovs[0].iov_len = 16384 + 65536;
2133
2134 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
2135 CU_ASSERT(rc == 0);
2136 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
2137 stub_complete_io(6);
2138
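/* Worked split arithmetic for the case above: 160 blocks starting at block 1
 * with a 32-block boundary split into children of 31, 32, 32, 32, 32 and 1
 * blocks, i.e. the six children asserted via outstanding_io_count.
 */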
2139 spdk_put_io_channel(io_ch);
2140 spdk_bdev_close(desc);
2141 free_bdev(bdev);
2142 fn_table.submit_request = stub_submit_request;
2143 spdk_bdev_finish(bdev_fini_cb, NULL);
2144 poll_threads();
2145
2146 free(buf);
2147}
2148
2149static void
2150histogram_status_cb(void *cb_arg, int status)
2151{
2152 g_status = status;
2153}
2154
2155static void
2156histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
2157{
2158 g_status = status;
2159 g_histogram = histogram;
2160}
2161
2162static void
2163histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
2164 uint64_t total, uint64_t so_far)
2165{
2166 g_count += count;
2167}
2168
2169static void
2170bdev_histograms(void)
2171{
2172 struct spdk_bdev *bdev;
2173 struct spdk_bdev_desc *desc = NULL;
2174 struct spdk_io_channel *ch;
2175 struct spdk_histogram_data *histogram;
2176 uint8_t buf[4096];
2177 int rc;
2178
2179 spdk_bdev_initialize(bdev_init_cb, NULL);
2180
2181 bdev = allocate_bdev("bdev");
2182
2183 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
2184 CU_ASSERT(rc == 0);
2185 CU_ASSERT(desc != NULL);
2186
2187 ch = spdk_bdev_get_io_channel(desc);
2188 CU_ASSERT(ch != NULL);
2189
2190 /* Enable histogram */
2191 g_status = -1;
2192 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
2193 poll_threads();
2194 CU_ASSERT(g_status == 0);
2195 CU_ASSERT(bdev->internal.histogram_enabled == true);
2196
2197 /* Allocate histogram */
2198 histogram = spdk_histogram_data_alloc();
2199 SPDK_CU_ASSERT_FATAL(histogram != NULL);
2200
2201 /* Check if histogram is zeroed */
2202 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
2203 poll_threads();
2204 CU_ASSERT(g_status == 0);
2205 SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
2206
2207 g_count = 0;
2208 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
2209
2210 CU_ASSERT(g_count == 0);
2211
2212 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL);
2213 CU_ASSERT(rc == 0);
2214
2215 spdk_delay_us(10);
2216 stub_complete_io(1);
2217 poll_threads();
2218
2219 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL);
2220 CU_ASSERT(rc == 0);
2221
2222 spdk_delay_us(10);
2223 stub_complete_io(1);
2224 poll_threads();
2225
2226 /* Check if histogram gathered data from all I/O channels */
2227 g_histogram = NULL;
2228 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
2229 poll_threads();
2230 CU_ASSERT(g_status == 0);
2231 CU_ASSERT(bdev->internal.histogram_enabled == true);
2232 SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
2233
2234 g_count = 0;
2235 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
2236 CU_ASSERT(g_count == 2);
2237
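/* A minimal sketch (with a hypothetical accumulator struct, not used by
 * this test) of deriving a mean latency in ticks from the same iterate
 * API, weighting each bucket's midpoint by its count:
 */
#if 0
struct mean_acc {
	uint64_t total_tsc;
	uint64_t total_count;
};

static void
histogram_mean_cb(void *ctx, uint64_t start, uint64_t end, uint64_t count,
		  uint64_t total, uint64_t so_far)
{
	struct mean_acc *acc = ctx;

	acc->total_tsc += (start + end) / 2 * count;	/* bucket midpoint */
	acc->total_count += count;
}
#endif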
2238 /* Disable histogram */
2239 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
2240 poll_threads();
2241 CU_ASSERT(g_status == 0);
2242 CU_ASSERT(bdev->internal.histogram_enabled == false);
2243
2244 /* Try to run histogram commands on disabled bdev */
2245 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
2246 poll_threads();
2247 CU_ASSERT(g_status == -EFAULT);
2248
2249 spdk_histogram_data_free(histogram);
2250 spdk_put_io_channel(ch);
2251 spdk_bdev_close(desc);
2252 free_bdev(bdev);
2253 spdk_bdev_finish(bdev_fini_cb, NULL);
2254 poll_threads();
2255}
2256
2257static void
2258_bdev_compare(bool emulated)
2259 {
2260 struct spdk_bdev *bdev;
2261 struct spdk_bdev_desc *desc = NULL;
2262 struct spdk_io_channel *ioch;
2263 struct ut_expected_io *expected_io;
2264 uint64_t offset, num_blocks;
2265 uint32_t num_completed;
2266 char aa_buf[512];
2267 char bb_buf[512];
2268 struct iovec compare_iov;
2269 uint8_t io_type;
2270 int rc;
2271
2272 if (emulated) {
2273 io_type = SPDK_BDEV_IO_TYPE_READ;
2274 } else {
2275 io_type = SPDK_BDEV_IO_TYPE_COMPARE;
2276 }
2277
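/* In the emulated case the backing module reports no COMPARE support, so
 * the bdev layer services the compare by issuing a READ and comparing the
 * returned data against the caller's buffer; that is why io_type, and thus
 * the expected I/O type registered below, is READ rather than COMPARE.
 */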
2278 memset(aa_buf, 0xaa, sizeof(aa_buf));
2279 memset(bb_buf, 0xbb, sizeof(bb_buf));
2280
2281 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;
2282
2283 spdk_bdev_initialize(bdev_init_cb, NULL);
2284 fn_table.submit_request = stub_submit_request_get_buf;
2285 bdev = allocate_bdev("bdev");
2286
2287 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
2288 CU_ASSERT_EQUAL(rc, 0);
2289 SPDK_CU_ASSERT_FATAL(desc != NULL);
2290 ioch = spdk_bdev_get_io_channel(desc);
2291 SPDK_CU_ASSERT_FATAL(ioch != NULL);
2292
2293 fn_table.submit_request = stub_submit_request_get_buf;
2294 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
2295
2296 offset = 50;
2297 num_blocks = 1;
2298 compare_iov.iov_base = aa_buf;
2299 compare_iov.iov_len = sizeof(aa_buf);
2300
2301 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0);
2302 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2303
2304 g_io_done = false;
2305 g_compare_read_buf = aa_buf;
2306 g_compare_read_buf_len = sizeof(aa_buf);
2307 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
2308 CU_ASSERT_EQUAL(rc, 0);
2309 num_completed = stub_complete_io(1);
2310 CU_ASSERT_EQUAL(num_completed, 1);
2311 CU_ASSERT(g_io_done == true);
2312 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
2313
2314 expected_io = ut_alloc_expected_io(io_type, offset, num_blocks, 0);
2315 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2316
2317 g_io_done = false;
2318 g_compare_read_buf = bb_buf;
2319 g_compare_read_buf_len = sizeof(bb_buf);
2320 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
2321 CU_ASSERT_EQUAL(rc, 0);
2322 num_completed = stub_complete_io(1);
2323 CU_ASSERT_EQUAL(num_completed, 1);
2324 CU_ASSERT(g_io_done == true);
2325 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
2326
2327 spdk_put_io_channel(ioch);
2328 spdk_bdev_close(desc);
2329 free_bdev(bdev);
2330 fn_table.submit_request = stub_submit_request;
2331 spdk_bdev_finish(bdev_fini_cb, NULL);
2332 poll_threads();
2333
2334 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;
2335
2336 g_compare_read_buf = NULL;
2337}
2338
2339static void
2340bdev_compare(void)
2341{
2342 _bdev_compare(true);
2343 _bdev_compare(false);
2344}
2345
2346static void
2347bdev_compare_and_write(void)
2348{
2349 struct spdk_bdev *bdev;
2350 struct spdk_bdev_desc *desc = NULL;
2351 struct spdk_io_channel *ioch;
2352 struct ut_expected_io *expected_io;
2353 uint64_t offset, num_blocks;
2354 uint32_t num_completed;
2355 char aa_buf[512];
2356 char bb_buf[512];
2357 char cc_buf[512];
2358 char write_buf[512];
2359 struct iovec compare_iov;
2360 struct iovec write_iov;
2361 int rc;
2362
2363 memset(aa_buf, 0xaa, sizeof(aa_buf));
2364 memset(bb_buf, 0xbb, sizeof(bb_buf));
2365 memset(cc_buf, 0xcc, sizeof(cc_buf));
2366
2367 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false;
2368
2369 spdk_bdev_initialize(bdev_init_cb, NULL);
2370 fn_table.submit_request = stub_submit_request_get_buf;
2371 bdev = allocate_bdev("bdev");
2372
2373 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
2374 CU_ASSERT_EQUAL(rc, 0);
2375 SPDK_CU_ASSERT_FATAL(desc != NULL);
2376 ioch = spdk_bdev_get_io_channel(desc);
2377 SPDK_CU_ASSERT_FATAL(ioch != NULL);
2378
2379 fn_table.submit_request = stub_submit_request_get_buf;
2380 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
2381
2382 offset = 50;
2383 num_blocks = 1;
2384 compare_iov.iov_base = aa_buf;
2385 compare_iov.iov_len = sizeof(aa_buf);
2386 write_iov.iov_base = bb_buf;
2387 write_iov.iov_len = sizeof(bb_buf);
2388
2389 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
2390 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2391 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0);
2392 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2393
2394 g_io_done = false;
2395 g_compare_read_buf = aa_buf;
2396 g_compare_read_buf_len = sizeof(aa_buf);
2397 memset(write_buf, 0, sizeof(write_buf));
2398 g_compare_write_buf = write_buf;
2399 g_compare_write_buf_len = sizeof(write_buf);
2400 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
2401 offset, num_blocks, io_done, NULL);
2402 /* Trigger range locking */
2403 poll_threads();
2404 CU_ASSERT_EQUAL(rc, 0);
2405 num_completed = stub_complete_io(1);
2406 CU_ASSERT_EQUAL(num_completed, 1);
2407 CU_ASSERT(g_io_done == false);
2408 num_completed = stub_complete_io(1);
2409 /* Trigger range unlocking */
2410 poll_threads();
2411 CU_ASSERT_EQUAL(num_completed, 1);
2412 CU_ASSERT(g_io_done == true);
2413 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
2414 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0);
2415
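/* The sequence above is the emulated compare-and-write flow: the range is
 * locked first, the compare phase completes as the first stub I/O, and only
 * then is the write phase submitted and completed as the second stub I/O
 * before the range is unlocked.
 */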
2416 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
2417 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2418
2419 g_io_done = false;
2420 g_compare_read_buf = cc_buf;
2421 g_compare_read_buf_len = sizeof(cc_buf);
2422 memset(write_buf, 0, sizeof(write_buf));
2423 g_compare_write_buf = write_buf;
2424 g_compare_write_buf_len = sizeof(write_buf);
2425 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
2426 offset, num_blocks, io_done, NULL);
2427 /* Trigger range locking */
2428 poll_threads();
2429 CU_ASSERT_EQUAL(rc, 0);
2430 num_completed = stub_complete_io(1);
2431 /* Trigger range unlocking earlier because we expect error here */
2432 poll_threads();
2433 CU_ASSERT_EQUAL(num_completed, 1);
2434 CU_ASSERT(g_io_done == true);
2435 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
2436 num_completed = stub_complete_io(1);
2437 CU_ASSERT_EQUAL(num_completed, 0);
2438
2439 spdk_put_io_channel(ioch);
2440 spdk_bdev_close(desc);
2441 free_bdev(bdev);
2442 fn_table.submit_request = stub_submit_request;
2443 spdk_bdev_finish(bdev_fini_cb, NULL);
2444 poll_threads();
2445
2446 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;
2447
2448 g_compare_read_buf = NULL;
2449 g_compare_write_buf = NULL;
2450}
2451
2452static void
2453bdev_write_zeroes(void)
2454{
2455 struct spdk_bdev *bdev;
2456 struct spdk_bdev_desc *desc = NULL;
2457 struct spdk_io_channel *ioch;
2458 struct ut_expected_io *expected_io;
2459 uint64_t offset, num_io_blocks, num_blocks;
2460 uint32_t num_completed, num_requests;
2461 int rc;
2462
2463 spdk_bdev_initialize(bdev_init_cb, NULL);
2464 bdev = allocate_bdev("bdev");
2465
2466 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
2467 CU_ASSERT_EQUAL(rc, 0);
2468 SPDK_CU_ASSERT_FATAL(desc != NULL);
2469 ioch = spdk_bdev_get_io_channel(desc);
2470 SPDK_CU_ASSERT_FATAL(ioch != NULL);
2471
2472 fn_table.submit_request = stub_submit_request;
2473 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
2474
2475 /* First test that if the bdev supports write_zeroes, the request won't be split */
2476 bdev->md_len = 0;
2477 bdev->blocklen = 4096;
2478 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
2479
2480 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
2481 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2482 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
2483 CU_ASSERT_EQUAL(rc, 0);
2484 num_completed = stub_complete_io(1);
2485 CU_ASSERT_EQUAL(num_completed, 1);
2486
2487 /* Check that if write zeroes is not supported it'll be replaced by regular writes */
2488 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
2489 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
2490 num_requests = 2;
2491 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;
2492
2493 for (offset = 0; offset < num_requests; ++offset) {
2494 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
2495 offset * num_io_blocks, num_io_blocks, 0);
2496 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2497 }
2498
2499 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
2500 CU_ASSERT_EQUAL(rc, 0);
2501 num_completed = stub_complete_io(num_requests);
2502 CU_ASSERT_EQUAL(num_completed, num_requests);
2503
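/* Worked example for the emulated case above: num_blocks spans two full
 * zero buffers (2 * ZERO_BUFFER_SIZE / blocklen blocks), so the request is
 * emulated as exactly two WRITE I/Os, each sourcing its payload from the
 * shared zero buffer.
 */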
2504 /* Check that the splitting is correct if bdev has interleaved metadata */
2505 bdev->md_interleave = true;
2506 bdev->md_len = 64;
2507 bdev->blocklen = 4096 + 64;
2508 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;
2509
2510 num_requests = offset = 0;
2511 while (offset < num_blocks) {
2512 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
2513 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
2514 offset, num_io_blocks, 0);
2515 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2516 offset += num_io_blocks;
2517 num_requests++;
2518 }
2519
2520 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
2521 CU_ASSERT_EQUAL(rc, 0);
2522 num_completed = stub_complete_io(num_requests);
2523 CU_ASSERT_EQUAL(num_completed, num_requests);
2524 num_completed = stub_complete_io(num_requests);
2525 assert(num_completed == 0);
2526
2527 /* Check the same for a separate metadata buffer */
2528 bdev->md_interleave = false;
2529 bdev->md_len = 64;
2530 bdev->blocklen = 4096;
2531
2532 num_requests = offset = 0;
2533 while (offset < num_blocks) {
2534 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
2535 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
2536 offset, num_io_blocks, 0);
2537 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
2538 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2539 offset += num_io_blocks;
2540 num_requests++;
2541 }
2542
2543 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
2544 CU_ASSERT_EQUAL(rc, 0);
2545 num_completed = stub_complete_io(num_requests);
2546 CU_ASSERT_EQUAL(num_completed, num_requests);
2547
2548 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
2549 spdk_put_io_channel(ioch);
2550 spdk_bdev_close(desc);
2551 free_bdev(bdev);
2552 spdk_bdev_finish(bdev_fini_cb, NULL);
2553 poll_threads();
2554}
2555
2556static void
2557bdev_open_while_hotremove(void)
2558{
2559 struct spdk_bdev *bdev;
2560 struct spdk_bdev_desc *desc[2] = {};
2561 int rc;
2562
2563 bdev = allocate_bdev("bdev");
2564
2565 rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[0]);
2566 CU_ASSERT(rc == 0);
2567 SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
2568
2569 spdk_bdev_unregister(bdev, NULL, NULL);
2570
2571 rc = spdk_bdev_open(bdev, false, NULL, NULL, &desc[1]);
2572 CU_ASSERT(rc == -ENODEV);
2573 SPDK_CU_ASSERT_FATAL(desc[1] == NULL);
2574
2575 spdk_bdev_close(desc[0]);
2576 free_bdev(bdev);
2577}
2578
2579static void
2580bdev_close_while_hotremove(void)
2581{
2582 struct spdk_bdev *bdev;
2583 struct spdk_bdev_desc *desc = NULL;
2584 int rc = 0;
2585
2586 bdev = allocate_bdev("bdev");
2587
2588 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
2589 CU_ASSERT_EQUAL(rc, 0);
2590
2591 /* Simulate hot-unplug by unregistering bdev */
2592 g_event_type1 = 0xFF;
2593 g_unregister_arg = NULL;
2594 g_unregister_rc = -1;
2595 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
2596 /* Close device while remove event is in flight */
2597 spdk_bdev_close(desc);
2598
2599 /* Ensure that unregister callback is delayed */
2600 CU_ASSERT_EQUAL(g_unregister_arg, NULL);
2601 CU_ASSERT_EQUAL(g_unregister_rc, -1);
2602
2603 poll_threads();
2604
2605 /* Event callback shall not be issued because device was closed */
2606 CU_ASSERT_EQUAL(g_event_type1, 0xFF);
2607 /* Unregister callback is issued */
2608 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
2609 CU_ASSERT_EQUAL(g_unregister_rc, 0);
2610
2611 free_bdev(bdev);
2612}
2613
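/* A minimal sketch (hypothetical consumer code) of the event-callback
 * pattern that bdev_open_cb1/bdev_open_cb2 stand in for: on REMOVE, close
 * the descriptor so a pending unregister can complete.
 */
#if 0
static void
my_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc **desc = event_ctx;

	if (type == SPDK_BDEV_EVENT_REMOVE && *desc != NULL) {
		spdk_bdev_close(*desc);
		*desc = NULL;
	}
}
#endif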
2614static void
2615bdev_open_ext(void)
2616{
2617 struct spdk_bdev *bdev;
2618 struct spdk_bdev_desc *desc1 = NULL;
2619 struct spdk_bdev_desc *desc2 = NULL;
2620 int rc = 0;
2621
2622 bdev = allocate_bdev("bdev");
2623
2624 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
2625 CU_ASSERT_EQUAL(rc, -EINVAL);
2626
2627 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
2628 CU_ASSERT_EQUAL(rc, 0);
2629
2630 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
2631 CU_ASSERT_EQUAL(rc, 0);
2632
2633 g_event_type1 = 0xFF;
2634 g_event_type2 = 0xFF;
2635
2636 /* Simulate hot-unplug by unregistering bdev */
2637 spdk_bdev_unregister(bdev, NULL, NULL);
2638 poll_threads();
2639
2640 /* Check if correct events have been triggered in event callback fn */
2641 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
2642 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);
2643
2644 free_bdev(bdev);
2645 poll_threads();
2646}
2647
2648struct timeout_io_cb_arg {
2649 struct iovec iov;
2650 uint8_t type;
2651};
2652
2653static int
2654bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
2655{
2656 struct spdk_bdev_io *bdev_io;
2657 int n = 0;
2658
2659 if (!ch) {
2660 return -1;
2661 }
2662
2663 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
2664 n++;
2665 }
2666
2667 return n;
2668}
2669
2670static void
2671bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
2672{
2673 struct timeout_io_cb_arg *ctx = cb_arg;
2674
2675 ctx->type = bdev_io->type;
2676 ctx->iov.iov_base = bdev_io->iov.iov_base;
2677 ctx->iov.iov_len = bdev_io->iov.iov_len;
2678}
2679
2680static void
2681bdev_set_io_timeout(void)
2682{
2683 struct spdk_bdev *bdev;
2684 struct spdk_bdev_desc *desc = NULL;
2685 struct spdk_io_channel *io_ch = NULL;
2686 struct spdk_bdev_channel *bdev_ch = NULL;
2687 struct timeout_io_cb_arg cb_arg;
2688
2689 spdk_bdev_initialize(bdev_init_cb, NULL);
2690
2691 bdev = allocate_bdev("bdev");
2692
2693 CU_ASSERT(spdk_bdev_open(bdev, true, NULL, NULL, &desc) == 0);
2694 SPDK_CU_ASSERT_FATAL(desc != NULL);
2695 io_ch = spdk_bdev_get_io_channel(desc);
2696 CU_ASSERT(io_ch != NULL);
2697
2698 bdev_ch = spdk_io_channel_get_ctx(io_ch);
2699 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));
2700
2701 /* This is part 1.
2702 * We will check the bdev_ch->io_submitted list
2703 * to make sure that it links the I/Os submitted to this channel.
2704 */
2705 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
2706 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
2707 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
2708 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
2709 stub_complete_io(1);
2710 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
2711 stub_complete_io(1);
2712 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
2713
2714 /* Split IO */
2715 bdev->optimal_io_boundary = 16;
2716 bdev->split_on_optimal_io_boundary = true;
2717
2718 /* Now test that a single-vector command is split correctly.
2719 * Offset 14, length 8, payload 0xF000
2720 * Child - Offset 14, length 2, payload 0xF000
2721 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
2722 *
2723 * (No expected child I/Os are registered here; this part only counts submitted I/Os.)
2724 */
2725 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
2726 /* We count all submitted I/Os, including those generated by splitting. */
2727 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
2728 stub_complete_io(1);
2729 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
2730 stub_complete_io(1);
2731 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
2732
2733 /* Also include the reset IO */
2734 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
2735 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
2736 poll_threads();
2737 stub_complete_io(1);
2738 poll_threads();
2739 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);
2740
2741 /* This is part 2.
2742 * Test the registration of the desc timeout poller.
2743 */
2744
2745 /* Successfully set the timeout */
2746 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
2747 CU_ASSERT(desc->io_timeout_poller != NULL);
2748 CU_ASSERT(desc->timeout_in_sec == 30);
2749 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
2750 CU_ASSERT(desc->cb_arg == &cb_arg);
2751
2752 /* Change the timeout limit */
2753 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
2754 CU_ASSERT(desc->io_timeout_poller != NULL);
2755 CU_ASSERT(desc->timeout_in_sec == 20);
2756 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
2757 CU_ASSERT(desc->cb_arg == &cb_arg);
2758
2759 /* Disable the timeout */
2760 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
2761 CU_ASSERT(desc->io_timeout_poller == NULL);
2762
2763 /* This is part 3.
2764 * We will test catching a timed-out I/O and check whether that I/O is
2765 * the one we submitted.
2766 */
2767 memset(&cb_arg, 0, sizeof(cb_arg));
2768 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
2769 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);
2770
2771 /* Don't reach the limit */
2772 spdk_delay_us(15 * spdk_get_ticks_hz());
2773 poll_threads();
2774 CU_ASSERT(cb_arg.type == 0);
2775 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2776 CU_ASSERT(cb_arg.iov.iov_len == 0);
2777
2778 /* 15 + 15 = 30 reach the limit */
2779 spdk_delay_us(15 * spdk_get_ticks_hz());
2780 poll_threads();
2781 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2782 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
2783 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
2784 stub_complete_io(1);
2785
2786 /* Use the same split IO above and check the IO */
2787 memset(&cb_arg, 0, sizeof(cb_arg));
2788 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
2789
2790 /* The first child complete in time */
2791 spdk_delay_us(15 * spdk_get_ticks_hz());
2792 poll_threads();
2793 stub_complete_io(1);
2794 CU_ASSERT(cb_arg.type == 0);
2795 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2796 CU_ASSERT(cb_arg.iov.iov_len == 0);
2797
2798 /* The second child reach the limit */
2799 spdk_delay_us(15 * spdk_get_ticks_hz());
2800 poll_threads();
2801 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2802 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
2803 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
2804 stub_complete_io(1);
2805
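/* Note that the timeout callback reported the parent (user-submitted) I/O,
 * payload 0xF000 and 8 blocks, rather than the individual late child: the
 * poller fires on the I/Os tracked in io_submitted once timeout_in_sec has
 * elapsed since submission.
 */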
2806 /* Also include the reset IO */
2807 memset(&cb_arg, 0, sizeof(cb_arg));
2808 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
2809 spdk_delay_us(30 * spdk_get_ticks_hz());
2810 poll_threads();
2811 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
2812 stub_complete_io(1);
2813 poll_threads();
2814
2815 spdk_put_io_channel(io_ch);
2816 spdk_bdev_close(desc);
2817 free_bdev(bdev);
2818 spdk_bdev_finish(bdev_fini_cb, NULL);
2819 poll_threads();
2820}
2821
2822static void
2823lba_range_overlap(void)
2824{
2825 struct lba_range r1, r2;
2826
2827 r1.offset = 100;
2828 r1.length = 50;
2829
2830 r2.offset = 0;
2831 r2.length = 1;
2832 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
2833
2834 r2.offset = 0;
2835 r2.length = 100;
2836 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
2837
2838 r2.offset = 0;
2839 r2.length = 110;
2840 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
2841
2842 r2.offset = 100;
2843 r2.length = 10;
2844 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
2845
2846 r2.offset = 110;
2847 r2.length = 20;
2848 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
2849
2850 r2.offset = 140;
2851 r2.length = 150;
2852 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
2853
2854 r2.offset = 130;
2855 r2.length = 200;
2856 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));
2857
2858 r2.offset = 150;
2859 r2.length = 100;
2860 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
2861
2862 r2.offset = 110;
2863 r2.length = 0;
2864 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
2865}
2866
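/* The cases above are consistent with the usual half-open interval test;
 * a sketch of the predicate, with zero-length ranges never overlapping:
 *
 *   r1->length != 0 && r2->length != 0 &&
 *   r1->offset < r2->offset + r2->length &&
 *   r2->offset < r1->offset + r1->length
 */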
2867static bool g_lock_lba_range_done;
2868static bool g_unlock_lba_range_done;
2869
2870static void
2871lock_lba_range_done(void *ctx, int status)
2872{
2873 g_lock_lba_range_done = true;
2874}
2875
2876static void
2877unlock_lba_range_done(void *ctx, int status)
2878{
2879 g_unlock_lba_range_done = true;
2880}
2881
2882static void
2883lock_lba_range_check_ranges(void)
2884{
2885 struct spdk_bdev *bdev;
2886 struct spdk_bdev_desc *desc = NULL;
2887 struct spdk_io_channel *io_ch;
2888 struct spdk_bdev_channel *channel;
2889 struct lba_range *range;
2890 int ctx1;
2891 int rc;
2892
2893 spdk_bdev_initialize(bdev_init_cb, NULL);
2894
2895 bdev = allocate_bdev("bdev0");
2896
2897 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
2898 CU_ASSERT(rc == 0);
2899 CU_ASSERT(desc != NULL);
2900 io_ch = spdk_bdev_get_io_channel(desc);
2901 CU_ASSERT(io_ch != NULL);
2902 channel = spdk_io_channel_get_ctx(io_ch);
2903
2904 g_lock_lba_range_done = false;
2905 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
2906 CU_ASSERT(rc == 0);
2907 poll_threads();
2908
2909 CU_ASSERT(g_lock_lba_range_done == true);
2910 range = TAILQ_FIRST(&channel->locked_ranges);
2911 SPDK_CU_ASSERT_FATAL(range != NULL);
2912 CU_ASSERT(range->offset == 20);
2913 CU_ASSERT(range->length == 10);
2914 CU_ASSERT(range->owner_ch == channel);
2915
2916 /* Unlocks must exactly match a lock. */
2917 g_unlock_lba_range_done = false;
2918 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1);
2919 CU_ASSERT(rc == -EINVAL);
2920 CU_ASSERT(g_unlock_lba_range_done == false);
2921
2922 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
2923 CU_ASSERT(rc == 0);
2924 spdk_delay_us(100);
2925 poll_threads();
2926
2927 CU_ASSERT(g_unlock_lba_range_done == true);
2928 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
2929
2930 spdk_put_io_channel(io_ch);
2931 spdk_bdev_close(desc);
2932 free_bdev(bdev);
2933 spdk_bdev_finish(bdev_fini_cb, NULL);
2934 poll_threads();
2935}
2936
2937static void
2938lock_lba_range_with_io_outstanding(void)
2939{
2940 struct spdk_bdev *bdev;
2941 struct spdk_bdev_desc *desc = NULL;
2942 struct spdk_io_channel *io_ch;
2943 struct spdk_bdev_channel *channel;
2944 struct lba_range *range;
2945 char buf[4096];
2946 int ctx1;
2947 int rc;
2948
2949 spdk_bdev_initialize(bdev_init_cb, NULL);
2950
2951 bdev = allocate_bdev("bdev0");
2952
2953 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
2954 CU_ASSERT(rc == 0);
2955 CU_ASSERT(desc != NULL);
2956 io_ch = spdk_bdev_get_io_channel(desc);
2957 CU_ASSERT(io_ch != NULL);
2958 channel = spdk_io_channel_get_ctx(io_ch);
2959
2960 g_io_done = false;
2961 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
2962 CU_ASSERT(rc == 0);
2963
2964 g_lock_lba_range_done = false;
2965 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
2966 CU_ASSERT(rc == 0);
2967 poll_threads();
2968
2969 /* The lock should immediately become valid, since there are no outstanding
2970 * write I/O.
2971 */
2972 CU_ASSERT(g_io_done == false);
2973 CU_ASSERT(g_lock_lba_range_done == true);
2974 range = TAILQ_FIRST(&channel->locked_ranges);
2975 SPDK_CU_ASSERT_FATAL(range != NULL);
2976 CU_ASSERT(range->offset == 20);
2977 CU_ASSERT(range->length == 10);
2978 CU_ASSERT(range->owner_ch == channel);
2979 CU_ASSERT(range->locked_ctx == &ctx1);
2980
2981 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
2982 CU_ASSERT(rc == 0);
2983 stub_complete_io(1);
2984 spdk_delay_us(100);
2985 poll_threads();
2986
2987 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
2988
2989 /* Now try again, but with a write I/O. */
2990 g_io_done = false;
2991 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
2992 CU_ASSERT(rc == 0);
2993
2994 g_lock_lba_range_done = false;
2995 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
2996 CU_ASSERT(rc == 0);
2997 poll_threads();
2998
2999 /* The lock should not be fully valid yet, since a write I/O is outstanding.
3000 * But note that the range should be on the channel's locked_ranges list, to make
3001 * sure no new write I/Os are started.
3002 */
3003 CU_ASSERT(g_io_done == false);
3004 CU_ASSERT(g_lock_lba_range_done == false);
3005 range = TAILQ_FIRST(&channel->locked_ranges);
3006 SPDK_CU_ASSERT_FATAL(range != NULL);
3007 CU_ASSERT(range->offset == 20);
3008 CU_ASSERT(range->length == 10);
3009
3010 /* Complete the write I/O. This should make the lock valid (checked by confirming
3011 * our callback was invoked).
3012 */
3013 stub_complete_io(1);
3014 spdk_delay_us(100);
3015 poll_threads();
3016 CU_ASSERT(g_io_done == true);
3017 CU_ASSERT(g_lock_lba_range_done == true);
3018
3019 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
3020 CU_ASSERT(rc == 0);
3021 poll_threads();
3022
3023 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));
3024
3025 spdk_put_io_channel(io_ch);
3026 spdk_bdev_close(desc);
3027 free_bdev(bdev);
3028 spdk_bdev_finish(bdev_fini_cb, NULL);
3029 poll_threads();
3030}
3031
3032static void
3033lock_lba_range_overlapped(void)
3034{
3035 struct spdk_bdev *bdev;
3036 struct spdk_bdev_desc *desc = NULL;
3037 struct spdk_io_channel *io_ch;
3038 struct spdk_bdev_channel *channel;
3039 struct lba_range *range;
3040 int ctx1;
3041 int rc;
3042
3043 spdk_bdev_initialize(bdev_init_cb, NULL);
3044
3045 bdev = allocate_bdev("bdev0");
3046
3047 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
3048 CU_ASSERT(rc == 0);
3049 CU_ASSERT(desc != NULL);
3050 io_ch = spdk_bdev_get_io_channel(desc);
3051 CU_ASSERT(io_ch != NULL);
3052 channel = spdk_io_channel_get_ctx(io_ch);
3053
3054 /* Lock range 20-29. */
3055 g_lock_lba_range_done = false;
3056 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
3057 CU_ASSERT(rc == 0);
3058 poll_threads();
3059
3060 CU_ASSERT(g_lock_lba_range_done == true);
3061 range = TAILQ_FIRST(&channel->locked_ranges);
3062 SPDK_CU_ASSERT_FATAL(range != NULL);
3063 CU_ASSERT(range->offset == 20);
3064 CU_ASSERT(range->length == 10);
3065
3066 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with
3067 * 20-29.
3068 */
3069 g_lock_lba_range_done = false;
3070 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1);
3071 CU_ASSERT(rc == 0);
3072 poll_threads();
3073
3074 CU_ASSERT(g_lock_lba_range_done == false);
3075 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
3076 SPDK_CU_ASSERT_FATAL(range != NULL);
3077 CU_ASSERT(range->offset == 25);
3078 CU_ASSERT(range->length == 15);
3079
3080 /* Unlock 20-29. This should result in range 25-39 now getting locked since it
3081 * no longer overlaps with an active lock.
3082 */
3083 g_unlock_lba_range_done = false;
3084 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
3085 CU_ASSERT(rc == 0);
3086 poll_threads();
3087
3088 CU_ASSERT(g_unlock_lba_range_done == true);
3089 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
3090 range = TAILQ_FIRST(&channel->locked_ranges);
3091 SPDK_CU_ASSERT_FATAL(range != NULL);
3092 CU_ASSERT(range->offset == 25);
3093 CU_ASSERT(range->length == 15);
3094
3095 /* Lock 40-59. This should immediately lock since it does not overlap with the
3096 * currently active 25-39 lock.
3097 */
3098 g_lock_lba_range_done = false;
3099 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1);
3100 CU_ASSERT(rc == 0);
3101 poll_threads();
3102
3103 CU_ASSERT(g_lock_lba_range_done == true);
3104 range = TAILQ_FIRST(&bdev->internal.locked_ranges);
3105 SPDK_CU_ASSERT_FATAL(range != NULL);
3106 range = TAILQ_NEXT(range, tailq);
3107 SPDK_CU_ASSERT_FATAL(range != NULL);
3108 CU_ASSERT(range->offset == 40);
3109 CU_ASSERT(range->length == 20);
3110
3111 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */
3112 g_lock_lba_range_done = false;
3113 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1);
3114 CU_ASSERT(rc == 0);
3115 poll_threads();
3116
3117 CU_ASSERT(g_lock_lba_range_done == false);
3118 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
3119 SPDK_CU_ASSERT_FATAL(range != NULL);
3120 CU_ASSERT(range->offset == 35);
3121 CU_ASSERT(range->length == 10);
3122
3123 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since
3124 * the 40-59 lock is still active.
3125 */
3126 g_unlock_lba_range_done = false;
3127 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1);
3128 CU_ASSERT(rc == 0);
3129 poll_threads();
3130
3131 CU_ASSERT(g_unlock_lba_range_done == true);
3132 CU_ASSERT(g_lock_lba_range_done == false);
3133 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges);
3134 SPDK_CU_ASSERT_FATAL(range != NULL);
3135 CU_ASSERT(range->offset == 35);
3136 CU_ASSERT(range->length == 10);
3137
3138 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are
3139 * no longer any active overlapping locks.
3140 */
3141 g_unlock_lba_range_done = false;
3142 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1);
3143 CU_ASSERT(rc == 0);
3144 poll_threads();
3145
3146 CU_ASSERT(g_unlock_lba_range_done == true);
3147 CU_ASSERT(g_lock_lba_range_done == true);
3148 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges));
3149 range = TAILQ_FIRST(&bdev->internal.locked_ranges);
3150 SPDK_CU_ASSERT_FATAL(range != NULL);
3151 CU_ASSERT(range->offset == 35);
3152 CU_ASSERT(range->length == 10);
3153
3154 /* Finally, unlock 35-44. */
3155 g_unlock_lba_range_done = false;
3156 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1);
3157 CU_ASSERT(rc == 0);
3158 poll_threads();
3159
3160 CU_ASSERT(g_unlock_lba_range_done == true);
3161 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges));
3162
3163 spdk_put_io_channel(io_ch);
3164 spdk_bdev_close(desc);
3165 free_bdev(bdev);
3166 spdk_bdev_finish(bdev_fini_cb, NULL);
3167 poll_threads();
3168}
3169
3170static void
3171abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
3172{
3173 g_abort_done = true;
3174 g_abort_status = bdev_io->internal.status;
3175 spdk_bdev_free_io(bdev_io);
3176}
3177
3178static void
3179bdev_io_abort(void)
3180{
3181 struct spdk_bdev *bdev;
3182 struct spdk_bdev_desc *desc = NULL;
3183 struct spdk_io_channel *io_ch;
3184 struct spdk_bdev_channel *channel;
3185 struct spdk_bdev_mgmt_channel *mgmt_ch;
3186 struct spdk_bdev_opts bdev_opts = {
3187 .bdev_io_pool_size = 7,
3188 .bdev_io_cache_size = 2,
3189 };
3190 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
3191 uint64_t io_ctx1 = 0, io_ctx2 = 0, i;
3192 int rc;
3193
3194 rc = spdk_bdev_set_opts(&bdev_opts);
3195 CU_ASSERT(rc == 0);
3196 spdk_bdev_initialize(bdev_init_cb, NULL);
3197
3198 bdev = allocate_bdev("bdev0");
3199
3200 rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
3201 CU_ASSERT(rc == 0);
3202 CU_ASSERT(desc != NULL);
3203 io_ch = spdk_bdev_get_io_channel(desc);
3204 CU_ASSERT(io_ch != NULL);
3205 channel = spdk_io_channel_get_ctx(io_ch);
3206 mgmt_ch = channel->shared_resource->mgmt_ch;
3207
3208 g_abort_done = false;
3209
3210 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false);
3211
3212 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
3213 CU_ASSERT(rc == -ENOTSUP);
3214
3215 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true);
3216
3217 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL);
3218 CU_ASSERT(rc == 0);
3219 CU_ASSERT(g_abort_done == true);
3220 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED);
3221
3222 /* Test the case that the target I/O was successfully aborted. */
3223 g_io_done = false;
3224
3225 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
3226 CU_ASSERT(rc == 0);
3227 CU_ASSERT(g_io_done == false);
3228
3229 g_abort_done = false;
3230 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
3231
3232 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
3233 CU_ASSERT(rc == 0);
3234 CU_ASSERT(g_io_done == true);
3235 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3236 stub_complete_io(1);
3237 CU_ASSERT(g_abort_done == true);
3238 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
3239
3240	/* Test the case that the target I/O was not aborted because it completed in
3241	 * the middle of execution of the abort; the abort as a whole still succeeds.
3242	 */
3243 g_io_done = false;
3244
3245 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
3246 CU_ASSERT(rc == 0);
3247 CU_ASSERT(g_io_done == false);
3248
3249 g_abort_done = false;
3250 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
3251
3252 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
3253 CU_ASSERT(rc == 0);
3254 CU_ASSERT(g_io_done == false);
3255
3256 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
3257 stub_complete_io(1);
3258 CU_ASSERT(g_io_done == true);
3259 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
3260
3261 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
3262 stub_complete_io(1);
3263 CU_ASSERT(g_abort_done == true);
3264 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
3265
3266 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
3267
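	/* Enable splitting so that the following cases abort I/O that the bdev
	 * layer has split into multiple child I/O. */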
3268 bdev->optimal_io_boundary = 16;
3269 bdev->split_on_optimal_io_boundary = true;
3270
3271 /* Test that a single-vector command which is split is aborted correctly.
3272 * Offset 14, length 8, payload 0xF000
3273 * Child - Offset 14, length 2, payload 0xF000
3274 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
3275 */
3276 g_io_done = false;
3277
3278 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
3279 CU_ASSERT(rc == 0);
3280 CU_ASSERT(g_io_done == false);
3281
3282 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
3283
3284 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
3285
3286 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
3287 CU_ASSERT(rc == 0);
3288 CU_ASSERT(g_io_done == true);
3289 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3290 stub_complete_io(2);
3291 CU_ASSERT(g_abort_done == true);
3292 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
3293
3294	/* Test that a multi-vector command that needs to be split both on the
3295	 * optimal I/O boundary and by the child iov capacity is aborted correctly.
3296	 * The abort is requested before the second child I/O is submitted; the
3297	 * parent I/O should then complete with failure without the second child
3298	 * ever being submitted.
	 */
3299 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
3300 iov[i].iov_base = (void *)((i + 1) * 0x10000);
3301 iov[i].iov_len = 512;
3302 }
3303
3304 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
3305 g_io_done = false;
3306 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
3307 BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
3308 CU_ASSERT(rc == 0);
3309 CU_ASSERT(g_io_done == false);
3310
3311 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
3312
3313 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
3314
3315 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
3316 CU_ASSERT(rc == 0);
3317 CU_ASSERT(g_io_done == true);
3318 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3319 stub_complete_io(1);
3320 CU_ASSERT(g_abort_done == true);
3321 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
3322
3323 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
3324
3325 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3326
3327 bdev->optimal_io_boundary = 16;
3328 g_io_done = false;
3329
3330	/* Test that a single-vector command which is split is aborted correctly.
3331	 * Unlike the cases above, the child abort requests are submitted
3332	 * sequentially due to the limited capacity of the spdk_bdev_io pool.
3333	 */
3334 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1);
3335 CU_ASSERT(rc == 0);
3336 CU_ASSERT(g_io_done == false);
3337
3338 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
3339
3340 g_abort_done = false;
3341 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
3342
3343 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
3344 CU_ASSERT(rc == 0);
3345 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
3346 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
3347
3348 stub_complete_io(1);
3349 CU_ASSERT(g_io_done == true);
3350 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
3351 stub_complete_io(3);
3352 CU_ASSERT(g_abort_done == true);
3353 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
3354
3355 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
3356
3357 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
3358
3359 spdk_put_io_channel(io_ch);
3360 spdk_bdev_close(desc);
3361 free_bdev(bdev);
3362 spdk_bdev_finish(bdev_fini_cb, NULL);
3363 poll_threads();
3364}
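
/*
 * For reference, a minimal sketch (not compiled as part of this test) of how
 * an application might drive spdk_bdev_abort(). The function and variable
 * names here are illustrative, not part of the SPDK API.
 */
#if 0
static void
app_abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	/* success is true if every I/O submitted with the matching cb_arg was
	 * either aborted or had already completed. */
	spdk_bdev_free_io(bdev_io);
}

static int
app_abort_inflight_io(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		      void *io_cookie)
{
	/* io_cookie must be the exact cb_arg pointer used when the target I/O
	 * was submitted; matching is by pointer equality, and -ENOTSUP is
	 * returned if the underlying bdev does not support ABORT. */
	return spdk_bdev_abort(desc, ch, io_cookie, app_abort_done, NULL);
}
#endif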
3365
3366int
3367main(int argc, char **argv)
3368{
3369 CU_pSuite suite = NULL;
3370 unsigned int num_failures;
3371
3372 CU_set_error_action(CUEA_ABORT);
3373 CU_initialize_registry();
3374
3375 suite = CU_add_suite("bdev", null_init, null_clean);
3376
3377 CU_ADD_TEST(suite, bytes_to_blocks_test);
3378 CU_ADD_TEST(suite, num_blocks_test);
3379 CU_ADD_TEST(suite, io_valid_test);
3380 CU_ADD_TEST(suite, open_write_test);
3381 CU_ADD_TEST(suite, alias_add_del_test);
3382 CU_ADD_TEST(suite, get_device_stat_test);
3383 CU_ADD_TEST(suite, bdev_io_types_test);
3384 CU_ADD_TEST(suite, bdev_io_wait_test);
3385 CU_ADD_TEST(suite, bdev_io_spans_boundary_test);
3386 CU_ADD_TEST(suite, bdev_io_split_test);
3387 CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
3388 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
3389 CU_ADD_TEST(suite, bdev_io_alignment);
3390 CU_ADD_TEST(suite, bdev_histograms);
3391 CU_ADD_TEST(suite, bdev_write_zeroes);
3392 CU_ADD_TEST(suite, bdev_compare_and_write);
3393 CU_ADD_TEST(suite, bdev_compare);
3394 CU_ADD_TEST(suite, bdev_open_while_hotremove);
3395 CU_ADD_TEST(suite, bdev_close_while_hotremove);
3396 CU_ADD_TEST(suite, bdev_open_ext);
3397 CU_ADD_TEST(suite, bdev_set_io_timeout);
3398 CU_ADD_TEST(suite, lba_range_overlap);
3399 CU_ADD_TEST(suite, lock_lba_range_check_ranges);
3400 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
3401 CU_ADD_TEST(suite, lock_lba_range_overlapped);
3402 CU_ADD_TEST(suite, bdev_io_abort);
3403
3404 allocate_cores(1);
3405 allocate_threads(1);
3406 set_thread(0);
3407
3408 CU_basic_set_mode(CU_BRM_VERBOSE);
3409 CU_basic_run_tests();
3410 num_failures = CU_get_number_of_failures();
3411 CU_cleanup_registry();
3412
3413 free_threads();
3414	free_cores();
3415
3416 return num_failures;
3417}