/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "common/lib/test_env.c"

#include "ftl/ftl_rwb.c"
struct ftl_rwb_ut {
	/* test configuration */
	struct spdk_ftl_conf	conf;
	size_t			metadata_size;
	size_t			num_punits;
	size_t			xfer_size;

	/* the fields below are calculated from the configuration above */
	size_t			max_batches;
	size_t			max_active_batches;
	size_t			max_entries;
	size_t			max_allocable_entries;
	size_t			interleave_offset;
	size_t			num_entries_per_worker;
};

static struct ftl_rwb *g_rwb;
static struct ftl_rwb_ut g_ut;
static int _init_suite(void);
static int
init_suite1(void)
{
	g_ut.conf.rwb_size = 1024 * 1024;
	g_ut.conf.num_interleave_units = 1;
	g_ut.metadata_size = 64;
	/* Geometry values assumed (lost in the damaged source); any values
	 * satisfying the checks in _init_suite() work here. */
	g_ut.num_punits = 4;
	g_ut.xfer_size = 16;

	return _init_suite();
}
static int
init_suite2(void)
{
	g_ut.conf.rwb_size = 2 * 1024 * 1024;
	g_ut.conf.num_interleave_units = 4;
	g_ut.metadata_size = 64;
	/* Geometry values assumed, as above; xfer_size must be divisible by
	 * num_interleave_units for _init_suite() to succeed. */
	g_ut.num_punits = 8;
	g_ut.xfer_size = 16;

	return _init_suite();
}
static int
_init_suite(void)
{
	struct spdk_ftl_conf *conf = &g_ut.conf;

	if (conf->num_interleave_units == 0 ||
	    g_ut.xfer_size % conf->num_interleave_units ||
	    g_ut.num_punits == 0) {
		return -1;
	}

	g_ut.max_batches = conf->rwb_size / (FTL_BLOCK_SIZE * g_ut.xfer_size);
	if (conf->num_interleave_units > 1) {
		g_ut.max_batches += g_ut.num_punits;
		g_ut.max_active_batches = g_ut.num_punits;
	} else {
		g_ut.max_active_batches = 1;
	}

	g_ut.max_entries = g_ut.max_batches * g_ut.xfer_size;
	g_ut.max_allocable_entries = (g_ut.max_batches / g_ut.max_active_batches) *
				     g_ut.max_active_batches * g_ut.xfer_size;

	g_ut.interleave_offset = g_ut.xfer_size / conf->num_interleave_units;

	/* If max_batches is less than max_active_batches * 2,
	 * test_rwb_limits_applied will fail. */
	if (g_ut.max_batches < g_ut.max_active_batches * 2) {
		return -1;
	}

	g_ut.num_entries_per_worker = 16 * g_ut.max_allocable_entries;

	return 0;
}
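
/*
 * Worked example of the derived values, assuming FTL_BLOCK_SIZE is 4096 and
 * the (assumed) suite parameters above:
 *
 *   suite1: rwb_size = 1 MiB, xfer_size = 16, num_interleave_units = 1
 *           max_batches           = 1048576 / (4096 * 16)     = 16
 *           max_active_batches    = 1
 *           max_allocable_entries = (16 / 1) * 1 * 16         = 256
 *
 *   suite2: rwb_size = 2 MiB, xfer_size = 16, num_interleave_units = 4,
 *           num_punits = 8
 *           max_batches           = 2097152 / (4096 * 16) + 8 = 40
 *           max_active_batches    = 8
 *           max_allocable_entries = (40 / 8) * 8 * 16         = 640
 *           interleave_offset     = 16 / 4                    = 4
 */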
static void
setup_rwb(void)
{
	g_rwb = ftl_rwb_init(&g_ut.conf, g_ut.xfer_size,
			     g_ut.metadata_size, g_ut.num_punits);
	SPDK_CU_ASSERT_FATAL(g_rwb != NULL);
}

static void
cleanup_rwb(void)
{
	/* Tear down the buffer created by setup_rwb() */
	ftl_rwb_free(g_rwb);
	g_rwb = NULL;
}
static void
test_rwb_acquire(void)
{
	struct ftl_rwb_entry *entry;
	size_t i;

	setup_rwb();
	/* Verify that it's possible to acquire all of the entries */
	for (i = 0; i < g_ut.max_allocable_entries; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
		SPDK_CU_ASSERT_FATAL(entry);
		ftl_rwb_push(entry);
	}

	/* The buffer is full now, so the next acquisition must fail */
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
	CU_ASSERT_PTR_NULL(entry);
	cleanup_rwb();
}
static void
test_rwb_pop(void)
{
	struct ftl_rwb_entry *entry;
	struct ftl_rwb_batch *batch;
	size_t entry_count, i, i_reset = 0, i_offset = 0;
	uint64_t expected_lba;

	setup_rwb();

	/* Acquire all entries */
	for (i = 0; i < g_ut.max_allocable_entries; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);

		SPDK_CU_ASSERT_FATAL(entry);
		entry->lba = i;
		ftl_rwb_push(entry);
	}

	/* Pop all batches and free them */
	for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
		batch = ftl_rwb_pop(g_rwb);
		SPDK_CU_ASSERT_FATAL(batch);
		entry_count = 0;

		ftl_rwb_foreach(entry, batch) {
			if (i % g_ut.max_active_batches == 0) {
				i_offset = i * g_ut.xfer_size;
			}

			if (entry_count % g_ut.interleave_offset == 0) {
				i_reset = i % g_ut.max_active_batches +
					  (entry_count / g_ut.interleave_offset) *
					  g_ut.max_active_batches;
			}

			expected_lba = i_offset +
				       i_reset * g_ut.interleave_offset +
				       entry_count % g_ut.interleave_offset;

			CU_ASSERT_EQUAL(entry->lba, expected_lba);
			entry_count++;
		}

		CU_ASSERT_EQUAL(entry_count, g_ut.xfer_size);
		ftl_rwb_batch_release(batch);
	}

	/* Acquire all entries once more */
	for (i = 0; i < g_ut.max_allocable_entries; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
		SPDK_CU_ASSERT_FATAL(entry);
		ftl_rwb_push(entry);
	}

	/* Pop one batch per active slot and check that xfer_size entries
	 * per released batch can be acquired again */
	for (i = 0; i < g_ut.max_active_batches; i++) {
		batch = ftl_rwb_pop(g_rwb);
		SPDK_CU_ASSERT_FATAL(batch);
		ftl_rwb_batch_release(batch);
	}

	for (i = 0; i < g_ut.xfer_size * g_ut.max_active_batches; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);

		SPDK_CU_ASSERT_FATAL(entry);
		ftl_rwb_push(entry);
	}

	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
	CU_ASSERT_PTR_NULL(entry);

	/* Pop and release all batches */
	for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
		batch = ftl_rwb_pop(g_rwb);
		SPDK_CU_ASSERT_FATAL(batch);
		ftl_rwb_batch_release(batch);
	}

	cleanup_rwb();
}
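
/*
 * Sketch of the LBA layout verified above, using the suite2 geometry assumed
 * earlier (xfer_size = 16, num_interleave_units = 4, so interleave_offset = 4
 * and max_active_batches = 8): entries are striped across the active batches
 * in interleave_offset-sized chunks, so batch 0 carries LBAs 0-3, 32-35,
 * 64-67, 96-99 and batch 1 carries LBAs 4-7, 36-39, 68-71, 100-103, and so
 * on. With a single interleave unit (suite1), the layout degenerates to
 * plain sequential LBAs.
 */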
static void
test_rwb_disable_interleaving(void)
{
	struct ftl_rwb_entry *entry;
	struct ftl_rwb_batch *batch;
	size_t entry_count, i;

	setup_rwb();

	ftl_rwb_disable_interleaving(g_rwb);

	/* Acquire all entries and assign sequential lbas */
	for (i = 0; i < g_ut.max_allocable_entries; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);

		SPDK_CU_ASSERT_FATAL(entry);
		entry->lba = i;
		ftl_rwb_push(entry);
	}

	/* Check for the expected lbas */
	for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
		batch = ftl_rwb_pop(g_rwb);
		SPDK_CU_ASSERT_FATAL(batch);
		entry_count = 0;

		ftl_rwb_foreach(entry, batch) {
			CU_ASSERT_EQUAL(entry->lba, i * g_ut.xfer_size + entry_count);
			entry_count++;
		}

		CU_ASSERT_EQUAL(entry_count, g_ut.xfer_size);
		ftl_rwb_batch_release(batch);
	}

	cleanup_rwb();
}
static void
test_rwb_batch_revert(void)
{
	struct ftl_rwb_batch *batch;
	struct ftl_rwb_entry *entry;
	size_t i;

	setup_rwb();
	for (i = 0; i < g_ut.max_allocable_entries; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
		SPDK_CU_ASSERT_FATAL(entry);
		ftl_rwb_push(entry);
	}

	/* Pop one batch and revert it */
	batch = ftl_rwb_pop(g_rwb);
	SPDK_CU_ASSERT_FATAL(batch);

	ftl_rwb_batch_revert(batch);

	/* Verify that the full set of batches, including the reverted one,
	 * can still be popped */
	for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
		batch = ftl_rwb_pop(g_rwb);
		CU_ASSERT_PTR_NOT_NULL_FATAL(batch);
	}

	cleanup_rwb();
}
static void
test_rwb_entry_from_offset(void)
{
	struct ftl_rwb_entry *entry;
	struct ftl_ppa ppa = { .cached = 1 };
	size_t i;

	setup_rwb();
	for (i = 0; i < g_ut.max_allocable_entries; ++i) {
		ppa.offset = i;

		entry = ftl_rwb_entry_from_offset(g_rwb, i);
		CU_ASSERT_EQUAL(ppa.offset, entry->pos);
	}
	cleanup_rwb();
}
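
/*
 * Note: an ftl_ppa with the cached flag set refers to a write buffer entry,
 * with ppa.offset holding that entry's position in the buffer, which is why
 * the test above compares it directly against entry->pos.
 */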
static void *
test_rwb_worker(void *ctx)
{
	struct ftl_rwb_entry *entry;
	unsigned int *num_done = ctx;
	size_t i;

	for (i = 0; i < g_ut.num_entries_per_worker; ++i) {
		while (1) {
			entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
			if (entry) {
				ftl_rwb_push(entry);
				break;
			}

			/* Allow other threads to run under valgrind */
			sched_yield();
		}
	}

	__atomic_fetch_add(num_done, 1, __ATOMIC_SEQ_CST);
	return NULL;
}
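
/*
 * The workers and the consumer in test_rwb_parallel() synchronize through
 * num_done alone: each worker bumps it with a sequentially consistent atomic
 * add once all of its entries have been pushed, and the consumer polls it
 * with __atomic_load_n(), so no mutex is needed around the counter.
 */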
static void
test_rwb_parallel(void)
{
	struct ftl_rwb_batch *batch;
	struct ftl_rwb_entry *entry;
#define NUM_PARALLEL_WORKERS 4
	pthread_t workers[NUM_PARALLEL_WORKERS];
	unsigned int num_done = 0;
	size_t i, num_entries = 0;
	bool all_done = false;
	int rc;

	setup_rwb();
	for (i = 0; i < NUM_PARALLEL_WORKERS; ++i) {
		rc = pthread_create(&workers[i], NULL, test_rwb_worker, (void *)&num_done);
		CU_ASSERT_TRUE(rc == 0);
	}

	while (1) {
		batch = ftl_rwb_pop(g_rwb);
		if (batch) {
			ftl_rwb_foreach(entry, batch) {
				num_entries++;
			}

			ftl_rwb_batch_release(batch);
			continue;
		}

		if (NUM_PARALLEL_WORKERS == __atomic_load_n(&num_done, __ATOMIC_SEQ_CST)) {
			if (!all_done) {
				/* Pop all entries left in the rwb */
				all_done = true;
				continue;
			}

			for (i = 0; i < NUM_PARALLEL_WORKERS; ++i) {
				pthread_join(workers[i], NULL);
			}

			break;
		}

		/* Allow other threads to run under valgrind */
		sched_yield();
	}

	CU_ASSERT_TRUE(num_entries == NUM_PARALLEL_WORKERS * g_ut.num_entries_per_worker);
	cleanup_rwb();
}
static void
test_rwb_limits_base(void)
{
	struct ftl_rwb_entry *entry;
	size_t limits[FTL_RWB_TYPE_MAX];

	setup_rwb();
	ftl_rwb_get_limits(g_rwb, limits);
	CU_ASSERT_TRUE(limits[FTL_RWB_TYPE_INTERNAL] == ftl_rwb_entry_cnt(g_rwb));
	CU_ASSERT_TRUE(limits[FTL_RWB_TYPE_USER] == ftl_rwb_entry_cnt(g_rwb));

	/* Verify that it's possible to acquire both types of entries */
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
	CU_ASSERT_PTR_NOT_NULL_FATAL(entry);

	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
	CU_ASSERT_PTR_NOT_NULL_FATAL(entry);
	cleanup_rwb();
}
static void
test_rwb_limits_set(void)
{
	size_t limits[FTL_RWB_TYPE_MAX], check[FTL_RWB_TYPE_MAX];
	size_t i;

	setup_rwb();

	/* Check valid limits */
	ftl_rwb_get_limits(g_rwb, limits);
	memcpy(check, limits, sizeof(limits));
	ftl_rwb_set_limits(g_rwb, limits);
	ftl_rwb_get_limits(g_rwb, limits);
	CU_ASSERT(memcmp(check, limits, sizeof(limits)) == 0);

	for (i = 0; i < FTL_RWB_TYPE_MAX; ++i) {
		ftl_rwb_get_limits(g_rwb, limits);
		/* Assumed reconstruction of the damaged line: drop the limit
		 * for a single type to zero before writing it back */
		limits[i] = 0;
		memcpy(check, limits, sizeof(limits));
		ftl_rwb_set_limits(g_rwb, limits);
		ftl_rwb_get_limits(g_rwb, limits);
		CU_ASSERT(memcmp(check, limits, sizeof(limits)) == 0);
	}
	cleanup_rwb();
}
static void
test_rwb_limits_applied(void)
{
	struct ftl_rwb_entry *entry;
	struct ftl_rwb_batch *batch;
	size_t limits[FTL_RWB_TYPE_MAX];
	const size_t test_limit = g_ut.xfer_size * g_ut.max_active_batches;
	size_t i;

	setup_rwb();

	/* Check that it's impossible to acquire any entries when the limits
	 * are set to 0 */
	ftl_rwb_get_limits(g_rwb, limits);
	limits[FTL_RWB_TYPE_USER] = 0;
	ftl_rwb_set_limits(g_rwb, limits);
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
	CU_ASSERT_PTR_NULL(entry);

	limits[FTL_RWB_TYPE_USER] = ftl_rwb_entry_cnt(g_rwb);
	limits[FTL_RWB_TYPE_INTERNAL] = 0;
	ftl_rwb_set_limits(g_rwb, limits);
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
	CU_ASSERT_PTR_NULL(entry);

	/* Check positive limits */
	limits[FTL_RWB_TYPE_USER] = ftl_rwb_entry_cnt(g_rwb);
	limits[FTL_RWB_TYPE_INTERNAL] = test_limit;
	ftl_rwb_set_limits(g_rwb, limits);
	for (i = 0; i < test_limit; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
		SPDK_CU_ASSERT_FATAL(entry);
		entry->flags = FTL_IO_INTERNAL;
		ftl_rwb_push(entry);
	}

	/* Now we expect NULL, since we've reached the limit */
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
	CU_ASSERT_PTR_NULL(entry);

	/* Complete the entries and check that we can acquire entries once again */
	for (i = 0; i < test_limit / g_ut.xfer_size; ++i) {
		batch = ftl_rwb_pop(g_rwb);
		SPDK_CU_ASSERT_FATAL(batch);
		ftl_rwb_batch_release(batch);
	}

	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
	SPDK_CU_ASSERT_FATAL(entry);
	entry->flags = FTL_IO_INTERNAL;
	ftl_rwb_push(entry);

	/* Set the same limit, but this time for user entries */
	limits[FTL_RWB_TYPE_USER] = test_limit;
	limits[FTL_RWB_TYPE_INTERNAL] = ftl_rwb_entry_cnt(g_rwb);
	ftl_rwb_set_limits(g_rwb, limits);
	for (i = 0; i < test_limit; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
		SPDK_CU_ASSERT_FATAL(entry);
		ftl_rwb_push(entry);
	}

	/* Now we expect NULL, since we've reached the limit */
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
	CU_ASSERT_PTR_NULL(entry);

	/* Check that we're still able to acquire a number of internal entries
	 * while the user entries are being throttled */
	for (i = 0; i < g_ut.xfer_size; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
		SPDK_CU_ASSERT_FATAL(entry);
	}

	cleanup_rwb();
}
int
main(int argc, char **argv)
{
	CU_pSuite suite1, suite2;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite1 = CU_add_suite("suite1", init_suite1, NULL);
	if (!suite1) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	suite2 = CU_add_suite("suite2", init_suite2, NULL);
	if (!suite2) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite1, "test_rwb_acquire",
			    test_rwb_acquire) == NULL
		|| CU_add_test(suite1, "test_rwb_pop",
			       test_rwb_pop) == NULL
		|| CU_add_test(suite1, "test_rwb_disable_interleaving",
			       test_rwb_disable_interleaving) == NULL
		|| CU_add_test(suite1, "test_rwb_batch_revert",
			       test_rwb_batch_revert) == NULL
		|| CU_add_test(suite1, "test_rwb_entry_from_offset",
			       test_rwb_entry_from_offset) == NULL
		|| CU_add_test(suite1, "test_rwb_parallel",
			       test_rwb_parallel) == NULL
		|| CU_add_test(suite1, "test_rwb_limits_base",
			       test_rwb_limits_base) == NULL
		|| CU_add_test(suite1, "test_rwb_limits_set",
			       test_rwb_limits_set) == NULL
		|| CU_add_test(suite1, "test_rwb_limits_applied",
			       test_rwb_limits_applied) == NULL
		|| CU_add_test(suite2, "test_rwb_acquire",
			       test_rwb_acquire) == NULL
		|| CU_add_test(suite2, "test_rwb_pop",
			       test_rwb_pop) == NULL
		|| CU_add_test(suite2, "test_rwb_disable_interleaving",
			       test_rwb_disable_interleaving) == NULL
		|| CU_add_test(suite2, "test_rwb_batch_revert",
			       test_rwb_batch_revert) == NULL
		|| CU_add_test(suite2, "test_rwb_entry_from_offset",
			       test_rwb_entry_from_offset) == NULL
		|| CU_add_test(suite2, "test_rwb_parallel",
			       test_rwb_parallel) == NULL
		|| CU_add_test(suite2, "test_rwb_limits_base",
			       test_rwb_limits_base) == NULL
		|| CU_add_test(suite2, "test_rwb_limits_set",
			       test_rwb_limits_set) == NULL
		|| CU_add_test(suite2, "test_rwb_limits_applied",
			       test_rwb_limits_applied) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}