/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "common/lib/test_env.c"

#include "ftl/ftl_rwb.c"

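/*
 * Test fixture describing a single rwb configuration. The first block of
 * fields is filled in by the suite initializers; the fields below it are
 * derived from that configuration by _init_suite().
 */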
struct ftl_rwb_ut {
	/* configurable parameters */
	struct spdk_ftl_conf conf;
	size_t metadata_size;
	size_t num_punits;
	size_t xfer_size;

	/* values derived from the configuration above */
	size_t max_batches;
	size_t max_active_batches;
	size_t max_entries;
	size_t max_allocable_entries;
	size_t interleave_offset;
	size_t num_entries_per_worker;
};

static struct ftl_rwb *g_rwb;
static struct ftl_rwb_ut g_ut;

static int _init_suite(void);

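/*
 * suite1 exercises the rwb without interleaving (a single interleave unit
 * across four parallel units); suite2 uses four interleave units across
 * eight parallel units. Both suites run the same set of tests.
 */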
static int
init_suite1(void)
{
	g_ut.conf.rwb_size = 1024 * 1024;
	g_ut.conf.num_interleave_units = 1;
	g_ut.metadata_size = 64;
	g_ut.num_punits = 4;
	g_ut.xfer_size = 16;

	return _init_suite();
}

static int
init_suite2(void)
{
	g_ut.conf.rwb_size = 2 * 1024 * 1024;
	g_ut.conf.num_interleave_units = 4;
	g_ut.metadata_size = 64;
	g_ut.num_punits = 8;
	g_ut.xfer_size = 16;

	return _init_suite();
}

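/*
 * Derives the sizing parameters the tests rely on. Assuming FTL_BLOCK_SIZE
 * is 4096, suite1 works out to 1 MiB / (4096 * 16) + 1 = 17 batches with a
 * single active batch (272 allocable entries), while suite2 works out to
 * 32 + 8 = 40 batches with 8 active batches (640 allocable entries) and an
 * interleave offset of 16 / 4 = 4 blocks.
 */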
static int
_init_suite(void)
{
	struct spdk_ftl_conf *conf = &g_ut.conf;

	if (conf->num_interleave_units == 0 ||
	    g_ut.xfer_size % conf->num_interleave_units ||
	    g_ut.num_punits == 0) {
		return -1;
	}

	g_ut.max_batches = conf->rwb_size / (FTL_BLOCK_SIZE * g_ut.xfer_size);
	if (conf->num_interleave_units > 1) {
		g_ut.max_batches += g_ut.num_punits;
		g_ut.max_active_batches = g_ut.num_punits;
	} else {
		g_ut.max_batches++;
		g_ut.max_active_batches = 1;
	}

	g_ut.max_entries = g_ut.max_batches * g_ut.xfer_size;
	g_ut.max_allocable_entries = (g_ut.max_batches / g_ut.max_active_batches) *
				     g_ut.max_active_batches * g_ut.xfer_size;

	g_ut.interleave_offset = g_ut.xfer_size / conf->num_interleave_units;

	/*
	 * If max_batches were less than max_active_batches * 2,
	 * test_rwb_limits_applied would fail.
	 */
	if (g_ut.max_batches < g_ut.max_active_batches * 2) {
		return -1;
	}

	g_ut.num_entries_per_worker = 16 * g_ut.max_allocable_entries;

	return 0;
}

static void
setup_rwb(void)
{
	g_rwb = ftl_rwb_init(&g_ut.conf, g_ut.xfer_size,
			     g_ut.metadata_size, g_ut.num_punits);
	SPDK_CU_ASSERT_FATAL(g_rwb != NULL);
}

static void
cleanup_rwb(void)
{
	ftl_rwb_free(g_rwb);
	g_rwb = NULL;
}

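/*
 * Acquires and pushes every allocable entry, then checks that one more
 * acquisition fails because the buffer is full.
 */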
static void
test_rwb_acquire(void)
{
	struct ftl_rwb_entry *entry;
	size_t i;

	setup_rwb();
	/* Verify that it's possible to acquire all of the entries */
	for (i = 0; i < g_ut.max_allocable_entries; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
		SPDK_CU_ASSERT_FATAL(entry);
		ftl_rwb_push(entry);
	}

	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
	CU_ASSERT_PTR_NULL(entry);
	cleanup_rwb();
}

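/*
 * With interleaving enabled, pushed entries are striped across the
 * max_active_batches open batches in chunks of interleave_offset blocks.
 * For suite2 (xfer_size 16, interleave_offset 4, 8 active batches), the
 * first popped batch should hold LBAs 0-3, 32-35, 64-67 and 96-99, the
 * second LBAs 4-7, 36-39, 68-71 and 100-103, and so on; for suite1 the
 * pattern degenerates to sequential LBAs. The i_offset and i_reset
 * variables below reconstruct that expectation.
 */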
static void
test_rwb_pop(void)
{
	struct ftl_rwb_entry *entry;
	struct ftl_rwb_batch *batch;
	size_t entry_count, i, i_reset = 0, i_offset = 0;
	uint64_t expected_lba;

	setup_rwb();

	/* Acquire all entries */
	for (i = 0; i < g_ut.max_allocable_entries; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);

		SPDK_CU_ASSERT_FATAL(entry);
		entry->lba = i;
		ftl_rwb_push(entry);
	}

	/* Pop all batches and free them */
	for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
		batch = ftl_rwb_pop(g_rwb);
		SPDK_CU_ASSERT_FATAL(batch);
		entry_count = 0;

		ftl_rwb_foreach(entry, batch) {
			if (i % g_ut.max_active_batches == 0) {
				i_offset = i * g_ut.xfer_size;
			}

			if (entry_count % g_ut.interleave_offset == 0) {
				i_reset = i % g_ut.max_active_batches +
					  (entry_count / g_ut.interleave_offset) *
					  g_ut.max_active_batches;
			}

			expected_lba = i_offset +
				       i_reset * g_ut.interleave_offset +
				       entry_count % g_ut.interleave_offset;

			CU_ASSERT_EQUAL(entry->lba, expected_lba);
			entry_count++;
		}

		CU_ASSERT_EQUAL(entry_count, g_ut.xfer_size);
		ftl_rwb_batch_release(batch);
	}

	/* Acquire all entries once more */
	for (i = 0; i < g_ut.max_allocable_entries; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
		SPDK_CU_ASSERT_FATAL(entry);
		ftl_rwb_push(entry);
	}

	/*
	 * Pop and release max_active_batches batches and check that
	 * xfer_size * max_active_batches entries can be acquired again
	 */
	for (i = 0; i < g_ut.max_active_batches; i++) {
		batch = ftl_rwb_pop(g_rwb);
		SPDK_CU_ASSERT_FATAL(batch);
		ftl_rwb_batch_release(batch);
	}

	for (i = 0; i < g_ut.xfer_size * g_ut.max_active_batches; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);

		SPDK_CU_ASSERT_FATAL(entry);
		ftl_rwb_push(entry);
	}

	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
	CU_ASSERT_PTR_NULL(entry);

	/* Pop and release all batches */
	for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
		batch = ftl_rwb_pop(g_rwb);
		SPDK_CU_ASSERT_FATAL(batch);
		ftl_rwb_batch_release(batch);
	}

	cleanup_rwb();
}

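/*
 * With interleaving disabled, batches are filled strictly in push order, so
 * batch i is expected to hold LBAs [i * xfer_size, (i + 1) * xfer_size).
 */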
static void
test_rwb_disable_interleaving(void)
{
	struct ftl_rwb_entry *entry;
	struct ftl_rwb_batch *batch;
	size_t entry_count, i;

	setup_rwb();

	ftl_rwb_disable_interleaving(g_rwb);

	/* Acquire all entries and assign sequential lbas */
	for (i = 0; i < g_ut.max_allocable_entries; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);

		SPDK_CU_ASSERT_FATAL(entry);
		entry->lba = i;
		ftl_rwb_push(entry);
	}

	/* Check for expected lbas */
	for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
		batch = ftl_rwb_pop(g_rwb);
		SPDK_CU_ASSERT_FATAL(batch);
		entry_count = 0;

		ftl_rwb_foreach(entry, batch) {
			CU_ASSERT_EQUAL(entry->lba, i * g_ut.xfer_size + entry_count);
			entry_count++;
		}

		CU_ASSERT_EQUAL(entry_count, g_ut.xfer_size);
		ftl_rwb_batch_release(batch);
	}

	cleanup_rwb();
}

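/*
 * Reverting a popped batch returns it to the queue, so the full number of
 * batches should still be poppable afterwards.
 */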
static void
test_rwb_batch_revert(void)
{
	struct ftl_rwb_batch *batch;
	struct ftl_rwb_entry *entry;
	size_t i;

	setup_rwb();
	for (i = 0; i < g_ut.max_allocable_entries; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
		SPDK_CU_ASSERT_FATAL(entry);
		ftl_rwb_push(entry);
	}

	/* Pop one batch and revert it */
	batch = ftl_rwb_pop(g_rwb);
	SPDK_CU_ASSERT_FATAL(batch);

	ftl_rwb_batch_revert(batch);

	/* Verify all of the batches */
	for (i = 0; i < g_ut.max_allocable_entries / g_ut.xfer_size; ++i) {
		batch = ftl_rwb_pop(g_rwb);
		CU_ASSERT_PTR_NOT_NULL_FATAL(batch);
	}
	cleanup_rwb();
}

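/*
 * A cached PPA stores an rwb entry's position in its offset field;
 * ftl_rwb_entry_from_offset() should map each offset back to the entry
 * occupying that position.
 */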
static void
test_rwb_entry_from_offset(void)
{
	struct ftl_rwb_entry *entry;
	struct ftl_ppa ppa = { .cached = 1 };
	size_t i;

	setup_rwb();
	for (i = 0; i < g_ut.max_allocable_entries; ++i) {
		ppa.offset = i;

		entry = ftl_rwb_entry_from_offset(g_rwb, i);
		CU_ASSERT_EQUAL(ppa.offset, entry->pos);
	}
	cleanup_rwb();
}

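/*
 * Producer side of the parallel test: each worker acquires and pushes
 * num_entries_per_worker entries, yielding whenever the buffer is full,
 * and bumps the shared counter once it is done.
 */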
static void *
test_rwb_worker(void *ctx)
{
	struct ftl_rwb_entry *entry;
	unsigned int *num_done = ctx;
	size_t i;

	for (i = 0; i < g_ut.num_entries_per_worker; ++i) {
		while (1) {
			entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
			if (entry) {
				entry->flags = 0;
				ftl_rwb_push(entry);
				break;
			} else {
				/* Allow other threads to run under valgrind */
				pthread_yield();
			}
		}
	}

	__atomic_fetch_add(num_done, 1, __ATOMIC_SEQ_CST);
	return NULL;
}

static void
test_rwb_parallel(void)
{
	struct ftl_rwb_batch *batch;
	struct ftl_rwb_entry *entry;
#define NUM_PARALLEL_WORKERS	4
	pthread_t workers[NUM_PARALLEL_WORKERS];
	unsigned int num_done = 0;
	size_t i, num_entries = 0;
	bool all_done = false;
	int rc;

	setup_rwb();
	for (i = 0; i < NUM_PARALLEL_WORKERS; ++i) {
		rc = pthread_create(&workers[i], NULL, test_rwb_worker, (void *)&num_done);
		CU_ASSERT_TRUE(rc == 0);
	}

	while (1) {
		batch = ftl_rwb_pop(g_rwb);
		if (batch) {
			ftl_rwb_foreach(entry, batch) {
				num_entries++;
			}

			ftl_rwb_batch_release(batch);
		} else {
			if (NUM_PARALLEL_WORKERS == __atomic_load_n(&num_done, __ATOMIC_SEQ_CST)) {
				if (!all_done) {
					/* Pop any entries left in the rwb */
					all_done = true;
					continue;
				}

				for (i = 0; i < NUM_PARALLEL_WORKERS; ++i) {
					pthread_join(workers[i], NULL);
				}

				break;
			}

			/* Allow other threads to run under valgrind */
			pthread_yield();
		}
	}

	CU_ASSERT_TRUE(num_entries == NUM_PARALLEL_WORKERS * g_ut.num_entries_per_worker);
	cleanup_rwb();
}

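/*
 * Limits cap how many entries of a given type (user vs. internal) may be
 * held in the rwb at once. By default both limits equal the total entry
 * count, so either type can be acquired on a fresh buffer.
 */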
static void
test_rwb_limits_base(void)
{
	struct ftl_rwb_entry *entry;
	size_t limits[FTL_RWB_TYPE_MAX];

	setup_rwb();
	ftl_rwb_get_limits(g_rwb, limits);
	CU_ASSERT_TRUE(limits[FTL_RWB_TYPE_INTERNAL] == ftl_rwb_entry_cnt(g_rwb));
	CU_ASSERT_TRUE(limits[FTL_RWB_TYPE_USER] == ftl_rwb_entry_cnt(g_rwb));

	/* Verify it's possible to acquire both types of entries */
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
	CU_ASSERT_PTR_NOT_NULL_FATAL(entry);

	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
	CU_ASSERT_PTR_NOT_NULL_FATAL(entry);
	cleanup_rwb();
}

static void
test_rwb_limits_set(void)
{
	size_t limits[FTL_RWB_TYPE_MAX], check[FTL_RWB_TYPE_MAX];
	size_t i;

	setup_rwb();

	/* Check valid limits */
	ftl_rwb_get_limits(g_rwb, limits);
	memcpy(check, limits, sizeof(limits));
	ftl_rwb_set_limits(g_rwb, limits);
	ftl_rwb_get_limits(g_rwb, limits);
	CU_ASSERT(memcmp(check, limits, sizeof(limits)) == 0);

	for (i = 0; i < FTL_RWB_TYPE_MAX; ++i) {
		ftl_rwb_get_limits(g_rwb, limits);
		limits[i] = 0;
	}

	memcpy(check, limits, sizeof(limits));
	ftl_rwb_set_limits(g_rwb, limits);
	ftl_rwb_get_limits(g_rwb, limits);
	CU_ASSERT(memcmp(check, limits, sizeof(limits)) == 0);
	cleanup_rwb();
}

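/*
 * Sets each type's limit to zero and then to a positive threshold in turn,
 * and verifies that acquisitions of that type fail once the threshold is
 * reached while the other type remains unaffected.
 */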
static void
test_rwb_limits_applied(void)
{
	struct ftl_rwb_entry *entry;
	struct ftl_rwb_batch *batch;
	size_t limits[FTL_RWB_TYPE_MAX];
	const size_t test_limit = g_ut.xfer_size * g_ut.max_active_batches;
	size_t i;

	setup_rwb();

	/*
	 * Check that it's impossible to acquire any entries when the limits
	 * are set to 0
	 */
	ftl_rwb_get_limits(g_rwb, limits);
	limits[FTL_RWB_TYPE_USER] = 0;
	ftl_rwb_set_limits(g_rwb, limits);
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
	CU_ASSERT_PTR_NULL(entry);

	limits[FTL_RWB_TYPE_USER] = ftl_rwb_entry_cnt(g_rwb);
	limits[FTL_RWB_TYPE_INTERNAL] = 0;
	ftl_rwb_set_limits(g_rwb, limits);
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
	CU_ASSERT_PTR_NULL(entry);

	/* Check positive limits */
	limits[FTL_RWB_TYPE_USER] = ftl_rwb_entry_cnt(g_rwb);
	limits[FTL_RWB_TYPE_INTERNAL] = test_limit;
	ftl_rwb_set_limits(g_rwb, limits);
	for (i = 0; i < test_limit; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
		SPDK_CU_ASSERT_FATAL(entry);
		entry->flags = FTL_IO_INTERNAL;
		ftl_rwb_push(entry);
	}

	/* Now we expect NULL, since we've reached the threshold */
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
	CU_ASSERT_PTR_NULL(entry);

	for (i = 0; i < test_limit / g_ut.xfer_size; ++i) {
		/* Complete the entries and check we can retrieve the entries once again */
		batch = ftl_rwb_pop(g_rwb);
		SPDK_CU_ASSERT_FATAL(batch);
		ftl_rwb_batch_release(batch);
	}

	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
	SPDK_CU_ASSERT_FATAL(entry);
	entry->flags = FTL_IO_INTERNAL;

	/* Set the same limit but this time for user entries */
	limits[FTL_RWB_TYPE_USER] = test_limit;
	limits[FTL_RWB_TYPE_INTERNAL] = ftl_rwb_entry_cnt(g_rwb);
	ftl_rwb_set_limits(g_rwb, limits);
	for (i = 0; i < test_limit; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
		SPDK_CU_ASSERT_FATAL(entry);
		ftl_rwb_push(entry);
	}

	/* Now we expect NULL, since we've reached the threshold */
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
	CU_ASSERT_PTR_NULL(entry);

	/*
	 * Check that we're still able to acquire a number of internal entries
	 * while the user entries are being throttled
	 */
	for (i = 0; i < g_ut.xfer_size; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
		SPDK_CU_ASSERT_FATAL(entry);
	}

	cleanup_rwb();
}

int
main(int argc, char **argv)
{
	CU_pSuite suite1, suite2;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite1 = CU_add_suite("suite1", init_suite1, NULL);
	if (!suite1) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	suite2 = CU_add_suite("suite2", init_suite2, NULL);
	if (!suite2) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite1, "test_rwb_acquire",
			    test_rwb_acquire) == NULL
		|| CU_add_test(suite1, "test_rwb_pop",
			       test_rwb_pop) == NULL
		|| CU_add_test(suite1, "test_rwb_disable_interleaving",
			       test_rwb_disable_interleaving) == NULL
		|| CU_add_test(suite1, "test_rwb_batch_revert",
			       test_rwb_batch_revert) == NULL
		|| CU_add_test(suite1, "test_rwb_entry_from_offset",
			       test_rwb_entry_from_offset) == NULL
		|| CU_add_test(suite1, "test_rwb_parallel",
			       test_rwb_parallel) == NULL
		|| CU_add_test(suite1, "test_rwb_limits_base",
			       test_rwb_limits_base) == NULL
		|| CU_add_test(suite1, "test_rwb_limits_set",
			       test_rwb_limits_set) == NULL
		|| CU_add_test(suite1, "test_rwb_limits_applied",
			       test_rwb_limits_applied) == NULL
		|| CU_add_test(suite2, "test_rwb_acquire",
			       test_rwb_acquire) == NULL
		|| CU_add_test(suite2, "test_rwb_pop",
			       test_rwb_pop) == NULL
		|| CU_add_test(suite2, "test_rwb_disable_interleaving",
			       test_rwb_disable_interleaving) == NULL
		|| CU_add_test(suite2, "test_rwb_batch_revert",
			       test_rwb_batch_revert) == NULL
		|| CU_add_test(suite2, "test_rwb_entry_from_offset",
			       test_rwb_entry_from_offset) == NULL
		|| CU_add_test(suite2, "test_rwb_parallel",
			       test_rwb_parallel) == NULL
		|| CU_add_test(suite2, "test_rwb_limits_base",
			       test_rwb_limits_base) == NULL
		|| CU_add_test(suite2, "test_rwb_limits_set",
			       test_rwb_limits_set) == NULL
		|| CU_add_test(suite2, "test_rwb_limits_applied",
			       test_rwb_limits_applied) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}