1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
5 #include <gtest/gtest.h>
7 #include "os/bluestore/fastbmap_allocator_impl.h"
9 class TestAllocatorLevel01
: public AllocatorLevel01Loose
12 void init(uint64_t capacity
, uint64_t alloc_unit
)
14 _init(capacity
, alloc_unit
);
16 interval_t
allocate_l1_cont(uint64_t length
, uint64_t min_length
,
17 uint64_t pos_start
, uint64_t pos_end
)
19 return _allocate_l1_contiguous(length
, min_length
, 0, pos_start
, pos_end
);
21 void free_l1(const interval_t
& r
)
23 _free_l1(r
.offset
, r
.length
);
27 class TestAllocatorLevel02
: public AllocatorLevel02
<AllocatorLevel01Loose
>
30 void init(uint64_t capacity
, uint64_t alloc_unit
)
32 _init(capacity
, alloc_unit
);
34 void allocate_l2(uint64_t length
, uint64_t min_length
,
36 interval_vector_t
* res
)
38 uint64_t allocated
= 0;
39 uint64_t hint
= 0; // trigger internal l2 hint support
40 _allocate_l2(length
, min_length
, 0, hint
, &allocated
, res
);
41 *allocated0
+= allocated
;
43 void free_l2(const interval_vector_t
& r
)
47 void mark_free(uint64_t o
, uint64_t len
)
51 void mark_allocated(uint64_t o
, uint64_t len
)
53 _mark_allocated(o
, len
);
// Size shorthands used throughout the tests below.
constexpr uint64_t _1m = 1024 * 1024;      // 1 MiB
constexpr uint64_t _2m = 2 * 1024 * 1024;  // 2 MiB
// Exercises L1-level contiguous allocation: alloc/free round-trips,
// best-fit hole filling, partial fills under fragmentation, and full-
// capacity sweeps at 2M granularity with min_length fallback.
// NOTE(review): this file is a line-mangled extract — braces and the
// free_l1() calls between allocation phases are missing from this view;
// compare against the upstream Ceph unit test before relying on it.
60 TEST(TestAllocatorLevel01
, test_l1
)
62 TestAllocatorLevel01 al1
;
63 uint64_t num_l1_entries
= 3 * 256;
64 uint64_t capacity
= num_l1_entries
* 512 * 4096;
65 al1
.init(capacity
, 0x1000);
66 ASSERT_EQ(capacity
, al1
.debug_get_free());
68 auto i1
= al1
.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries
);
69 ASSERT_EQ(i1
.offset
, 0u);
70 ASSERT_EQ(i1
.length
, 0x1000u
);
71 ASSERT_EQ(capacity
- 0x1000, al1
.debug_get_free());
73 auto i2
= al1
.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries
);
74 ASSERT_EQ(i2
.offset
, 0x1000u
);
75 ASSERT_EQ(i2
.length
, 0x1000u
);
78 i1
= al1
.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries
);
79 ASSERT_EQ(i1
.offset
, 0u);
80 ASSERT_EQ(i1
.length
, 0x1000u
);
81 i2
= al1
.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries
);
82 ASSERT_EQ(i2
.offset
, 0x1000u
);
83 ASSERT_EQ(i2
.length
, 0x1000u
);
87 i1
= al1
.allocate_l1_cont(0x2000, 0x1000, 0, num_l1_entries
);
88 ASSERT_EQ(i1
.offset
, 0u);
89 ASSERT_EQ(i1
.length
, 0x2000u
);
91 i2
= al1
.allocate_l1_cont(0x3000, 0x1000, 0, num_l1_entries
);
92 ASSERT_EQ(i2
.offset
, 0x2000u
);
93 ASSERT_EQ(i2
.length
, 0x3000u
);
98 i1
= al1
.allocate_l1_cont(0x2000, 0x1000, 0, num_l1_entries
);
99 ASSERT_EQ(i1
.offset
, 0u);
100 ASSERT_EQ(i1
.length
, 0x2000u
);
102 i2
= al1
.allocate_l1_cont(2 * 1024 * 1024, 0x1000, 0, num_l1_entries
);
103 ASSERT_EQ(i2
.offset
, 2u * 1024u * 1024u);
104 ASSERT_EQ(i2
.length
, 2u * 1024u * 1024u);
107 i1
= al1
.allocate_l1_cont(1024 * 1024, 0x1000, 0, num_l1_entries
);
108 ASSERT_EQ(i1
.offset
, 0u);
109 ASSERT_EQ(i1
.length
, 1024u * 1024u);
111 auto i3
= al1
.allocate_l1_cont(1024 * 1024 + 0x1000, 0x1000, 0, num_l1_entries
);
112 ASSERT_EQ(i3
.offset
, 2u * 2u * 1024u * 1024u);
113 ASSERT_EQ(i3
.length
, 1024u * 1024u + 0x1000u
);
115 // here we have the following layout:
116 // Alloc: 0~1M, 2M~2M, 4M~1M+4K
117 // Free: 1M~1M, 4M+4K ~ 2M-4K, 6M ~...
119 auto i4
= al1
.allocate_l1_cont(1024 * 1024, 0x1000, 0, num_l1_entries
);
120 ASSERT_EQ(1 * 1024 * 1024u, i4
.offset
);
121 ASSERT_EQ(1024 * 1024u, i4
.length
);
124 i4
= al1
.allocate_l1_cont(1024 * 1024 - 0x1000, 0x1000, 0, num_l1_entries
);
125 ASSERT_EQ(i4
.offset
, 5u * 1024u * 1024u + 0x1000u
);
126 ASSERT_EQ(i4
.length
, 1024u * 1024u - 0x1000u
);
129 i4
= al1
.allocate_l1_cont(1024 * 1024 + 0x1000, 0x1000, 0, num_l1_entries
);
130 ASSERT_EQ(i4
.offset
, 6u * 1024u * 1024u);
131 //ASSERT_EQ(i4.offset, 5 * 1024 * 1024 + 0x1000);
132 ASSERT_EQ(i4
.length
, 1024u * 1024u + 0x1000u
);
139 i1
= al1
.allocate_l1_cont(1024 * 1024, 0x1000, 0, num_l1_entries
);
140 ASSERT_EQ(i1
.offset
, 0u);
141 ASSERT_EQ(i1
.length
, 1024u * 1024u);
143 i2
= al1
.allocate_l1_cont(1024 * 1024, 0x1000, 0, num_l1_entries
);
144 ASSERT_EQ(i2
.offset
, 1u * 1024u * 1024u);
145 ASSERT_EQ(i2
.length
, 1024u * 1024u);
147 i3
= al1
.allocate_l1_cont(512 * 1024, 0x1000, 0, num_l1_entries
);
148 ASSERT_EQ(i3
.offset
, 2u * 1024u * 1024u);
149 ASSERT_EQ(i3
.length
, 512u * 1024u);
151 i4
= al1
.allocate_l1_cont(1536 * 1024, 0x1000, 0, num_l1_entries
);
152 ASSERT_EQ(i4
.offset
, (2u * 1024u + 512u) * 1024u);
153 ASSERT_EQ(i4
.length
, 1536u * 1024u);
154 // making a hole 1.5 Mb length
157 // and trying to fill it
158 i2
= al1
.allocate_l1_cont(1536 * 1024, 0x1000, 0, num_l1_entries
);
159 ASSERT_EQ(i2
.offset
, 1024u * 1024u);
160 ASSERT_EQ(i2
.length
, 1536u * 1024u);
163 // and trying to fill it partially
164 i2
= al1
.allocate_l1_cont(1528 * 1024, 0x1000, 0, num_l1_entries
);
165 ASSERT_EQ(i2
.offset
, 1024u * 1024u);
166 ASSERT_EQ(i2
.length
, 1528u * 1024u);
168 i3
= al1
.allocate_l1_cont(8 * 1024, 0x1000, 0, num_l1_entries
);
169 ASSERT_EQ(i3
.offset
, 2552u * 1024u);
170 ASSERT_EQ(i3
.length
, 8u * 1024u);
173 // here we have the following layout:
174 // Alloc: 0~1M, 2552K~8K, num_l1_entries0K~1.5M
175 // Free: 1M~1528K, 4M ~...
177 i2
= al1
.allocate_l1_cont(1536 * 1024, 0x1000, 0, num_l1_entries
);
178 ASSERT_EQ(i2
.offset
, 4u * 1024u * 1024u);
179 ASSERT_EQ(i2
.length
, 1536u * 1024u);
185 ASSERT_EQ(capacity
, al1
.debug_get_free());
// Fill the whole capacity at 2M granularity, then verify exhaustion and
// min_length fallback behavior below.
187 for (uint64_t i
= 0; i
< capacity
; i
+= _2m
) {
188 i1
= al1
.allocate_l1_cont(_2m
, _2m
, 0, num_l1_entries
);
189 ASSERT_EQ(i1
.offset
, i
);
190 ASSERT_EQ(i1
.length
, _2m
);
192 ASSERT_EQ(0u, al1
.debug_get_free());
193 i2
= al1
.allocate_l1_cont(_2m
, _2m
, 0, num_l1_entries
);
194 ASSERT_EQ(i2
.length
, 0u);
195 ASSERT_EQ(0u, al1
.debug_get_free());
198 i2
= al1
.allocate_l1_cont(_2m
, _2m
, 0, num_l1_entries
);
201 i2
= al1
.allocate_l1_cont(_1m
, _1m
, 0, num_l1_entries
);
202 ASSERT_EQ(i2
.offset
, i1
.offset
);
203 ASSERT_EQ(i2
.length
, _1m
);
205 i3
= al1
.allocate_l1_cont(_2m
, _2m
, 0, num_l1_entries
);
206 ASSERT_EQ(i3
.length
, 0u);
208 i3
= al1
.allocate_l1_cont(_2m
, _1m
, 0, num_l1_entries
);
209 ASSERT_EQ(i3
.length
, _1m
);
211 i4
= al1
.allocate_l1_cont(_2m
, _1m
, 0, num_l1_entries
);
212 ASSERT_EQ(i4
.length
, 0u);
215 i2
= al1
.allocate_l1_cont(_2m
, _2m
, 0, num_l1_entries
);
216 ASSERT_EQ(i2
.length
, 0u);
218 i2
= al1
.allocate_l1_cont(_2m
, 0x1000, 0, num_l1_entries
);
219 ASSERT_EQ(i2
.length
, _1m
);
223 ASSERT_EQ(_2m
, al1
.debug_get_free());
225 i1
= al1
.allocate_l1_cont(_2m
- 3 * 0x1000, 0x1000, 0, num_l1_entries
);
226 i2
= al1
.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries
);
227 i3
= al1
.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries
);
228 i4
= al1
.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries
);
229 ASSERT_EQ(0u, al1
.debug_get_free());
234 i2
= al1
.allocate_l1_cont(0x4000, 0x2000, 0, num_l1_entries
);
235 ASSERT_EQ(i2
.length
, 0u);
236 i2
= al1
.allocate_l1_cont(0x4000, 0x1000, 0, num_l1_entries
);
237 ASSERT_EQ(i2
.length
, 0x1000u
);
240 i3
= al1
.allocate_l1_cont(0x6000, 0x3000, 0, num_l1_entries
);
241 ASSERT_EQ(i3
.length
, 0u);
242 i3
= al1
.allocate_l1_cont(0x6000, 0x1000, 0, num_l1_entries
);
243 ASSERT_EQ(i3
.length
, 0x2000u
);
244 ASSERT_EQ(0u, al1
.debug_get_free());
246 std::cout
<< "Done L1" << std::endl
;
// Exercises the two-level (L2) allocator: basic alloc, split allocations
// under fragmentation, full-capacity fill/free cycles at 4K/2M/1M
// granularities, and allocation failure once space is exhausted or too
// fragmented for the requested min_length.
// NOTE(review): line-mangled extract — the `interval_vector_t r`
// declarations and the free_l2(r) calls between phases are missing from
// this view; compare against the upstream Ceph unit test.
249 TEST(TestAllocatorLevel01
, test_l2
)
251 TestAllocatorLevel02 al2
;
252 uint64_t num_l2_entries
= 64;// *512;
253 uint64_t capacity
= num_l2_entries
* 256 * 512 * 4096;
254 al2
.init(capacity
, 0x1000);
255 std::cout
<< "Init L2" << std::endl
;
257 uint64_t allocated1
= 0;
258 interval_vector_t a1
;
259 al2
.allocate_l2(0x2000, 0x2000, &allocated1
, &a1
);
260 ASSERT_EQ(allocated1
, 0x2000u
);
261 ASSERT_EQ(a1
[0].offset
, 0u);
262 ASSERT_EQ(a1
[0].length
, 0x2000u
);
264 // limit query range in debug_get_free for the sake of performance
265 ASSERT_EQ(0x2000u
, al2
.debug_get_allocated(0, 1));
266 ASSERT_EQ(0u, al2
.debug_get_allocated(1, 2));
268 uint64_t allocated2
= 0;
269 interval_vector_t a2
;
270 al2
.allocate_l2(0x2000, 0x2000, &allocated2
, &a2
);
271 ASSERT_EQ(allocated2
, 0x2000u
);
272 ASSERT_EQ(a2
[0].offset
, 0x2000u
);
273 ASSERT_EQ(a2
[0].length
, 0x2000u
);
274 // limit query range in debug_get_free for the sake of performance
275 ASSERT_EQ(0x4000u
, al2
.debug_get_allocated(0, 1));
276 ASSERT_EQ(0u, al2
.debug_get_allocated(1, 2));
282 al2
.allocate_l2(0x1000, 0x1000, &allocated2
, &a2
);
283 ASSERT_EQ(allocated2
, 0x1000u
);
284 ASSERT_EQ(a2
[0].offset
, 0x0000u
);
285 ASSERT_EQ(a2
[0].length
, 0x1000u
);
286 // limit query range in debug_get_free for the sake of performance
287 ASSERT_EQ(0x3000u
, al2
.debug_get_allocated(0, 1));
288 ASSERT_EQ(0u, al2
.debug_get_allocated(1, 2));
290 uint64_t allocated3
= 0;
291 interval_vector_t a3
;
292 al2
.allocate_l2(0x2000, 0x1000, &allocated3
, &a3
);
293 ASSERT_EQ(allocated3
, 0x2000u
);
294 ASSERT_EQ(a3
.size(), 2u);
295 ASSERT_EQ(a3
[0].offset
, 0x1000u
);
296 ASSERT_EQ(a3
[0].length
, 0x1000u
);
297 ASSERT_EQ(a3
[1].offset
, 0x4000u
);
298 ASSERT_EQ(a3
[1].length
, 0x1000u
);
299 // limit query range in debug_get_free for the sake of performance
300 ASSERT_EQ(0x5000u
, al2
.debug_get_allocated(0, 1));
301 ASSERT_EQ(0u, al2
.debug_get_allocated(1, 2));
304 r
.emplace_back(0x0, 0x5000);
310 al2
.allocate_l2(_1m
, _1m
, &allocated3
, &a3
);
311 ASSERT_EQ(a3
.size(), 1u);
312 ASSERT_EQ(a3
[0].offset
, 0u);
313 ASSERT_EQ(a3
[0].length
, _1m
);
319 al2
.allocate_l2(4 * _1m
, _1m
, &allocated3
, &a3
);
320 ASSERT_EQ(a3
.size(), 1u);
321 ASSERT_EQ(a3
[0].offset
, 0u);
322 ASSERT_EQ(a3
[0].length
, 4 * _1m
);
// Full-capacity fill at 4K granularity; progress is logged once per GiB.
327 for (uint64_t i
= 0; i
< capacity
; i
+= 0x1000) {
328 uint64_t allocated4
= 0;
329 interval_vector_t a4
;
330 al2
.allocate_l2(0x1000, 0x1000, &allocated4
, &a4
);
331 ASSERT_EQ(a4
.size(), 1u);
332 ASSERT_EQ(a4
[0].offset
, i
);
333 ASSERT_EQ(a4
[0].length
, 0x1000u
);
334 if (0 == (i
% (1 * 1024 * _1m
))) {
335 std::cout
<< "alloc1 " << i
/ 1024 / 1024 << " mb of "
336 << capacity
/ 1024 / 1024 << std::endl
;
340 for (uint64_t i
= 0; i
< capacity
; i
+= _2m
) {
341 uint64_t allocated4
= 0;
342 interval_vector_t a4
;
343 al2
.allocate_l2(_2m
, _2m
, &allocated4
, &a4
);
344 ASSERT_EQ(a4
.size(), 1);
345 ASSERT_EQ(a4
[0].offset
, i
);
346 ASSERT_EQ(a4
[0].length
, _2m
);
347 if (0 == (i
% (1 * 1024 * _1m
))) {
348 std::cout
<< "alloc1 " << i
/ 1024 / 1024 << " mb of "
349 << capacity
/ 1024 / 1024 << std::endl
;
354 ASSERT_EQ(0u, al2
.debug_get_free());
355 for (uint64_t i
= 0; i
< capacity
; i
+= _1m
) {
357 r
.emplace_back(i
, _1m
);
359 if (0 == (i
% (1 * 1024 * _1m
))) {
360 std::cout
<< "free1 " << i
/ 1024 / 1024 << " mb of "
361 << capacity
/ 1024 / 1024 << std::endl
;
364 ASSERT_EQ(capacity
, al2
.debug_get_free());
366 for (uint64_t i
= 0; i
< capacity
; i
+= _1m
) {
367 uint64_t allocated4
= 0;
368 interval_vector_t a4
;
369 al2
.allocate_l2(_1m
, _1m
, &allocated4
, &a4
);
370 ASSERT_EQ(a4
.size(), 1u);
371 ASSERT_EQ(allocated4
, _1m
);
372 ASSERT_EQ(a4
[0].offset
, i
);
373 ASSERT_EQ(a4
[0].length
, _1m
);
374 if (0 == (i
% (1 * 1024 * _1m
))) {
375 std::cout
<< "alloc2 " << i
/ 1024 / 1024 << " mb of "
376 << capacity
/ 1024 / 1024 << std::endl
;
379 ASSERT_EQ(0u, al2
.debug_get_free());
380 uint64_t allocated4
= 0;
381 interval_vector_t a4
;
382 al2
.allocate_l2(_1m
, _1m
, &allocated4
, &a4
);
383 ASSERT_EQ(a4
.size(), 0u);
384 al2
.allocate_l2(0x1000, 0x1000, &allocated4
, &a4
);
385 ASSERT_EQ(a4
.size(), 0u);
// Free every other 4K page to create maximal fragmentation, then show a
// contiguous 1M request fails while a min_length=4K request succeeds as
// many small extents.
387 for (uint64_t i
= 0; i
< capacity
; i
+= 0x2000) {
389 r
.emplace_back(i
, 0x1000);
391 if (0 == (i
% (1 * 1024 * _1m
))) {
392 std::cout
<< "free2 " << i
/ 1024 / 1024 << " mb of "
393 << capacity
/ 1024 / 1024 << std::endl
;
396 ASSERT_EQ(capacity
/ 2, al2
.debug_get_free());
398 // unable to allocate due to fragmentation
399 al2
.allocate_l2(_1m
, _1m
, &allocated4
, &a4
);
400 ASSERT_EQ(a4
.size(), 0u);
402 for (uint64_t i
= 0; i
< capacity
; i
+= 2 * _1m
) {
405 al2
.allocate_l2(_1m
, 0x1000, &allocated4
, &a4
);
406 ASSERT_EQ(a4
.size(), _1m
/ 0x1000);
407 ASSERT_EQ(allocated4
, _1m
);
408 ASSERT_EQ(a4
[0].offset
, i
);
409 ASSERT_EQ(a4
[0].length
, 0x1000u
);
410 if (0 == (i
% (1 * 1024 * _1m
))) {
411 std::cout
<< "alloc3 " << i
/ 1024 / 1024 << " mb of "
412 << capacity
/ 1024 / 1024 << std::endl
;
415 ASSERT_EQ(0u, al2
.debug_get_free());
417 std::cout
<< "Done L2" << std::endl
;
// Stress test on a 1 TB capacity: fills it as (4K + 1M-4K) pairs, frees
// one 4K page per MB, then times ten allocation attempts that must fail
// (0x2000 contiguous and 2M contiguous) against the heavily fragmented map.
// NOTE(review): line-mangled extract — the `interval_vector_t a`
// declaration and the free_l2(a4) calls are missing from this view;
// compare against the upstream Ceph unit test.
420 TEST(TestAllocatorLevel01
, test_l2_huge
)
422 TestAllocatorLevel02 al2
;
423 uint64_t num_l2_entries
= 4 * 512;
424 uint64_t capacity
= num_l2_entries
* 256 * 512 * 4096; // 1 TB
425 al2
.init(capacity
, 0x1000);
426 std::cout
<< "Init L2 Huge" << std::endl
;
428 for (uint64_t i
= 0; i
< capacity
; i
+= _1m
) {
429 uint64_t allocated4
= 0;
430 interval_vector_t a4
;
431 al2
.allocate_l2(0x1000, 0x1000, &allocated4
, &a4
);
432 ASSERT_EQ(a4
.size(), 1u);
433 ASSERT_EQ(allocated4
, 0x1000u
);
434 ASSERT_EQ(a4
[0].offset
, i
);
435 ASSERT_EQ(a4
[0].length
, 0x1000u
);
439 al2
.allocate_l2(_1m
- 0x1000, 0x1000, &allocated4
, &a4
);
440 ASSERT_EQ(a4
.size(), 1u);
441 ASSERT_EQ(allocated4
, _1m
- 0x1000);
442 ASSERT_EQ(a4
[0].offset
, i
+ 0x1000);
443 ASSERT_EQ(a4
[0].length
, _1m
- 0x1000);
444 if (0 == (i
% (1 * 1024 * _1m
))) {
445 std::cout
<< "allocH " << i
/ 1024 / 1024 << " mb of "
446 << capacity
/ 1024 / 1024 << std::endl
;
449 for (uint64_t i
= 0; i
< capacity
; i
+= _1m
) {
450 interval_vector_t a4
;
451 a4
.emplace_back(i
, 0x1000);
453 if (0 == (i
% (1 * 1024 * _1m
))) {
454 std::cout
<< "freeH1 " << i
/ 1024 / 1024 << " mb of "
455 << capacity
/ 1024 / 1024 << std::endl
;
459 std::cout
<< "Try" << std::endl
;
460 time_t t
= time(NULL
);
461 for (int i
= 0; i
< 10; ++i
) {
462 uint64_t allocated
= 0;
464 al2
.allocate_l2(0x2000, 0x2000, &allocated
, &a
);
465 ASSERT_EQ(a
.size(), 0u);
467 std::cout
<< "End try in " << time(NULL
) - t
<< " seconds" << std::endl
;
470 std::cout
<< "Try" << std::endl
;
471 time_t t
= time(NULL
);
472 for (int i
= 0; i
< 10; ++i
) {
473 uint64_t allocated
= 0;
475 al2
.allocate_l2(_2m
, _2m
, &allocated
, &a
);
476 ASSERT_EQ(a
.size(), 0u);
478 std::cout
<< "End try in " << time(NULL
) - t
<< " seconds" << std::endl
;
481 ASSERT_EQ((capacity
/ _1m
) * 0x1000, al2
.debug_get_free());
483 std::cout
<< "Done L2 Huge" << std::endl
;
// Verifies L2 allocation when capacity is not a power-of-two multiple of
// the L2 entry span: three sub-scenarios (3x512 MB, 500x2 MB, and a
// capacity with a 127*4K tail, plus a tiny 3-page case) each fill the
// device completely and then confirm further allocation fails.
// NOTE(review): line-mangled extract — the braces delimiting the three
// inner scopes (each redeclares al2/capacity) are missing from this view;
// compare against the upstream Ceph unit test.
486 TEST(TestAllocatorLevel01
, test_l2_unaligned
)
489 TestAllocatorLevel02 al2
;
490 uint64_t num_l2_entries
= 3;
491 uint64_t capacity
= num_l2_entries
* 256 * 512 * 4096; // 3x512 MB
492 al2
.init(capacity
, 0x1000);
493 std::cout
<< "Init L2 Unaligned" << std::endl
;
495 for (uint64_t i
= 0; i
< capacity
; i
+= _1m
/ 2) {
496 uint64_t allocated4
= 0;
497 interval_vector_t a4
;
498 al2
.allocate_l2(_1m
/ 2, _1m
/ 2, &allocated4
, &a4
);
499 ASSERT_EQ(a4
.size(), 1u);
500 ASSERT_EQ(allocated4
, _1m
/ 2);
501 ASSERT_EQ(a4
[0].offset
, i
);
502 ASSERT_EQ(a4
[0].length
, _1m
/ 2);
503 if (0 == (i
% (1 * 1024 * _1m
))) {
504 std::cout
<< "allocU " << i
/ 1024 / 1024 << " mb of "
505 << capacity
/ 1024 / 1024 << std::endl
;
508 ASSERT_EQ(0u, al2
.debug_get_free());
510 // no space to allocate
511 uint64_t allocated4
= 0;
512 interval_vector_t a4
;
513 al2
.allocate_l2(0x1000, 0x1000, &allocated4
, &a4
);
514 ASSERT_EQ(a4
.size(), 0u);
518 TestAllocatorLevel02 al2
;
519 uint64_t capacity
= 500 * 512 * 4096; // 500x2 MB
520 al2
.init(capacity
, 0x1000);
521 std::cout
<< ("Init L2 Unaligned2\n");
522 for (uint64_t i
= 0; i
< capacity
; i
+= _1m
/ 2) {
523 uint64_t allocated4
= 0;
524 interval_vector_t a4
;
525 al2
.allocate_l2(_1m
/ 2, _1m
/ 2, &allocated4
, &a4
);
526 ASSERT_EQ(a4
.size(), 1u);
527 ASSERT_EQ(allocated4
, _1m
/ 2);
528 ASSERT_EQ(a4
[0].offset
, i
);
529 ASSERT_EQ(a4
[0].length
, _1m
/ 2);
530 if (0 == (i
% (1 * 1024 * _1m
))) {
531 std::cout
<< "allocU2 " << i
/ 1024 / 1024 << " mb of "
532 << capacity
/ 1024 / 1024 << std::endl
;
535 ASSERT_EQ(0u, al2
.debug_get_free());
537 // no space to allocate
538 uint64_t allocated4
= 0;
539 interval_vector_t a4
;
540 al2
.allocate_l2(0x1000, 0x1000, &allocated4
, &a4
);
541 ASSERT_EQ(a4
.size(), 0u);
546 TestAllocatorLevel02 al2
;
547 uint64_t capacity
= 100 * 512 * 4096 + 127 * 4096;
548 al2
.init(capacity
, 0x1000);
549 std::cout
<< "Init L2 Unaligned2" << std::endl
;
550 for (uint64_t i
= 0; i
< capacity
; i
+= 0x1000) {
551 uint64_t allocated4
= 0;
552 interval_vector_t a4
;
553 al2
.allocate_l2(0x1000, 0x1000, &allocated4
, &a4
);
554 ASSERT_EQ(a4
.size(), 1u);
555 ASSERT_EQ(allocated4
, 0x1000u
);
556 ASSERT_EQ(a4
[0].offset
, i
);
557 ASSERT_EQ(a4
[0].length
, 0x1000u
);
559 ASSERT_EQ(0u, al2
.debug_get_free());
561 // no space to allocate
562 uint64_t allocated4
= 0;
563 interval_vector_t a4
;
564 al2
.allocate_l2(0x1000, 0x1000, &allocated4
, &a4
);
565 ASSERT_EQ(a4
.size(), 0u);
569 TestAllocatorLevel02 al2
;
570 uint64_t capacity
= 3 * 4096;
571 al2
.init(capacity
, 0x1000);
572 std::cout
<< "Init L2 Unaligned2" << std::endl
;
573 for (uint64_t i
= 0; i
< capacity
; i
+= 0x1000) {
574 uint64_t allocated4
= 0;
575 interval_vector_t a4
;
576 al2
.allocate_l2(0x1000, 0x1000, &allocated4
, &a4
);
577 ASSERT_EQ(a4
.size(), 1u);
578 ASSERT_EQ(allocated4
, 0x1000u
);
579 ASSERT_EQ(a4
[0].offset
, i
);
580 ASSERT_EQ(a4
[0].length
, 0x1000u
);
582 ASSERT_EQ(0u, al2
.debug_get_free());
584 // no space to allocate
585 uint64_t allocated4
= 0;
586 interval_vector_t a4
;
587 al2
.allocate_l2(0x1000, 0x1000, &allocated4
, &a4
);
588 ASSERT_EQ(a4
.size(), 0u);
592 std::cout
<< "Done L2 Unaligned" << std::endl
;
// Verifies free-space bin accounting (collect_stats) as extents of various
// sizes/alignments are allocated and released; each phase's expected
// free-space disposition is spelled out in <start chunk, count> comments.
// cbits(x) indexes the bin by the bit width of the free-run length.
// NOTE(review): line-mangled extract — the `interval_vector_t r`
// declarations and the free_l2()/mark_allocated() calls implied by the
// disposition comments are missing from this view; compare against the
// upstream Ceph unit test.
595 TEST(TestAllocatorLevel01
, test_l2_contiguous_alignment
)
598 TestAllocatorLevel02 al2
;
599 uint64_t num_l2_entries
= 3;
600 uint64_t capacity
= num_l2_entries
* 256 * 512 * 4096; // 3x512 MB
601 uint64_t num_chunks
= capacity
/ 4096;
602 al2
.init(capacity
, 4096);
603 std::cout
<< "Init L2 cont aligned" << std::endl
;
605 std::map
<size_t, size_t> bins_overall
;
606 al2
.collect_stats(bins_overall
);
607 ASSERT_EQ(bins_overall
.size(), 1u);
608 // std::cout<<bins_overall.begin()->first << std::endl;
609 ASSERT_EQ(bins_overall
[cbits(num_chunks
) - 1], 1u);
611 for (uint64_t i
= 0; i
< capacity
/ 2; i
+= _1m
) {
612 uint64_t allocated4
= 0;
613 interval_vector_t a4
;
614 al2
.allocate_l2(_1m
, _1m
, &allocated4
, &a4
);
615 ASSERT_EQ(a4
.size(), 1u);
616 ASSERT_EQ(allocated4
, _1m
);
617 ASSERT_EQ(a4
[0].offset
, i
);
618 ASSERT_EQ(a4
[0].length
, _1m
);
620 ASSERT_EQ(capacity
/ 2, al2
.debug_get_free());
622 bins_overall
.clear();
623 al2
.collect_stats(bins_overall
);
624 ASSERT_EQ(bins_overall
.size(), 1u);
625 ASSERT_EQ(bins_overall
[cbits(num_chunks
/ 2) - 1], 1u);
628 // Original free space disposition (start chunk, count):
630 size_t to_release
= 2 * _1m
+ 0x1000;
631 // release 2M + 4K at the beginning
633 r
.emplace_back(0, to_release
);
635 bins_overall
.clear();
636 al2
.collect_stats(bins_overall
);
637 ASSERT_EQ(bins_overall
.size(), 2u);
638 ASSERT_EQ(bins_overall
[cbits(to_release
/ 0x1000) - 1], 1u);
639 ASSERT_EQ(bins_overall
[cbits(num_chunks
/ 2) - 1], 1u);
642 // Original free space disposition (start chunk, count):
643 // <0, 513>, <NC / 2, NC / 2>
644 // allocate 4K within the deallocated range
645 uint64_t allocated4
= 0;
646 interval_vector_t a4
;
647 al2
.allocate_l2(0x1000, 0x1000, &allocated4
, &a4
);
648 ASSERT_EQ(a4
.size(), 1u);
649 ASSERT_EQ(allocated4
, 0x1000u
);
650 ASSERT_EQ(a4
[0].offset
, 0u);
651 ASSERT_EQ(a4
[0].length
, 0x1000u
);
652 bins_overall
.clear();
653 al2
.collect_stats(bins_overall
);
654 ASSERT_EQ(bins_overall
.size(), 2u);
655 ASSERT_EQ(bins_overall
[cbits(2 * _1m
/ 0x1000) - 1], 1u);
656 ASSERT_EQ(bins_overall
[cbits(num_chunks
/ 2) - 1], 1u);
659 // Original free space disposition (start chunk, count):
660 // <1, 512>, <NC / 2, NC / 2>
661 // allocate 1M - should go to offset 4096
662 uint64_t allocated4
= 0;
663 interval_vector_t a4
;
664 al2
.allocate_l2(_1m
, _1m
, &allocated4
, &a4
);
665 ASSERT_EQ(a4
.size(), 1u);
666 ASSERT_EQ(allocated4
, _1m
);
667 ASSERT_EQ(a4
[0].offset
, 4096);
668 ASSERT_EQ(a4
[0].length
, _1m
);
669 bins_overall
.clear();
670 al2
.collect_stats(bins_overall
);
671 ASSERT_EQ(bins_overall
.size(), 2u);
672 ASSERT_EQ(bins_overall
[cbits(_1m
/ 0x1000) - 1], 1u);
673 ASSERT_EQ(bins_overall
[cbits(num_chunks
/ 2) - 1], 1u);
676 // Original free space disposition (start chunk, count):
677 // <257, 256>, <NC / 2, NC / 2>
678 // and allocate yet another 8K within the deallocated range
679 uint64_t allocated4
= 0;
680 interval_vector_t a4
;
681 al2
.allocate_l2(0x2000, 0x1000, &allocated4
, &a4
);
682 ASSERT_EQ(a4
.size(), 1u);
683 ASSERT_EQ(allocated4
, 0x2000u
);
684 ASSERT_EQ(a4
[0].offset
, _1m
+ 0x1000u
);
685 ASSERT_EQ(a4
[0].length
, 0x2000u
);
686 bins_overall
.clear();
687 al2
.collect_stats(bins_overall
);
688 ASSERT_EQ(bins_overall
.size(), 2u);
689 ASSERT_EQ(bins_overall
[cbits((_1m
- 0x2000) / 0x1000) - 1], 1u);
690 ASSERT_EQ(bins_overall
[cbits(num_chunks
/ 2) - 1], 1u);
693 // Original free space disposition (start chunk, count):
694 // <259, 254>, <NC / 2, NC / 2>
697 r
.emplace_back(0x1000, _1m
);
699 bins_overall
.clear();
700 al2
.collect_stats(bins_overall
);
701 ASSERT_EQ(bins_overall
.size(), 3u);
702 //ASSERT_EQ(bins_overall[cbits((2 * _1m - 0x3000) / 0x1000) - 1], 1u);
703 ASSERT_EQ(bins_overall
[cbits(_1m
/ 0x1000) - 1], 1u);
704 ASSERT_EQ(bins_overall
[cbits((_1m
- 0x2000) / 0x1000) - 1], 1u);
705 ASSERT_EQ(bins_overall
[cbits(num_chunks
/ 2) - 1], 1u);
708 // Original free space disposition (start chunk, count):
709 // <1, 257>, <259, 254>, <NC / 2, NC / 2>
710 // allocate 3M - should go to the first 1M chunk and @capacity/2
711 uint64_t allocated4
= 0;
712 interval_vector_t a4
;
713 al2
.allocate_l2(3 * _1m
, _1m
, &allocated4
, &a4
);
714 ASSERT_EQ(a4
.size(), 2u);
715 ASSERT_EQ(allocated4
, 3 * _1m
);
716 ASSERT_EQ(a4
[0].offset
, 0x1000);
717 ASSERT_EQ(a4
[0].length
, _1m
);
718 ASSERT_EQ(a4
[1].offset
, capacity
/ 2);
719 ASSERT_EQ(a4
[1].length
, 2 * _1m
);
720 bins_overall
.clear();
721 al2
.collect_stats(bins_overall
);
722 ASSERT_EQ(bins_overall
.size(), 2u);
723 ASSERT_EQ(bins_overall
[cbits((_1m
- 0x2000) / 0x1000) - 1], 1u);
724 ASSERT_EQ(bins_overall
[cbits((num_chunks
- 512) / 2) - 1], 1u);
727 // Original free space disposition (start chunk, count):
728 // <259, 254>, <NC / 2 - 512, NC / 2 - 512>
729 // release allocated 1M in the first meg chunk except
730 // the first 4K chunk
732 r
.emplace_back(0x1000, _1m
);
734 bins_overall
.clear();
735 al2
.collect_stats(bins_overall
);
736 ASSERT_EQ(bins_overall
.size(), 3u);
737 ASSERT_EQ(bins_overall
[cbits(_1m
/ 0x1000) - 1], 1u);
738 ASSERT_EQ(bins_overall
[cbits((_1m
- 0x2000) / 0x1000) - 1], 1u);
739 ASSERT_EQ(bins_overall
[cbits((num_chunks
- 512) / 2) - 1], 1u);
742 // Original free space disposition (start chunk, count):
743 // <1, 256>, <259, 254>, <NC / 2 - 512, NC / 2 - 512>
744 // release 2M @(capacity / 2)
746 r
.emplace_back(capacity
/ 2, 2 * _1m
);
748 bins_overall
.clear();
749 al2
.collect_stats(bins_overall
);
750 ASSERT_EQ(bins_overall
.size(), 3u);
751 ASSERT_EQ(bins_overall
[cbits(_1m
/ 0x1000) - 1], 1u);
752 ASSERT_EQ(bins_overall
[cbits((_1m
- 0x2000) / 0x1000) - 1], 1u);
753 ASSERT_EQ(bins_overall
[cbits((num_chunks
) / 2) - 1], 1u);
756 // Original free space disposition (start chunk, count):
757 // <1, 256>, <259, 254>, <NC / 2, NC / 2>
758 // allocate 4x512K - should go to the second halves of
759 // the first and second 1M chunks and @(capacity / 2)
760 uint64_t allocated4
= 0;
761 interval_vector_t a4
;
762 al2
.allocate_l2(2 * _1m
, _1m
/ 2, &allocated4
, &a4
);
763 ASSERT_EQ(a4
.size(), 3u);
764 ASSERT_EQ(allocated4
, 2 * _1m
);
765 ASSERT_EQ(a4
[1].offset
, 0x1000);
766 ASSERT_EQ(a4
[1].length
, _1m
);
767 ASSERT_EQ(a4
[0].offset
, _1m
+ 0x3000);
768 ASSERT_EQ(a4
[0].length
, _1m
/ 2);
769 ASSERT_EQ(a4
[2].offset
, capacity
/ 2);
770 ASSERT_EQ(a4
[2].length
, _1m
/ 2);
772 bins_overall
.clear();
773 al2
.collect_stats(bins_overall
);
774 ASSERT_EQ(bins_overall
.size(), 2u);
775 ASSERT_EQ(bins_overall
[cbits((_1m
- 0x2000 - 0x80000) / 0x1000) - 1], 1u);
776 ASSERT_EQ(bins_overall
[cbits((num_chunks
- 256) / 2) - 1], 1u);
780 // Original free space disposition (start chunk, count):
781 // <387, 126>, <NC / 2 + 128, NC / 2 - 128>
782 // cleanup first 1536K except the last 4K chunk
784 r
.emplace_back(0, _1m
+ _1m
/ 2 - 0x1000);
786 bins_overall
.clear();
787 al2
.collect_stats(bins_overall
);
789 ASSERT_EQ(bins_overall
.size(), 3u);
790 ASSERT_EQ(bins_overall
[cbits((_1m
+ _1m
/ 2 - 0x1000) / 0x1000) - 1], 1u);
791 ASSERT_EQ(bins_overall
[cbits((_1m
- 0x2000 - 0x80000) / 0x1000) - 1], 1u);
792 ASSERT_EQ(bins_overall
[cbits((num_chunks
- 256) / 2) - 1], 1u);
795 // Original free space disposition (start chunk, count):
796 // <0, 383> <387, 126>, <NC / 2 + 128, NC / 2 - 128>
797 // release 512K @(capacity / 2)
799 r
.emplace_back(capacity
/ 2, _1m
/ 2);
801 bins_overall
.clear();
802 al2
.collect_stats(bins_overall
);
804 ASSERT_EQ(bins_overall
.size(), 3u);
805 ASSERT_EQ(bins_overall
[cbits((_1m
+ _1m
/ 2 - 0x1000) / 0x1000) - 1], 1u);
806 ASSERT_EQ(bins_overall
[cbits((_1m
- 0x2000 - 0x80000) / 0x1000) - 1], 1u);
807 ASSERT_EQ(bins_overall
[cbits(num_chunks
/ 2) - 1], 1u);
810 // Original free space disposition (start chunk, count):
811 // <0, 383> <387, 126>, <NC / 2, NC / 2>
812 // allocate 132M (=33792*4096) = using 4M granularity should go to (capacity / 2)
813 uint64_t allocated4
= 0;
814 interval_vector_t a4
;
815 al2
.allocate_l2(132 * _1m
, 4 * _1m
, &allocated4
, &a4
);
816 ASSERT_EQ(a4
.size(), 1u);
817 ASSERT_EQ(a4
[0].offset
, capacity
/ 2);
818 ASSERT_EQ(a4
[0].length
, 132 * _1m
);
820 bins_overall
.clear();
821 al2
.collect_stats(bins_overall
);
822 ASSERT_EQ(bins_overall
.size(), 3u);
823 ASSERT_EQ(bins_overall
[cbits((_1m
+ _1m
/ 2 - 0x1000) / 0x1000) - 1], 1u);
824 ASSERT_EQ(bins_overall
[cbits((_1m
- 0x2000 - 0x80000) / 0x1000) - 1], 1u);
825 ASSERT_EQ(bins_overall
[cbits(num_chunks
/ 2 - 33792) - 1], 1u);
828 // Original free space disposition (start chunk, count):
829 // <0, 383> <387, 126>, <NC / 2 + 33792, NC / 2 - 33792>
830 // cleanup remaining 4*4K chunks in the first 2M
832 r
.emplace_back(383 * 4096, 4 * 0x1000);
834 bins_overall
.clear();
835 al2
.collect_stats(bins_overall
);
837 ASSERT_EQ(bins_overall
.size(), 2u);
838 ASSERT_EQ(bins_overall
[cbits((2 * _1m
+ 0x1000) / 0x1000) - 1], 1u);
839 ASSERT_EQ(bins_overall
[cbits(num_chunks
/ 2 - 33792) - 1], 1u);
842 // Original free space disposition (start chunk, count):
843 // <0, 513>, <NC / 2 + 33792, NC / 2 - 33792>
844 // release 132M @(capacity / 2)
846 r
.emplace_back(capacity
/ 2, 132 * _1m
);
848 bins_overall
.clear();
849 al2
.collect_stats(bins_overall
);
850 ASSERT_EQ(bins_overall
.size(), 2u);
851 ASSERT_EQ(bins_overall
[cbits((2 * _1m
+ 0x1000) / 0x1000) - 1], 1u);
852 ASSERT_EQ(bins_overall
[cbits(num_chunks
/ 2) - 1], 1u);
855 // Original free space disposition (start chunk, count):
856 // <0, 513>, <NC / 2, NC / 2>
857 // allocate 132M using 2M granularity should go to the first chunk and to
859 uint64_t allocated4
= 0;
860 interval_vector_t a4
;
861 al2
.allocate_l2(132 * _1m
, 2 * _1m
, &allocated4
, &a4
);
862 ASSERT_EQ(a4
.size(), 2u);
863 ASSERT_EQ(a4
[0].offset
, 0u);
864 ASSERT_EQ(a4
[0].length
, 2 * _1m
);
865 ASSERT_EQ(a4
[1].offset
, capacity
/ 2);
866 ASSERT_EQ(a4
[1].length
, 130 * _1m
);
868 bins_overall
.clear();
869 al2
.collect_stats(bins_overall
);
871 ASSERT_EQ(bins_overall
.size(), 2u);
872 ASSERT_EQ(bins_overall
[cbits(0)], 1u);
873 ASSERT_EQ(bins_overall
[cbits(num_chunks
/ 2 - 33792) - 1], 1u);
876 // Original free space disposition (start chunk, count):
877 // <512, 1>, <NC / 2 + 33792, NC / 2 - 33792>
878 // release 130M @(capacity / 2)
880 r
.emplace_back(capacity
/ 2, 132 * _1m
);
882 bins_overall
.clear();
883 al2
.collect_stats(bins_overall
);
885 ASSERT_EQ(bins_overall
.size(), 2u);
886 ASSERT_EQ(bins_overall
[cbits(0)], 1u);
887 ASSERT_EQ(bins_overall
[cbits(num_chunks
/ 2) - 1], 1u);
890 // Original free space disposition (start chunk, count):
891 // <512,1>, <NC / 2, NC / 2>
896 r
.emplace_back(0x1000, 0x4000);
897 r
.emplace_back(0x7000, 0x8000);
898 r
.emplace_back(0x11000, 0x6000);
901 bins_overall
.clear();
902 al2
.collect_stats(bins_overall
);
904 ASSERT_EQ(bins_overall
.size(), 4u);
905 ASSERT_EQ(bins_overall
[cbits(0)], 1u);
906 ASSERT_EQ(bins_overall
[cbits(0x4000 / 0x1000) - 1], 2u); // accounts both 0x4000 & 0x6000
907 ASSERT_EQ(bins_overall
[cbits(0x8000 / 0x1000) - 1], 1u);
908 ASSERT_EQ(bins_overall
[cbits(num_chunks
/ 2) - 1], 1u);
911 // Original free space disposition (start chunk, count):
912 // <1, 4>, <7, 8>, <17, 6> <512,1>, <NC / 2, NC / 2>
913 // allocate 80K using 16K granularity
914 uint64_t allocated4
= 0;
915 interval_vector_t a4
;
916 al2
.allocate_l2(0x14000, 0x4000, &allocated4
, &a4
);
918 ASSERT_EQ(a4
.size(), 4);
919 ASSERT_EQ(a4
[1].offset
, 0x1000u
);
920 ASSERT_EQ(a4
[1].length
, 0x4000u
);
921 ASSERT_EQ(a4
[0].offset
, 0x7000u
);
922 ASSERT_EQ(a4
[0].length
, 0x8000u
);
923 ASSERT_EQ(a4
[2].offset
, 0x11000u
);
924 ASSERT_EQ(a4
[2].length
, 0x4000u
);
925 ASSERT_EQ(a4
[3].offset
, capacity
/ 2);
926 ASSERT_EQ(a4
[3].length
, 0x4000u
);
928 bins_overall
.clear();
929 al2
.collect_stats(bins_overall
);
931 ASSERT_EQ(bins_overall
.size(), 3u);
932 ASSERT_EQ(bins_overall
[cbits(0)], 1u);
933 ASSERT_EQ(bins_overall
[cbits(0x2000 / 0x1000) - 1], 1u);
934 ASSERT_EQ(bins_overall
[cbits(num_chunks
/ 2 - 1) - 1], 1u);
937 // Original free space disposition (start chunk, count):
938 // <21, 2> <512,1>, <NC / 2 + 1, NC / 2 - 1>
941 std::cout
<< "Done L2 cont aligned" << std::endl
;
// Regression test: with a 32 GB device and 64K alloc unit, a 1M request
// must succeed at offset 0 (the original bug produced no allocation at
// all — see the inline comment on the size() assertion).
944 TEST(TestAllocatorLevel01
, test_4G_alloc_bug
)
947 TestAllocatorLevel02 al2
;
948 uint64_t capacity
= 0x8000 * _1m
; // = 32GB
949 al2
.init(capacity
, 0x10000);
950 std::cout
<< "Init L2 cont aligned" << std::endl
;
952 uint64_t allocated4
= 0;
953 interval_vector_t a4
;
954 al2
.allocate_l2(_1m
, _1m
, &allocated4
, &a4
);
955 ASSERT_EQ(a4
.size(), 1u); // the bug caused no allocations here
956 ASSERT_EQ(allocated4
, _1m
);
957 ASSERT_EQ(a4
[0].offset
, 0u);
958 ASSERT_EQ(a4
[0].length
, _1m
);
// Regression test: fill 32 GB completely, release four specific extents
// (the two middle ones being adjacent 2 GB runs), then verify a ~992 MB
// request is satisfied from exactly those freed regions in order.
// NOTE(review): line-mangled extract — the `interval_vector_t r`
// declaration and the free_l2(r) call after the emplace_back() batch are
// missing from this view; compare against the upstream Ceph unit test.
962 TEST(TestAllocatorLevel01
, test_4G_alloc_bug2
)
965 TestAllocatorLevel02 al2
;
966 uint64_t capacity
= 0x8000 * _1m
; // = 32GB
967 al2
.init(capacity
, 0x10000);
969 for (uint64_t i
= 0; i
< capacity
; i
+= _1m
) {
970 uint64_t allocated4
= 0;
971 interval_vector_t a4
;
972 al2
.allocate_l2(_1m
, _1m
, &allocated4
, &a4
);
973 ASSERT_EQ(a4
.size(), 1u);
974 ASSERT_EQ(allocated4
, _1m
);
975 ASSERT_EQ(a4
[0].offset
, i
);
976 ASSERT_EQ(a4
[0].length
, _1m
);
978 ASSERT_EQ(0u , al2
.debug_get_free());
981 r
.emplace_back(0x5fec30000, 0x13d0000);
982 r
.emplace_back(0x628000000, 0x80000000);
983 r
.emplace_back(0x6a8000000, 0x80000000);
984 r
.emplace_back(0x728100000, 0x70000);
987 std::map
<size_t, size_t> bins_overall
;
988 al2
.collect_stats(bins_overall
);
990 uint64_t allocated4
= 0;
991 interval_vector_t a4
;
992 al2
.allocate_l2(0x3e000000, _1m
, &allocated4
, &a4
);
993 ASSERT_EQ(a4
.size(), 2u);
994 ASSERT_EQ(allocated4
, 0x3e000000u
);
995 ASSERT_EQ(a4
[0].offset
, 0x5fec30000u
);
996 ASSERT_EQ(a4
[0].length
, 0x1300000u
);
997 ASSERT_EQ(a4
[1].offset
, 0x628000000u
);
998 ASSERT_EQ(a4
[1].length
, 0x3cd00000u
);
// Regression test: a single 4 GB request on a 32 GB device must be split
// by the allocator into two 2 GB extents (per the inline comment on the
// size() assertion) rather than returned as one oversized extent.
1002 TEST(TestAllocatorLevel01
, test_4G_alloc_bug3
)
1005 TestAllocatorLevel02 al2
;
1006 uint64_t capacity
= 0x8000 * _1m
; // = 32GB
1007 al2
.init(capacity
, 0x10000);
1008 std::cout
<< "Init L2 cont aligned" << std::endl
;
1010 uint64_t allocated4
= 0;
1011 interval_vector_t a4
;
1012 al2
.allocate_l2(4096ull * _1m
, _1m
, &allocated4
, &a4
);
1013 ASSERT_EQ(a4
.size(), 2u); // allocator has to split into 2 allocations
1014 ASSERT_EQ(allocated4
, 4096ull * _1m
);
1015 ASSERT_EQ(a4
[0].offset
, 0u);
1016 ASSERT_EQ(a4
[0].length
, 2048ull * _1m
);
1017 ASSERT_EQ(a4
[1].offset
, 2048ull * _1m
);
1018 ASSERT_EQ(a4
[1].length
, 2048ull * _1m
);
1022 TEST(TestAllocatorLevel01
, test_claim_free_l2
)
1024 TestAllocatorLevel02 al2
;
1025 uint64_t num_l2_entries
= 64;// *512;
1026 uint64_t capacity
= num_l2_entries
* 256 * 512 * 4096;
1027 al2
.init(capacity
, 0x1000);
1028 std::cout
<< "Init L2" << std::endl
;
1030 uint64_t max_available
= 0x20000;
1031 al2
.mark_allocated(max_available
, capacity
- max_available
);
1033 uint64_t allocated1
= 0;
1034 interval_vector_t a1
;
1035 al2
.allocate_l2(0x2000, 0x2000, &allocated1
, &a1
);
1036 ASSERT_EQ(allocated1
, 0x2000u
);
1037 ASSERT_EQ(a1
[0].offset
, 0u);
1038 ASSERT_EQ(a1
[0].length
, 0x2000u
);
1040 uint64_t allocated2
= 0;
1041 interval_vector_t a2
;
1042 al2
.allocate_l2(0x2000, 0x2000, &allocated2
, &a2
);
1043 ASSERT_EQ(allocated2
, 0x2000u
);
1044 ASSERT_EQ(a2
[0].offset
, 0x2000u
);
1045 ASSERT_EQ(a2
[0].length
, 0x2000u
);
1047 uint64_t allocated3
= 0;
1048 interval_vector_t a3
;
1049 al2
.allocate_l2(0x3000, 0x3000, &allocated3
, &a3
);
1050 ASSERT_EQ(allocated3
, 0x3000u
);
1051 ASSERT_EQ(a3
[0].offset
, 0x4000u
);
1052 ASSERT_EQ(a3
[0].length
, 0x3000u
);
1056 ASSERT_EQ(max_available
- 0x2000, al2
.debug_get_free());
1058 auto claimed
= al2
.claim_free_to_right(0x4000);
1059 ASSERT_EQ(max_available
- 0x4000u
, claimed
);
1060 ASSERT_EQ(0x2000, al2
.debug_get_free());
1062 claimed
= al2
.claim_free_to_right(0x4000);
1063 ASSERT_EQ(0, claimed
);
1064 ASSERT_EQ(0x2000, al2
.debug_get_free());
1066 claimed
= al2
.claim_free_to_left(0x2000);
1067 ASSERT_EQ(0x2000u
, claimed
);
1068 ASSERT_EQ(0, al2
.debug_get_free());
1070 claimed
= al2
.claim_free_to_left(0x2000);
1071 ASSERT_EQ(0, claimed
);
1072 ASSERT_EQ(0, al2
.debug_get_free());
1075 al2
.mark_free(0x3000, 0x4000);
1076 ASSERT_EQ(0x4000, al2
.debug_get_free());
1078 claimed
= al2
.claim_free_to_right(0x7000);
1079 ASSERT_EQ(0, claimed
);
1080 ASSERT_EQ(0x4000, al2
.debug_get_free());
1082 claimed
= al2
.claim_free_to_right(0x6000);
1083 ASSERT_EQ(0x1000, claimed
);
1084 ASSERT_EQ(0x3000, al2
.debug_get_free());
1086 claimed
= al2
.claim_free_to_right(0x6000);
1087 ASSERT_EQ(0, claimed
);
1088 ASSERT_EQ(0x3000, al2
.debug_get_free());
1090 claimed
= al2
.claim_free_to_left(0x3000);
1091 ASSERT_EQ(0u, claimed
);
1092 ASSERT_EQ(0x3000, al2
.debug_get_free());
1094 claimed
= al2
.claim_free_to_left(0x4000);
1095 ASSERT_EQ(0x1000, claimed
);
1096 ASSERT_EQ(0x2000, al2
.debug_get_free());
1098 // claiming on the right boundary
1099 claimed
= al2
.claim_free_to_right(capacity
);
1100 ASSERT_EQ(0x0, claimed
);
1101 ASSERT_EQ(0x2000, al2
.debug_get_free());
1103 // extend allocator space up to 64M
1104 auto max_available2
= 64 * 1024 * 1024;
1105 al2
.mark_free(max_available
, max_available2
- max_available
);
1106 ASSERT_EQ(max_available2
- max_available
+ 0x2000, al2
.debug_get_free());
1108 // pin some allocations
1109 al2
.mark_allocated(0x400000 + 0x2000, 1000);
1110 al2
.mark_allocated(0x400000 + 0x5000, 1000);
1111 al2
.mark_allocated(0x400000 + 0x20000, 1000);
1112 ASSERT_EQ(max_available2
- max_available
- 0x1000, al2
.debug_get_free());
1114 claimed
= al2
.claim_free_to_left(0x403000);
1115 ASSERT_EQ(0x0, claimed
);
1117 claimed
= al2
.claim_free_to_left(0x404000);
1118 ASSERT_EQ(0x1000, claimed
);
1119 ASSERT_EQ(max_available2
- max_available
- 0x2000, al2
.debug_get_free());
1121 claimed
= al2
.claim_free_to_left(max_available
);
1122 ASSERT_EQ(0, claimed
);
1124 claimed
= al2
.claim_free_to_left(0x400000);
1125 ASSERT_EQ(0x3e0000, claimed
);
1126 ASSERT_EQ(max_available2
- max_available
- 0x3e2000, al2
.get_available());
1127 ASSERT_EQ(max_available2
- max_available
- 0x3e2000, al2
.debug_get_free());
1129 claimed
= al2
.claim_free_to_right(0x407000);
1130 ASSERT_EQ(0x19000, claimed
);
1131 ASSERT_EQ(max_available2
- max_available
- 0x3e2000 - 0x19000,
1132 al2
.get_available());
1133 ASSERT_EQ(max_available2
- max_available
- 0x3e2000 - 0x19000,
1134 al2
.debug_get_free());
1136 claimed
= al2
.claim_free_to_right(0x407000);
1137 ASSERT_EQ(0, claimed
);
1139 claimed
= al2
.claim_free_to_right(0x430000);
1140 ASSERT_EQ(max_available2
- 0x430000, claimed
);
1142 al2
.get_available());
1144 al2
.debug_get_free());