//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

#include <pthread.h>

#include <algorithm>
#include <set>
#include <vector>

// Too slow for debug build
#if !SANITIZER_DEBUG

#if SANITIZER_CAN_USE_ALLOCATOR64
#if SANITIZER_WINDOWS
static const uptr kAllocatorSpace = 0x10000000000ULL;
static const uptr kAllocatorSize  = 0x10000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 40;
#else
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;
#endif

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#elif defined(__mips64)
static const u64 kAddressSpaceSize = 1ULL << 40;
#elif defined(__aarch64__)
static const u64 kAddressSpaceSize = 1ULL << 39;
#elif defined(__s390x__)
static const u64 kAddressSpaceSize = 1ULL << 53;
#elif defined(__s390__)
static const u64 kAddressSpaceSize = 1ULL << 31;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;

typedef SizeClassAllocator32<
  0, kAddressSpaceSize,
  /*kMetadataSize*/16,
  CompactSizeClassMap,
  kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize> >
  Allocator32Compact;

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}

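// Generic round-trip test for a size-class allocator: allocate chunks of many
// sizes through a local cache, check block boundaries, ownership and metadata,
// then free everything and verify that total memory usage stays stable.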
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 4000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all chunks and check the metadata we stored.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

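// Stresses GetMetaData(): allocates a batch of chunks, then repeatedly looks
// up their metadata and checks that the returned pointers never change.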
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

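// Allocates a large number of chunks of the largest size class to make sure
// GetBlockBegin() is computed without overflow for far-away pointers.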
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::kNumClasses - 1;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  u64 G8 = 1ULL << 33;
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= G8 / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
}

TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
}
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

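// Counts map/unmap events so the tests below can check exactly when the
// allocators map and unmap memory.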
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize,
      /*kMetadataSize*/16,
      CompactSizeClassMap,
      kRegionSizeLog,
      FlatByteMap<kFlatByteMapSize>,
      TestMapUnmapCallback>
    Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

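// Keeps allocating batches until the allocator exhausts its address range;
// used below to test the out-of-memory death path.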
template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none is left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test aligned allocations.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}

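// Exercises the combined (primary + secondary) allocator: absurdly large
// requests must return null (or abort once may_return_null is disabled), and
// a shuffled allocate/free cycle must keep per-chunk metadata consistent.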
template
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init(/* may_return_null */ true);

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  // With may_return_null set to false, the huge request must abort instead.
  a->SetMayReturnNull(false);
  EXPECT_DEATH(a->Allocate(&cache, -1, 1),
               "allocator is terminating the process");

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}

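// Checks that memory allocated and freed through a local cache is returned to
// the allocator, so repeated allocate/free cycles do not grow TotalMemoryUsed().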
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

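// A static cache used from short-lived threads: allocating and draining from
// many threads in turn must not grow the allocator's memory usage.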
#if SANITIZER_CAN_USE_ALLOCATOR64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
    PTHREAD_JOIN(t, 0);
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads.  The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(
      params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized.  We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
  PTHREAD_JOIN(t, 0);

  allocator.TestOnlyUnmap();
}
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;  // Fixed seed for reproducibility.
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

, LargeAlloc
) {
641 void *p
= InternalAlloc(10 << 20);
TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

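// Allocator iteration tests: ForEachChunk() must report every live chunk.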
void IterationTestCallback(uptr chunk, void *arg) {
  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
}

template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<uptr> reported_chunks;
  a->ForceLock();
  a->ForEachChunk(IterationTestCallback, &reported_chunks);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++)
    allocated[i] = (char *)a.Allocate(&stats, size, 1);

  std::set<uptr> reported_chunks;
  a.ForceLock();
  a.ForEachChunk(IterationTestCallback, &reported_chunks);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }
  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  a.ForceLock();
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
                               SpecialSizeClassMap> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init();
  SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.
  const uptr kClassID = 107;
  const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  cache.Allocate(a, kClassID);
  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
               "The process has exhausted");
  a->TestOnlyUnmap();
  delete a;
}
#endif

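// TwoLevelByteMap tests: set every 7th entry and verify reads, both from a
// single thread and from several sharded threads.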
TEST(SanitizerCommon, TwoLevelByteMap) {
  const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
  const u64 n = kSize1 * kSize2;
  TwoLevelByteMap<kSize1, kSize2> m;
  m.TestOnlyInit();
  for (u64 i = 0; i < n; i += 7) {
    m.set(i, (i % 100) + 1);
  }
  for (u64 j = 0; j < n; j++) {
    if (j % 7)
      EXPECT_EQ(m[j], 0);
    else
      EXPECT_EQ(m[j], (j % 100) + 1);
  }

  m.TestOnlyUnmap();
}

typedef TwoLevelByteMap<1 << 12, 1 << 13, TestMapUnmapCallback> TestByteMap;

struct TestByteMapParam {
  TestByteMap *m;
  size_t shard;
  size_t num_shards;
};

void *TwoLevelByteMapUserThread(void *param) {
  TestByteMapParam *p = (TestByteMapParam*)param;
  for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
    size_t val = (i % 100) + 1;
    p->m->set(i, val);
    EXPECT_EQ((*p->m)[i], val);
  }
  return 0;
}

TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
  TestByteMap m;
  m.TestOnlyInit();
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  static const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  TestByteMapParam p[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    p[i].m = &m;
    p[i].shard = i;
    p[i].num_shards = kNumThreads;
    PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
  m.TestOnlyUnmap();
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
}

#endif  // #if !SANITIZER_DEBUG