//===-- sanitizer_allocator.cc --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void *__libc_memalign(uptr alignment, uptr size);
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
#  include <stdlib.h>
#  define __libc_malloc malloc
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p;
  uptr error = posix_memalign(&p, alignment, size);
  if (error) return nullptr;
  return p;
}
#  define __libc_realloc realloc
#  define __libc_free free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  (void)cache;
  return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  __libc_free(ptr);
}
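
// In this configuration there is no combined allocator instance; the
// libc-backed helpers above do all the work, so the accessor returns null.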
InternalAllocator *internal_allocator() {
  return 0;
}

#else  // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;
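
// Lazily constructs the InternalAllocator in the placeholder above using
// double-checked locking: the common case is a single acquire-load, and the
// spin mutex serializes first-time initialization.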
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(/* may_return_null*/ false);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}
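
// The Raw* helpers below fall back to a single process-wide cache guarded by
// internal_allocator_cache_mu when the caller does not supply its own
// InternalAllocatorCache, so cache-less calls serialize on that lock.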
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment, false);
  }
  return internal_allocator()->Allocate(cache, size, alignment, false);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
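
// Every block returned by InternalAlloc carries a u64 header holding
// kBlockMagic. InternalRealloc and InternalFree CHECK the magic to catch
// pointers that did not come from this allocator, and InternalFree clears it
// so a double free trips the same CHECK.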
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  if (size + sizeof(u64) < size)
    return nullptr;
  void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
  if (!p)
    return nullptr;
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}
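
// Usage sketch (hypothetical caller): the returned pointer is offset past the
// magic header, so it must be released with InternalFree, never libc free():
//   u32 *buf = (u32*)InternalAlloc(n * sizeof(u32));
//   ...
//   InternalFree(buf);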

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  if (!addr)
    return InternalAlloc(size, cache);
  if (size + sizeof(u64) < size)
    return nullptr;
  addr = (char*)addr - sizeof(u64);
  size = size + sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  void *p = RawInternalRealloc(addr, size, cache);
  if (!p)
    return nullptr;
  return (char*)p + sizeof(u64);
}

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (CallocShouldReturnNullDueToOverflow(count, size))
    return internal_allocator()->ReturnNullOrDie();
  void *p = InternalAlloc(count * size, cache);
  if (p) internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (!addr)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;  // Clear the magic so a second free trips the CHECK.
  RawInternalFree(addr, cache);
}

// LowLevelAllocator
static LowLevelAllocateCallback low_level_alloc_callback;
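
// LowLevelAllocator is a bump allocator over mmap'ed pages: it only advances
// allocated_current_ and never frees; when a request does not fit, any unused
// tail of the current mapping is abandoned and fresh pages are mapped.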
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, 8);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
  if (!size) return false;
  uptr max = (uptr)-1L;
  return (max / size) < n;
}
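
// Example: with 64-bit uptr, size == 1ULL << 32 and n == 1ULL << 33 gives
// max / size == (1ULL << 32) - 1, which is < n, so n * size would wrap and
// the calloc request must fail.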

void NORETURN ReportAllocatorCannotReturnNull() {
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");
  CHECK(0);
  Die();
}

} // namespace __sanitizer