//===-- sanitizer_allocator.cc --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"

namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void __libc_free(void *ptr);
#  define LIBC_MALLOC __libc_malloc
#  define LIBC_FREE __libc_free
# else
#  include <stdlib.h>
#  define LIBC_MALLOC malloc
#  define LIBC_FREE free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
  (void)cache;
  return LIBC_MALLOC(size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  LIBC_FREE(ptr);
}

InternalAllocator *internal_allocator() {
  return 0;
}

#else  // SANITIZER_GO
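
// In every other configuration the run-time uses its own internal allocator.
// The allocator object is constructed lazily inside statically reserved,
// cache-line-aligned storage, so no dynamic initializer has to run at
// startup.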
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init();
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}
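
// Note: internal_allocator() is classic double-checked locking. The acquire
// load pairs with the release store performed after Init(), so a thread that
// observes internal_allocator_initialized == 1 also observes the fully
// initialized allocator state; the relaxed re-check under the mutex only
// decides which thread runs Init().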
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
                                          false);
  }
  return internal_allocator()->Allocate(cache, size, 8, false);
}
static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}
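
// Callers that do not own a per-thread cache pass cache == 0 and fall back to
// the single global cache guarded by internal_allocator_cache_mu, so frequent
// call sites are better off supplying their own InternalAllocatorCache.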
#endif  // SANITIZER_GO
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
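
// Every block handed out by InternalAlloc is prefixed with a u64 header
// holding kBlockMagic. InternalFree checks and then clears the header, which
// catches frees of pointers that did not come from this allocator as well as
// most double frees. The layout is:
//
//   [ u64 magic ][ user data ... ]
//                ^-- pointer returned to the caller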
void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
  if (size + sizeof(u64) < size)
    return 0;
  void *p = RawInternalAlloc(size + sizeof(u64), cache);
  if (p == 0)
    return 0;
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}
void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (addr == 0)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}
static LowLevelAllocateCallback low_level_alloc_callback;
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, 8);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}
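
// LowLevelAllocator is a simple bump allocator: it carves 8-byte-aligned
// chunks out of regions obtained with MmapOrDie and never unmaps them. When
// the current region cannot satisfy a request, any tail left in it is
// abandoned and a fresh region is mapped, so it is only suitable for small,
// long-lived metadata allocations.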
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
  if (!size) return false;
  uptr max = (uptr)-1L;
  return (max / size) < n;
}
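
// Example: on a 32-bit target, calloc(0x10000, 0x10001) must fail, since the
// product 0x100010000 does not fit in uptr. Here max == 0xffffffff,
// max / 0x10000 == 0xffff, and 0xffff < 0x10001, so the function returns true.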
void *AllocatorReturnNull() {
  if (common_flags()->allocator_may_return_null)
    return 0;
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");
  CHECK(0);
  return 0;
}
}  // namespace __sanitizer