//===-- sanitizer_allocator.cc --------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

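// In the libc-backed configurations below, internal allocations call the
// __libc_* entry points (or plain malloc where those are unavailable)
// rather than the tool-facing malloc/free, so the run-time's own
// bookkeeping does not re-enter any malloc interceptors it may install.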
// ThreadSanitizer for Go uses libc malloc/free.
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
#  if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
#  endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
#  include <stdlib.h>
#  define __libc_malloc malloc
#  if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p;
  uptr error = posix_memalign(&p, alignment, size);
  if (error) return nullptr;
  return p;
}
#  endif
#  define __libc_realloc realloc
#  define __libc_free free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
#if !SANITIZER_GO
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
#else
  // Windows does not provide __libc_memalign/posix_memalign. It provides
  // _aligned_malloc, but the allocated blocks can't be passed to free;
  // they need to be passed to _aligned_free. The InternalAlloc interface
  // does not account for this requirement. Alignment does not seem to be
  // used anywhere in the run-time, so just call __libc_malloc for now.
  DCHECK_EQ(alignment, 0);
  return __libc_malloc(size);
#endif
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  (void)cache;
  return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  __libc_free(ptr);
}

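// No InternalAllocator instance exists in this configuration; everything
// above forwards straight to libc, so return a null handle.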
InternalAllocator *internal_allocator() {
  return 0;
}

#else  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

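// The allocator lives in a static placeholder buffer so that no global
// constructor needs to run at startup; the object is initialized in place
// on first use.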
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

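// Returns the lazily-initialized singleton. Double-checked locking: the
// fast path is a single acquire load; initialization runs at most once
// under internal_alloc_init_mu, and the release store publishes the
// initialized object to other threads.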
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(
          /* may_return_null */ false, kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

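// Callers that own a per-thread cache pass it in; callers without one share
// a single global cache, serialized by internal_allocator_cache_mu.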
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment, false);
  }
  return internal_allocator()->Allocate(cache, size, alignment, false);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

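// Each block returned by InternalAlloc is preceded by a u64 header holding
// kBlockMagic. InternalFree/InternalRealloc verify the magic to catch
// pointers that did not come from this allocator, and InternalFree zeroes
// it so a double-free trips the same check.
//
// Typical use (sketch; assuming the default cache/alignment arguments from
// the header):
//   void *p = InternalAlloc(payload_size);
//   p = InternalRealloc(p, new_size);
//   InternalFree(p);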
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

5bcae85e | 139 | void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) { |
1a4d82fc | 140 | if (size + sizeof(u64) < size) |
92a42be0 | 141 | return nullptr; |
5bcae85e | 142 | void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment); |
92a42be0 SL |
143 | if (!p) |
144 | return nullptr; | |
1a4d82fc JJ |
145 | ((u64*)p)[0] = kBlockMagic; |
146 | return (char*)p + sizeof(u64); | |
147 | } | |

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  if (!addr)
    return InternalAlloc(size, cache);
  if (size + sizeof(u64) < size)
    return nullptr;
  addr = (char*)addr - sizeof(u64);
  size = size + sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  void *p = RawInternalRealloc(addr, size, cache);
  if (!p)
    return nullptr;
  return (char*)p + sizeof(u64);
}

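// Overflow-checked calloc: a bad count/size pair is handed to the
// allocator's null-or-die policy instead of silently wrapping.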
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (CallocShouldReturnNullDueToOverflow(count, size))
    return internal_allocator()->ReturnNullOrDieOnBadRequest();
  void *p = InternalAlloc(count * size, cache);
  if (p) internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (!addr)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}

// LowLevelAllocator
static LowLevelAllocateCallback low_level_alloc_callback;

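// A bump allocator over mmap'ed chunks. Memory is never freed: when the
// current chunk cannot satisfy a request, a fresh chunk is mapped and any
// unused tail of the old one is abandoned.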
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, 8);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

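// True if size * n would overflow uptr. Division keeps the check itself
// overflow-free.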
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
  if (!size) return false;
  uptr max = (uptr)-1L;
  return (max / size) < n;
}

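// Set while an out-of-memory report is being produced, so that other parts
// of the run-time can detect an OOM-driven death (see IsReportingOOM).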
static atomic_uint8_t reporting_out_of_memory = {0};

bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }

void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
  if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");
  CHECK(0);
  Die();
}

}  // namespace __sanitizer