1 | //===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===// |
2 | // | |
3 | // The LLVM Compiler Infrastructure | |
4 | // | |
5 | // This file is distributed under the University of Illinois Open Source | |
6 | // License. See LICENSE.TXT for details. | |
7 | // | |
8 | //===----------------------------------------------------------------------===// | |
9 | // | |
10 | // Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc. | |
11 | // | |
12 | //===----------------------------------------------------------------------===// | |
13 | ||
14 | #ifndef SANITIZER_ALLOCATOR_H | |
15 | #define SANITIZER_ALLOCATOR_H | |
16 | ||
17 | #include "sanitizer_internal_defs.h" | |
18 | #include "sanitizer_common.h" | |
19 | #include "sanitizer_libc.h" | |
20 | #include "sanitizer_list.h" | |
21 | #include "sanitizer_mutex.h" | |
22 | #include "sanitizer_lfstack.h" | |
23 | ||
24 | namespace __sanitizer { | |
25 | ||
26 | // Prints error message and kills the program. |
27 | void NORETURN ReportAllocatorCannotReturnNull(); | |
28 | |
29 | // SizeClassMap maps allocation sizes into size classes and back. | |
30 | // Class 0 corresponds to size 0. | |
31 | // Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16). | |
32 | // Next 4 classes: 256 + i * 64 (i = 1 to 4). | |
33 | // Next 4 classes: 512 + i * 128 (i = 1 to 4). | |
34 | // ... | |
35 | // Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4). | |
36 | // Last class corresponds to kMaxSize = 1 << kMaxSizeLog. | |
37 | // | |
38 | // This structure of the size class map gives us: | |
39 | // - Efficient table-free class-to-size and size-to-class functions. | |
40 | // - The difference between two consecutive size classes is between 14% and 25%. | |
41 | // | |
42 | // This class also gives a hint to a thread-caching allocator about the number | |
43 | // of chunks that need to be cached per thread: | |
44 | // - kMaxNumCached is the maximal number of chunks per size class. | |
45 | // - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class. | |
46 | // | |
47 | // Part of output of SizeClassMap::Print(): | |
48 | // c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0 | |
49 | // c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1 | |
50 | // c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2 | |
51 | // c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3 | |
52 | // c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4 | |
53 | // c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5 | |
54 | // c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6 | |
55 | // c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7 | |
56 | // | |
57 | // c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8 | |
58 | // c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9 | |
59 | // c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10 | |
60 | // c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11 | |
61 | // c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12 | |
62 | // c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13 | |
63 | // c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14 | |
64 | // c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15 | |
65 | // | |
66 | // c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16 | |
67 | // c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17 | |
68 | // c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18 | |
69 | // c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19 | |
70 | // | |
71 | // c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20 | |
72 | // c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21 | |
73 | // c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22 | |
74 | // c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23 | |
75 | // | |
76 | // c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24 | |
77 | // c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25 | |
78 | // c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26 | |
79 | // c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27 | |
80 | // | |
81 | // ... | |
82 | // | |
83 | // c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48 | |
84 | // c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49 | |
85 | // c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50 | |
86 | // c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51 | |
87 | // | |
88 | // c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52 | |
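//
// Illustrative round trip (a sketch using the DefaultSizeClassMap parameters
// defined further below; the numbers follow from the formulas above):
//   typedef SizeClassMap<17, 128, 16> Map;
//   uptr c = Map::ClassID(300);    // 300 is rounded up to 320, so c == 17
//   uptr s = Map::Size(c);         // s == 320, the smallest class size >= 300
//   CHECK_EQ(Map::ClassID(s), c);  // sizes and classes round-trip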
89 | ||
90 | template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog> | |
91 | class SizeClassMap { | |
92 | static const uptr kMinSizeLog = 4; | |
93 | static const uptr kMidSizeLog = kMinSizeLog + 4; | |
94 | static const uptr kMinSize = 1 << kMinSizeLog; | |
95 | static const uptr kMidSize = 1 << kMidSizeLog; | |
96 | static const uptr kMidClass = kMidSize / kMinSize; | |
97 | static const uptr S = 2; | |
98 | static const uptr M = (1 << S) - 1; | |
99 | ||
100 | public: | |
101 | static const uptr kMaxNumCached = kMaxNumCachedT; | |
102 | // We transfer chunks between central and thread-local free lists in batches. | |
103 | // For small size classes we allocate batches separately. | |
104 | // For large size classes we use one of the chunks to store the batch. | |
105 | struct TransferBatch { | |
106 | TransferBatch *next; | |
107 | uptr count; | |
108 | void *batch[kMaxNumCached]; | |
109 | }; | |
110 | ||
111 | static const uptr kMaxSize = 1UL << kMaxSizeLog; | |
112 | static const uptr kNumClasses = | |
113 | kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1; | |
114 | COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256); | |
115 | static const uptr kNumClassesRounded = | |
116 | kNumClasses == 32 ? 32 : | |
117 | kNumClasses <= 64 ? 64 : | |
118 | kNumClasses <= 128 ? 128 : 256; | |
119 | ||
120 | static uptr Size(uptr class_id) { | |
121 | if (class_id <= kMidClass) | |
122 | return kMinSize * class_id; | |
123 | class_id -= kMidClass; | |
124 | uptr t = kMidSize << (class_id >> S); | |
125 | return t + (t >> S) * (class_id & M); | |
126 | } | |
127 | ||
128 | static uptr ClassID(uptr size) { | |
129 | if (size <= kMidSize) | |
130 | return (size + kMinSize - 1) >> kMinSizeLog; | |
131 | if (size > kMaxSize) return 0; | |
132 | uptr l = MostSignificantSetBitIndex(size); | |
133 | uptr hbits = (size >> (l - S)) & M; | |
134 | uptr lbits = size & ((1 << (l - S)) - 1); | |
135 | uptr l1 = l - kMidSizeLog; | |
136 | return kMidClass + (l1 << S) + hbits + (lbits > 0); | |
137 | } | |
138 | ||
139 | static uptr MaxCached(uptr class_id) { | |
140 | if (class_id == 0) return 0; | |
141 | uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id); | |
142 | return Max<uptr>(1, Min(kMaxNumCached, n)); | |
143 | } | |
144 | ||
145 | static void Print() { | |
146 | uptr prev_s = 0; | |
147 | uptr total_cached = 0; | |
148 | for (uptr i = 0; i < kNumClasses; i++) { | |
149 | uptr s = Size(i); | |
150 | if (s >= kMidSize / 2 && (s & (s - 1)) == 0) | |
151 | Printf("\n"); | |
152 | uptr d = s - prev_s; | |
153 | uptr p = prev_s ? (d * 100 / prev_s) : 0; | |
154 | uptr l = s ? MostSignificantSetBitIndex(s) : 0; | |
155 | uptr cached = MaxCached(i) * s; | |
156 | Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd " | |
157 | "cached: %zd %zd; id %zd\n", | |
158 | i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s)); | |
159 | total_cached += cached; | |
160 | prev_s = s; | |
161 | } | |
162 | Printf("Total cached: %zd\n", total_cached); | |
163 | } | |
164 | ||
165 | static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) { | |
166 | return Size(class_id) < sizeof(TransferBatch) - | |
167 | sizeof(uptr) * (kMaxNumCached - MaxCached(class_id)); | |
168 | } | |
169 | ||
170 | static void Validate() { | |
171 | for (uptr c = 1; c < kNumClasses; c++) { | |
172 | // Printf("Validate: c%zd\n", c); | |
173 | uptr s = Size(c); | |
174 | CHECK_NE(s, 0U); | |
175 | CHECK_EQ(ClassID(s), c); | |
176 | if (c != kNumClasses - 1) | |
177 | CHECK_EQ(ClassID(s + 1), c + 1); | |
178 | CHECK_EQ(ClassID(s - 1), c); | |
179 | if (c) | |
180 | CHECK_GT(Size(c), Size(c-1)); | |
181 | } | |
182 | CHECK_EQ(ClassID(kMaxSize + 1), 0); | |
183 | ||
184 | for (uptr s = 1; s <= kMaxSize; s++) { | |
185 | uptr c = ClassID(s); | |
186 | // Printf("s%zd => c%zd\n", s, c); | |
187 | CHECK_LT(c, kNumClasses); | |
188 | CHECK_GE(Size(c), s); | |
189 | if (c > 0) | |
190 | CHECK_LT(Size(c-1), s); | |
191 | } | |
192 | } | |
193 | }; | |
194 | ||
195 | typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap; | |
196 | typedef SizeClassMap<17, 64, 14> CompactSizeClassMap; | |
197 | template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache; | |
198 | ||
199 | // Memory allocator statistics | |
200 | enum AllocatorStat { | |
201 | AllocatorStatAllocated, | |
202 | AllocatorStatMapped, | |
203 | AllocatorStatCount | |
204 | }; | |
205 | ||
206 | typedef uptr AllocatorStatCounters[AllocatorStatCount]; | |
207 | |
208 | // Per-thread stats, live in per-thread cache. | |
209 | class AllocatorStats { | |
210 | public: | |
211 | void Init() { | |
212 | internal_memset(this, 0, sizeof(*this)); | |
213 | } | |
214 | void InitLinkerInitialized() {} | |
215 | ||
216 | void Add(AllocatorStat i, uptr v) { | |
217 | v += atomic_load(&stats_[i], memory_order_relaxed); |
218 | atomic_store(&stats_[i], v, memory_order_relaxed); | |
219 | } | |
220 | ||
221 | void Sub(AllocatorStat i, uptr v) { |
222 | v = atomic_load(&stats_[i], memory_order_relaxed) - v; | |
223 | atomic_store(&stats_[i], v, memory_order_relaxed); |
224 | } | |
225 | ||
226 | void Set(AllocatorStat i, uptr v) { |
227 | atomic_store(&stats_[i], v, memory_order_relaxed); | |
228 | } | |
229 | ||
230 | uptr Get(AllocatorStat i) const { | |
231 | return atomic_load(&stats_[i], memory_order_relaxed); |
232 | } | |
233 | ||
234 | private: | |
235 | friend class AllocatorGlobalStats; | |
236 | AllocatorStats *next_; | |
237 | AllocatorStats *prev_; | |
238 | atomic_uintptr_t stats_[AllocatorStatCount]; | |
239 | }; |
240 | ||
241 | // Global stats, used for aggregation and querying. | |
242 | class AllocatorGlobalStats : public AllocatorStats { | |
243 | public: | |
244 | void InitLinkerInitialized() { | |
245 | next_ = this; |
246 | prev_ = this; | |
247 | } | |
248 | void Init() { |
249 | internal_memset(this, 0, sizeof(*this)); | |
250 | InitLinkerInitialized(); | |
251 | } | |
252 | |
253 | void Register(AllocatorStats *s) { | |
254 | SpinMutexLock l(&mu_); | |
255 | s->next_ = next_; | |
256 | s->prev_ = this; | |
257 | next_->prev_ = s; | |
258 | next_ = s; | |
259 | } | |
260 | ||
261 | void Unregister(AllocatorStats *s) { | |
262 | SpinMutexLock l(&mu_); | |
263 | s->prev_->next_ = s->next_; | |
264 | s->next_->prev_ = s->prev_; | |
265 | for (int i = 0; i < AllocatorStatCount; i++) | |
266 | Add(AllocatorStat(i), s->Get(AllocatorStat(i))); | |
267 | } | |
268 | ||
269 | void Get(AllocatorStatCounters s) const { | |
270 | internal_memset(s, 0, AllocatorStatCount * sizeof(uptr)); | |
271 | SpinMutexLock l(&mu_); |
272 | const AllocatorStats *stats = this; | |
273 | for (;;) { | |
274 | for (int i = 0; i < AllocatorStatCount; i++) | |
275 | s[i] += stats->Get(AllocatorStat(i)); | |
276 | stats = stats->next_; | |
277 | if (stats == this) | |
278 | break; | |
279 | } | |
280 | // All stats must be non-negative. |
281 | for (int i = 0; i < AllocatorStatCount; i++) | |
282 | s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0; | |
283 | } |
284 | ||
285 | private: | |
286 | mutable SpinMutex mu_; | |
287 | }; | |
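// A minimal wiring sketch (names are hypothetical, not part of this header):
//   AllocatorGlobalStats global_stats;
//   global_stats.Init();
//   AllocatorStats thread_stats;                  // typically one per thread
//   thread_stats.Init();
//   global_stats.Register(&thread_stats);
//   thread_stats.Add(AllocatorStatAllocated, 64);
//   AllocatorStatCounters totals;
//   global_stats.Get(totals);                     // sums all registered threads
//   global_stats.Unregister(&thread_stats);       // folds the values back in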
288 | ||
289 | // Allocators call these callbacks on mmap/munmap. | |
290 | struct NoOpMapUnmapCallback { | |
291 | void OnMap(uptr p, uptr size) const { } | |
292 | void OnUnmap(uptr p, uptr size) const { } | |
293 | }; | |
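// A hypothetical callback that tracks the total of mapped bytes (a sketch;
// the allocators only require OnMap/OnUnmap with these signatures):
//   struct CountingMapUnmapCallback {
//     static atomic_uintptr_t mapped_bytes;
//     void OnMap(uptr p, uptr size) const {
//       atomic_fetch_add(&mapped_bytes, size, memory_order_relaxed);
//     }
//     void OnUnmap(uptr p, uptr size) const {
//       atomic_fetch_sub(&mapped_bytes, size, memory_order_relaxed);
//     }
//   };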
294 | ||
295 | // Callback type for iterating over chunks. | |
296 | typedef void (*ForEachChunkCallback)(uptr chunk, void *arg); | |
297 | ||
298 | // SizeClassAllocator64 -- allocator for 64-bit address space. | |
299 | // | |
300 | // Space: a portion of address space of kSpaceSize bytes starting at | |
301 | // a fixed address (kSpaceBeg). Both constants are powers of two and | |
302 | // kSpaceBeg is kSpaceSize-aligned. | |
303 | // At the beginning the entire space is mprotect-ed, then small parts of it | |
304 | // are mapped on demand. | |
305 | // | |
306 | // Region: a part of Space dedicated to a single size class. | |
307 | // There are kNumClasses Regions of equal size. | |
308 | // | |
309 | // UserChunk: a piece of memory returned to user. | |
310 | // MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk. | |
311 | // | |
312 | // A Region looks like this: | |
313 | // UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 | |
314 | template <const uptr kSpaceBeg, const uptr kSpaceSize, | |
315 | const uptr kMetadataSize, class SizeClassMap, | |
316 | class MapUnmapCallback = NoOpMapUnmapCallback> | |
317 | class SizeClassAllocator64 { | |
318 | public: | |
319 | typedef typename SizeClassMap::TransferBatch Batch; | |
320 | typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize, | |
321 | SizeClassMap, MapUnmapCallback> ThisT; | |
322 | typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache; | |
323 | ||
324 | void Init() { | |
325 | CHECK_EQ(kSpaceBeg, | |
326 | reinterpret_cast<uptr>(MmapNoAccess(kSpaceBeg, kSpaceSize))); | |
327 | MapWithCallback(kSpaceEnd, AdditionalSize()); |
328 | } | |
329 | ||
330 | void MapWithCallback(uptr beg, uptr size) { | |
331 | CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size))); | |
332 | MapUnmapCallback().OnMap(beg, size); | |
333 | } | |
334 | ||
335 | void UnmapWithCallback(uptr beg, uptr size) { | |
336 | MapUnmapCallback().OnUnmap(beg, size); | |
337 | UnmapOrDie(reinterpret_cast<void *>(beg), size); | |
338 | } | |
339 | ||
340 | static bool CanAllocate(uptr size, uptr alignment) { | |
341 | return size <= SizeClassMap::kMaxSize && | |
342 | alignment <= SizeClassMap::kMaxSize; | |
343 | } | |
344 | ||
345 | NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c, | |
346 | uptr class_id) { | |
347 | CHECK_LT(class_id, kNumClasses); | |
348 | RegionInfo *region = GetRegionInfo(class_id); | |
349 | Batch *b = region->free_list.Pop(); | |
350 | if (!b) | |
351 | b = PopulateFreeList(stat, c, class_id, region); |
352 | region->n_allocated += b->count; | |
353 | return b; | |
354 | } | |
355 | ||
356 | NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) { | |
357 | RegionInfo *region = GetRegionInfo(class_id); | |
358 | CHECK_GT(b->count, 0); | |
359 | region->free_list.Push(b); | |
360 | region->n_freed += b->count; | |
361 | } | |
362 | ||
363 | static bool PointerIsMine(const void *p) { | |
364 | return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize; | |
365 | } | |
366 | ||
367 | static uptr GetSizeClass(const void *p) { | |
368 | return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClassesRounded; | |
369 | } | |
370 | ||
371 | void *GetBlockBegin(const void *p) { | |
372 | uptr class_id = GetSizeClass(p); | |
373 | uptr size = SizeClassMap::Size(class_id); | |
374 | if (!size) return nullptr; | |
375 | uptr chunk_idx = GetChunkIdx((uptr)p, size); |
376 | uptr reg_beg = (uptr)p & ~(kRegionSize - 1); | |
377 | uptr beg = chunk_idx * size; | |
378 | uptr next_beg = beg + size; | |
379 | if (class_id >= kNumClasses) return nullptr; | |
380 | RegionInfo *region = GetRegionInfo(class_id); |
381 | if (region->mapped_user >= next_beg) | |
382 | return reinterpret_cast<void*>(reg_beg + beg); | |
383 | return nullptr; | |
384 | } |
385 | ||
386 | static uptr GetActuallyAllocatedSize(void *p) { | |
387 | CHECK(PointerIsMine(p)); | |
388 | return SizeClassMap::Size(GetSizeClass(p)); | |
389 | } | |
390 | ||
391 | uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); } | |
392 | ||
393 | void *GetMetaData(const void *p) { | |
394 | uptr class_id = GetSizeClass(p); | |
395 | uptr size = SizeClassMap::Size(class_id); | |
396 | uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size); | |
397 | return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) - | |
398 | (1 + chunk_idx) * kMetadataSize); | |
399 | } | |
400 | ||
401 | uptr TotalMemoryUsed() { | |
402 | uptr res = 0; | |
403 | for (uptr i = 0; i < kNumClasses; i++) | |
404 | res += GetRegionInfo(i)->allocated_user; | |
405 | return res; | |
406 | } | |
407 | ||
408 | // Test-only. | |
409 | void TestOnlyUnmap() { | |
410 | UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize()); | |
411 | } | |
412 | ||
413 | void PrintStats() { | |
414 | uptr total_mapped = 0; | |
415 | uptr n_allocated = 0; | |
416 | uptr n_freed = 0; | |
417 | for (uptr class_id = 1; class_id < kNumClasses; class_id++) { | |
418 | RegionInfo *region = GetRegionInfo(class_id); | |
419 | total_mapped += region->mapped_user; | |
420 | n_allocated += region->n_allocated; | |
421 | n_freed += region->n_freed; | |
422 | } | |
423 | Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; " | |
424 | "remains %zd\n", | |
425 | total_mapped >> 20, n_allocated, n_allocated - n_freed); | |
426 | for (uptr class_id = 1; class_id < kNumClasses; class_id++) { | |
427 | RegionInfo *region = GetRegionInfo(class_id); | |
428 | if (region->mapped_user == 0) continue; | |
429 | Printf(" %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n", | |
430 | class_id, | |
431 | SizeClassMap::Size(class_id), | |
432 | region->mapped_user >> 10, | |
433 | region->n_allocated, | |
434 | region->n_allocated - region->n_freed); | |
435 | } | |
436 | } | |
437 | ||
438 | // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone | |
439 | // introspection API. | |
440 | void ForceLock() { | |
441 | for (uptr i = 0; i < kNumClasses; i++) { | |
442 | GetRegionInfo(i)->mutex.Lock(); | |
443 | } | |
444 | } | |
445 | ||
446 | void ForceUnlock() { | |
447 | for (int i = (int)kNumClasses - 1; i >= 0; i--) { | |
448 | GetRegionInfo(i)->mutex.Unlock(); | |
449 | } | |
450 | } | |
451 | ||
452 | // Iterate over all existing chunks. | |
453 | // The allocator must be locked when calling this function. | |
454 | void ForEachChunk(ForEachChunkCallback callback, void *arg) { | |
455 | for (uptr class_id = 1; class_id < kNumClasses; class_id++) { | |
456 | RegionInfo *region = GetRegionInfo(class_id); | |
457 | uptr chunk_size = SizeClassMap::Size(class_id); | |
458 | uptr region_beg = kSpaceBeg + class_id * kRegionSize; | |
459 | for (uptr chunk = region_beg; | |
460 | chunk < region_beg + region->allocated_user; | |
461 | chunk += chunk_size) { | |
462 | // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk)); | |
463 | callback(chunk, arg); | |
464 | } | |
465 | } | |
466 | } | |
467 | ||
468 | static uptr AdditionalSize() { |
469 | return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded, | |
470 | GetPageSizeCached()); | |
471 | } | |
472 | ||
473 | typedef SizeClassMap SizeClassMapT; |
474 | static const uptr kNumClasses = SizeClassMap::kNumClasses; | |
475 | static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded; | |
476 | ||
477 | private: | |
478 | static const uptr kRegionSize = kSpaceSize / kNumClassesRounded; | |
479 | static const uptr kSpaceEnd = kSpaceBeg + kSpaceSize; | |
480 | COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0); | |
481 | // kRegionSize must be >= 2^32. | |
482 | COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2))); | |
483 | // Populate the free list with at most this number of bytes at once | |
484 | // or with one element if its size is greater. | |
485 | static const uptr kPopulateSize = 1 << 14; | |
486 | // Call mmap for user memory with at least this size. | |
487 | static const uptr kUserMapSize = 1 << 16; | |
488 | // Call mmap for metadata memory with at least this size. | |
489 | static const uptr kMetaMapSize = 1 << 16; | |
490 | ||
491 | struct RegionInfo { | |
492 | BlockingMutex mutex; | |
493 | LFStack<Batch> free_list; | |
494 | uptr allocated_user; // Bytes allocated for user memory. | |
495 | uptr allocated_meta; // Bytes allocated for metadata. | |
496 | uptr mapped_user; // Bytes mapped for user memory. | |
497 | uptr mapped_meta; // Bytes mapped for metadata. | |
498 | uptr n_allocated, n_freed; // Just stats. | |
499 | }; | |
500 | COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize); | |
501 | ||
502 | RegionInfo *GetRegionInfo(uptr class_id) { |
503 | CHECK_LT(class_id, kNumClasses); | |
504 | RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize); | |
505 | return ®ions[class_id]; | |
506 | } | |
507 | ||
508 | static uptr GetChunkIdx(uptr chunk, uptr size) { | |
509 | uptr offset = chunk % kRegionSize; | |
510 | // Here we divide by a non-constant. This is costly. | |
511 | // size always fits into 32-bits. If the offset fits too, use 32-bit div. | |
512 | if (offset >> (SANITIZER_WORDSIZE / 2)) | |
513 | return offset / size; | |
514 | return (u32)offset / (u32)size; | |
515 | } | |
516 | ||
517 | NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c, | |
518 | uptr class_id, RegionInfo *region) { | |
519 | BlockingMutexLock l(®ion->mutex); | |
520 | Batch *b = region->free_list.Pop(); | |
521 | if (b) | |
522 | return b; | |
523 | uptr size = SizeClassMap::Size(class_id); | |
524 | uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1; | |
525 | uptr beg_idx = region->allocated_user; | |
526 | uptr end_idx = beg_idx + count * size; | |
527 | uptr region_beg = kSpaceBeg + kRegionSize * class_id; | |
528 | if (end_idx + size > region->mapped_user) { | |
529 | // Do the mmap for the user memory. | |
530 | uptr map_size = kUserMapSize; | |
531 | while (end_idx + size > region->mapped_user + map_size) | |
532 | map_size += kUserMapSize; | |
533 | CHECK_GE(region->mapped_user + map_size, end_idx); | |
534 | MapWithCallback(region_beg + region->mapped_user, map_size); | |
535 | stat->Add(AllocatorStatMapped, map_size); | |
536 | region->mapped_user += map_size; |
537 | } | |
538 | uptr total_count = (region->mapped_user - beg_idx - size) | |
539 | / size / count * count; | |
540 | region->allocated_meta += total_count * kMetadataSize; | |
541 | if (region->allocated_meta > region->mapped_meta) { | |
542 | uptr map_size = kMetaMapSize; | |
543 | while (region->allocated_meta > region->mapped_meta + map_size) | |
544 | map_size += kMetaMapSize; | |
545 | // Do the mmap for the metadata. | |
546 | CHECK_GE(region->mapped_meta + map_size, region->allocated_meta); | |
547 | MapWithCallback(region_beg + kRegionSize - | |
548 | region->mapped_meta - map_size, map_size); | |
549 | region->mapped_meta += map_size; | |
550 | } | |
551 | CHECK_LE(region->allocated_meta, region->mapped_meta); | |
552 | if (region->mapped_user + region->mapped_meta > kRegionSize) { | |
553 | Printf("%s: Out of memory. Dying. ", SanitizerToolName); | |
554 | Printf("The process has exhausted %zuMB for size class %zu.\n", | |
555 | kRegionSize / 1024 / 1024, size); | |
556 | Die(); | |
557 | } | |
558 | for (;;) { | |
559 | if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id)) | |
560 | b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch))); | |
561 | else | |
562 | b = (Batch*)(region_beg + beg_idx); | |
563 | b->count = count; | |
564 | for (uptr i = 0; i < count; i++) | |
565 | b->batch[i] = (void*)(region_beg + beg_idx + i * size); | |
566 | region->allocated_user += count * size; | |
567 | CHECK_LE(region->allocated_user, region->mapped_user); | |
568 | beg_idx += count * size; | |
569 | if (beg_idx + count * size + size > region->mapped_user) | |
570 | break; | |
571 | CHECK_GT(b->count, 0); | |
572 | region->free_list.Push(b); | |
573 | } | |
574 | return b; | |
575 | } | |
576 | }; | |
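// A sketch of a concrete instantiation (the constants are illustrative, not
// taken from any particular tool; kSpaceBeg must be kSpaceSize-aligned and
// each of the 64 regions must still be >= 2^32 bytes, see the checks above):
//   static const uptr kAllocatorSpace = 0x600000000000ULL;  // kSpaceBeg
//   static const uptr kAllocatorSize  = 0x40000000000ULL;   // 4T, kSpaceSize
//   typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 16,
//                                DefaultSizeClassMap> Primary64;
//   typedef SizeClassAllocatorLocalCache<Primary64> Cache64;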
577 | ||
578 | // Maps integers in range [0, kSize) to u8 values. | |
579 | template<u64 kSize> | |
580 | class FlatByteMap { | |
581 | public: | |
582 | void TestOnlyInit() { | |
583 | internal_memset(map_, 0, sizeof(map_)); | |
584 | } | |
585 | ||
586 | void set(uptr idx, u8 val) { | |
587 | CHECK_LT(idx, kSize); | |
588 | CHECK_EQ(0U, map_[idx]); | |
589 | map_[idx] = val; | |
590 | } | |
591 | u8 operator[] (uptr idx) { | |
592 | CHECK_LT(idx, kSize); | |
593 | // FIXME: CHECK may be too expensive here. | |
594 | return map_[idx]; | |
595 | } | |
596 | private: | |
597 | u8 map_[kSize]; | |
598 | }; | |
599 | ||
600 | // TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values. | |
601 | // It is implemented as a two-dimensional array: array of kSize1 pointers | |
602 | // to kSize2-byte arrays. The secondary arrays are mmaped on demand. | |
603 | // Each value is initially zero and can be set to something else only once. | |
604 | // Setting and getting values from multiple threads is safe w/o extra locking. | |
605 | template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback> | |
606 | class TwoLevelByteMap { | |
607 | public: | |
608 | void TestOnlyInit() { | |
609 | internal_memset(map1_, 0, sizeof(map1_)); | |
610 | mu_.Init(); | |
611 | } | |
612 | ||
613 | void TestOnlyUnmap() { |
614 | for (uptr i = 0; i < kSize1; i++) { | |
615 | u8 *p = Get(i); | |
616 | if (!p) continue; | |
617 | MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2); | |
618 | UnmapOrDie(p, kSize2); | |
619 | } | |
620 | } | |
621 | ||
622 | uptr size() const { return kSize1 * kSize2; } | |
623 | uptr size1() const { return kSize1; } | |
624 | uptr size2() const { return kSize2; } | |
625 | ||
626 | void set(uptr idx, u8 val) { | |
627 | CHECK_LT(idx, kSize1 * kSize2); | |
628 | u8 *map2 = GetOrCreate(idx / kSize2); | |
629 | CHECK_EQ(0U, map2[idx % kSize2]); | |
630 | map2[idx % kSize2] = val; | |
631 | } | |
632 | ||
633 | u8 operator[] (uptr idx) const { | |
634 | CHECK_LT(idx, kSize1 * kSize2); | |
635 | u8 *map2 = Get(idx / kSize2); | |
636 | if (!map2) return 0; | |
637 | return map2[idx % kSize2]; | |
638 | } | |
639 | ||
640 | private: | |
641 | u8 *Get(uptr idx) const { | |
642 | CHECK_LT(idx, kSize1); | |
643 | return reinterpret_cast<u8 *>( | |
644 | atomic_load(&map1_[idx], memory_order_acquire)); | |
645 | } | |
646 | ||
647 | u8 *GetOrCreate(uptr idx) { | |
648 | u8 *res = Get(idx); | |
649 | if (!res) { | |
650 | SpinMutexLock l(&mu_); | |
651 | if (!(res = Get(idx))) { | |
652 | res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap"); | |
653 | MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2); | |
654 | atomic_store(&map1_[idx], reinterpret_cast<uptr>(res), | |
655 | memory_order_release); | |
656 | } | |
657 | } | |
658 | return res; | |
659 | } | |
660 | ||
661 | atomic_uintptr_t map1_[kSize1]; | |
662 | StaticSpinMutex mu_; | |
663 | }; | |
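// A usage sketch (sizes are illustrative):
//   TwoLevelByteMap<128, 1 << 20> m;    // covers 128M indices, 1M per leaf
//   m.TestOnlyInit();
//   m.set(5 * (1 << 20) + 7, 42);       // mmaps leaf 5 on first use
//   u8 v = m[5 * (1 << 20) + 7];        // == 42; untouched indices read as 0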
664 | ||
665 | // SizeClassAllocator32 -- allocator for 32-bit address space. | |
666 | // This allocator can theoretically be used on 64-bit arch, but there it is less | |
667 | // efficient than SizeClassAllocator64. | |
668 | // | |
669 | // [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can | |
670 | // be returned by MmapOrDie(). | |
671 | // | |
672 | // Region: | |
673 | // a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize). | |
674 | // Since the regions are aligned by kRegionSize, there are exactly | |
675 | // kNumPossibleRegions possible regions in the address space and so we keep | |
676 | // a ByteMap possible_regions to store the size classes of each Region. | |
677 | // A size class of 0 means the region is not used by the allocator. | |
678 | // | |
679 | // One Region is used to allocate chunks of a single size class. | |
680 | // A Region looks like this: | |
681 | // UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1 | |
682 | // | |
683 | // In order to avoid false sharing, objects of this class should be | |
684 | // cache-line aligned. | |
685 | template <const uptr kSpaceBeg, const u64 kSpaceSize, | |
686 | const uptr kMetadataSize, class SizeClassMap, | |
687 | const uptr kRegionSizeLog, | |
688 | class ByteMap, | |
689 | class MapUnmapCallback = NoOpMapUnmapCallback> | |
690 | class SizeClassAllocator32 { | |
691 | public: | |
692 | typedef typename SizeClassMap::TransferBatch Batch; | |
693 | typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize, | |
694 | SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT; | |
695 | typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache; | |
696 | ||
697 | void Init() { | |
698 | possible_regions.TestOnlyInit(); | |
699 | internal_memset(size_class_info_array, 0, sizeof(size_class_info_array)); | |
700 | } | |
701 | ||
702 | void *MapWithCallback(uptr size) { | |
703 | size = RoundUpTo(size, GetPageSizeCached()); | |
704 | void *res = MmapOrDie(size, "SizeClassAllocator32"); | |
705 | MapUnmapCallback().OnMap((uptr)res, size); | |
706 | return res; | |
707 | } | |
708 | ||
709 | void UnmapWithCallback(uptr beg, uptr size) { | |
710 | MapUnmapCallback().OnUnmap(beg, size); | |
711 | UnmapOrDie(reinterpret_cast<void *>(beg), size); | |
712 | } | |
713 | ||
714 | static bool CanAllocate(uptr size, uptr alignment) { | |
715 | return size <= SizeClassMap::kMaxSize && | |
716 | alignment <= SizeClassMap::kMaxSize; | |
717 | } | |
718 | ||
719 | void *GetMetaData(const void *p) { | |
720 | CHECK(PointerIsMine(p)); | |
721 | uptr mem = reinterpret_cast<uptr>(p); | |
722 | uptr beg = ComputeRegionBeg(mem); | |
723 | uptr size = SizeClassMap::Size(GetSizeClass(p)); | |
724 | u32 offset = mem - beg; | |
725 | uptr n = offset / (u32)size; // 32-bit division | |
726 | uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize; | |
727 | return reinterpret_cast<void*>(meta); | |
728 | } | |
729 | ||
730 | NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c, | |
731 | uptr class_id) { | |
732 | CHECK_LT(class_id, kNumClasses); | |
733 | SizeClassInfo *sci = GetSizeClassInfo(class_id); | |
734 | SpinMutexLock l(&sci->mutex); | |
735 | if (sci->free_list.empty()) | |
736 | PopulateFreeList(stat, c, sci, class_id); | |
737 | CHECK(!sci->free_list.empty()); | |
738 | Batch *b = sci->free_list.front(); | |
739 | sci->free_list.pop_front(); | |
740 | return b; | |
741 | } | |
742 | ||
743 | NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) { | |
744 | CHECK_LT(class_id, kNumClasses); | |
745 | SizeClassInfo *sci = GetSizeClassInfo(class_id); | |
746 | SpinMutexLock l(&sci->mutex); | |
747 | CHECK_GT(b->count, 0); | |
748 | sci->free_list.push_front(b); | |
749 | } | |
750 | ||
751 | bool PointerIsMine(const void *p) { | |
752 | return GetSizeClass(p) != 0; | |
753 | } | |
754 | ||
755 | uptr GetSizeClass(const void *p) { | |
756 | return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))]; | |
757 | } | |
758 | ||
759 | void *GetBlockBegin(const void *p) { | |
760 | CHECK(PointerIsMine(p)); | |
761 | uptr mem = reinterpret_cast<uptr>(p); | |
762 | uptr beg = ComputeRegionBeg(mem); | |
763 | uptr size = SizeClassMap::Size(GetSizeClass(p)); | |
764 | u32 offset = mem - beg; | |
765 | u32 n = offset / (u32)size; // 32-bit division | |
766 | uptr res = beg + (n * (u32)size); | |
767 | return reinterpret_cast<void*>(res); | |
768 | } | |
769 | ||
770 | uptr GetActuallyAllocatedSize(void *p) { | |
771 | CHECK(PointerIsMine(p)); | |
772 | return SizeClassMap::Size(GetSizeClass(p)); | |
773 | } | |
774 | ||
775 | uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); } | |
776 | ||
777 | uptr TotalMemoryUsed() { | |
778 | // No need to lock here. | |
779 | uptr res = 0; | |
780 | for (uptr i = 0; i < kNumPossibleRegions; i++) | |
781 | if (possible_regions[i]) | |
782 | res += kRegionSize; | |
783 | return res; | |
784 | } | |
785 | ||
786 | void TestOnlyUnmap() { | |
787 | for (uptr i = 0; i < kNumPossibleRegions; i++) | |
788 | if (possible_regions[i]) | |
789 | UnmapWithCallback((i * kRegionSize), kRegionSize); | |
790 | } | |
791 | ||
792 | // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone | |
793 | // introspection API. | |
794 | void ForceLock() { | |
795 | for (uptr i = 0; i < kNumClasses; i++) { | |
796 | GetSizeClassInfo(i)->mutex.Lock(); | |
797 | } | |
798 | } | |
799 | ||
800 | void ForceUnlock() { | |
801 | for (int i = kNumClasses - 1; i >= 0; i--) { | |
802 | GetSizeClassInfo(i)->mutex.Unlock(); | |
803 | } | |
804 | } | |
805 | ||
806 | // Iterate over all existing chunks. | |
807 | // The allocator must be locked when calling this function. | |
808 | void ForEachChunk(ForEachChunkCallback callback, void *arg) { | |
809 | for (uptr region = 0; region < kNumPossibleRegions; region++) | |
810 | if (possible_regions[region]) { | |
811 | uptr chunk_size = SizeClassMap::Size(possible_regions[region]); | |
812 | uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize); | |
813 | uptr region_beg = region * kRegionSize; | |
814 | for (uptr chunk = region_beg; | |
815 | chunk < region_beg + max_chunks_in_region * chunk_size; | |
816 | chunk += chunk_size) { | |
817 | // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk)); | |
818 | callback(chunk, arg); | |
819 | } | |
820 | } | |
821 | } | |
822 | ||
823 | void PrintStats() { | |
824 | } | |
825 | ||
826 | static uptr AdditionalSize() { |
827 | return 0; | |
828 | } | |
829 | ||
830 | typedef SizeClassMap SizeClassMapT; |
831 | static const uptr kNumClasses = SizeClassMap::kNumClasses; | |
832 | ||
833 | private: | |
834 | static const uptr kRegionSize = 1 << kRegionSizeLog; | |
835 | static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize; | |
836 | ||
837 | struct SizeClassInfo { | |
838 | SpinMutex mutex; | |
839 | IntrusiveList<Batch> free_list; | |
840 | char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)]; | |
841 | }; | |
842 | COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize); | |
843 | ||
844 | uptr ComputeRegionId(uptr mem) { | |
845 | uptr res = mem >> kRegionSizeLog; | |
846 | CHECK_LT(res, kNumPossibleRegions); | |
847 | return res; | |
848 | } | |
849 | ||
850 | uptr ComputeRegionBeg(uptr mem) { | |
851 | return mem & ~(kRegionSize - 1); | |
852 | } | |
853 | ||
854 | uptr AllocateRegion(AllocatorStats *stat, uptr class_id) { | |
855 | CHECK_LT(class_id, kNumClasses); | |
856 | uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize, | |
857 | "SizeClassAllocator32")); | |
858 | MapUnmapCallback().OnMap(res, kRegionSize); | |
859 | stat->Add(AllocatorStatMapped, kRegionSize); | |
860 | CHECK_EQ(0U, (res & (kRegionSize - 1))); |
861 | possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id)); | |
862 | return res; | |
863 | } | |
864 | ||
865 | SizeClassInfo *GetSizeClassInfo(uptr class_id) { | |
866 | CHECK_LT(class_id, kNumClasses); | |
867 | return &size_class_info_array[class_id]; | |
868 | } | |
869 | ||
870 | void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c, | |
871 | SizeClassInfo *sci, uptr class_id) { | |
872 | uptr size = SizeClassMap::Size(class_id); | |
873 | uptr reg = AllocateRegion(stat, class_id); | |
874 | uptr n_chunks = kRegionSize / (size + kMetadataSize); | |
875 | uptr max_count = SizeClassMap::MaxCached(class_id); | |
876 | Batch *b = nullptr; | |
877 | for (uptr i = reg; i < reg + n_chunks * size; i += size) { | |
878 | if (!b) { | |
879 | if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id)) |
880 | b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch))); | |
881 | else | |
882 | b = (Batch*)i; | |
883 | b->count = 0; | |
884 | } | |
885 | b->batch[b->count++] = (void*)i; | |
886 | if (b->count == max_count) { | |
887 | CHECK_GT(b->count, 0); | |
888 | sci->free_list.push_back(b); | |
889 | b = nullptr; | |
890 | } |
891 | } | |
892 | if (b) { | |
893 | CHECK_GT(b->count, 0); | |
894 | sci->free_list.push_back(b); | |
895 | } | |
896 | } | |
897 | ||
898 | ByteMap possible_regions; | |
899 | SizeClassInfo size_class_info_array[kNumClasses]; | |
900 | }; | |
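// A sketch of a 32-bit instantiation (illustrative constants): with a 4G
// address space and kRegionSizeLog == 20 there are 2^32 / 2^20 == 4096
// possible regions, so a FlatByteMap<4096> suffices for possible_regions:
//   typedef FlatByteMap<4096> ByteMap32;
//   typedef SizeClassAllocator32<0, 1ULL << 32, 16, CompactSizeClassMap,
//                                20, ByteMap32> Primary32;
//   typedef SizeClassAllocatorLocalCache<Primary32> Cache32;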
901 | ||
902 | // Objects of this type should be used as local caches for SizeClassAllocator64 | |
903 | // or SizeClassAllocator32. Since the typical use of this class is to have one | |
904 | // object per thread in TLS, it has to be POD. | |
905 | template<class SizeClassAllocator> | |
906 | struct SizeClassAllocatorLocalCache { | |
907 | typedef SizeClassAllocator Allocator; | |
908 | static const uptr kNumClasses = SizeClassAllocator::kNumClasses; | |
909 | ||
910 | void Init(AllocatorGlobalStats *s) { | |
911 | stats_.Init(); | |
912 | if (s) | |
913 | s->Register(&stats_); | |
914 | } | |
915 | ||
916 | void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) { | |
917 | Drain(allocator); | |
918 | if (s) | |
919 | s->Unregister(&stats_); | |
920 | } | |
921 | ||
922 | void *Allocate(SizeClassAllocator *allocator, uptr class_id) { | |
923 | CHECK_NE(class_id, 0UL); | |
924 | CHECK_LT(class_id, kNumClasses); | |
925 | stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id)); | |
926 | PerClass *c = &per_class_[class_id]; |
927 | if (UNLIKELY(c->count == 0)) | |
928 | Refill(allocator, class_id); | |
929 | void *res = c->batch[--c->count]; | |
930 | PREFETCH(c->batch[c->count - 1]); | |
931 | return res; | |
932 | } | |
933 | ||
934 | void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) { | |
935 | CHECK_NE(class_id, 0UL); | |
936 | CHECK_LT(class_id, kNumClasses); | |
937 | // If the first allocator call on a new thread is a deallocation, then | |
938 | // max_count will be zero, leading to a check failure. | |
939 | InitCache(); | |
940 | stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id)); | |
941 | PerClass *c = &per_class_[class_id]; |
942 | CHECK_NE(c->max_count, 0UL); | |
943 | if (UNLIKELY(c->count == c->max_count)) | |
944 | Drain(allocator, class_id); | |
945 | c->batch[c->count++] = p; | |
946 | } | |
947 | ||
948 | void Drain(SizeClassAllocator *allocator) { | |
949 | for (uptr class_id = 0; class_id < kNumClasses; class_id++) { | |
950 | PerClass *c = &per_class_[class_id]; | |
951 | while (c->count > 0) | |
952 | Drain(allocator, class_id); | |
953 | } | |
954 | } | |
955 | ||
956 | // private: | |
957 | typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap; | |
958 | typedef typename SizeClassMap::TransferBatch Batch; | |
959 | struct PerClass { | |
960 | uptr count; | |
961 | uptr max_count; | |
962 | void *batch[2 * SizeClassMap::kMaxNumCached]; | |
963 | }; | |
964 | PerClass per_class_[kNumClasses]; | |
965 | AllocatorStats stats_; | |
966 | ||
967 | void InitCache() { | |
968 | if (per_class_[1].max_count) | |
969 | return; | |
970 | for (uptr i = 0; i < kNumClasses; i++) { | |
971 | PerClass *c = &per_class_[i]; | |
972 | c->max_count = 2 * SizeClassMap::MaxCached(i); | |
973 | } | |
974 | } | |
975 | ||
976 | NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) { | |
977 | InitCache(); | |
978 | PerClass *c = &per_class_[class_id]; | |
979 | Batch *b = allocator->AllocateBatch(&stats_, this, class_id); | |
980 | CHECK_GT(b->count, 0); | |
981 | for (uptr i = 0; i < b->count; i++) | |
982 | c->batch[i] = b->batch[i]; | |
983 | c->count = b->count; | |
984 | if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id)) | |
985 | Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b); | |
986 | } | |
987 | ||
988 | NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) { | |
989 | InitCache(); | |
990 | PerClass *c = &per_class_[class_id]; | |
991 | Batch *b; | |
992 | if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id)) | |
993 | b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch))); | |
994 | else | |
995 | b = (Batch*)c->batch[0]; | |
996 | uptr cnt = Min(c->max_count / 2, c->count); | |
997 | for (uptr i = 0; i < cnt; i++) { | |
998 | b->batch[i] = c->batch[i]; | |
999 | c->batch[i] = c->batch[i + c->max_count / 2]; | |
1000 | } | |
1001 | b->count = cnt; | |
1002 | c->count -= cnt; | |
1003 | CHECK_GT(b->count, 0); | |
1004 | allocator->DeallocateBatch(&stats_, class_id, b); | |
1005 | } | |
1006 | }; | |
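// A sketch of the intended per-thread flow (names are hypothetical; Cache64
// refers to the illustrative typedef sketched after SizeClassAllocator64,
// and with CombinedAllocator below this is normally driven via InitCache):
//   static THREADLOCAL Cache64 cache;            // POD, lives in TLS
//   cache.Init(&global_stats);                   // register per-thread stats
//   uptr c = primary.ClassID(100);
//   void *p = cache.Allocate(&primary, c);       // refills from primary if empty
//   cache.Deallocate(&primary, c, p);            // drains back when full
//   cache.Destroy(&primary, &global_stats);      // drain and unregister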
1007 | ||
1008 | // This class can (de)allocate only large chunks of memory using mmap/unmap. | |
1009 | // The main purpose of this allocator is to cover large and rare allocation | |
1010 | // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64). | |
1011 | template <class MapUnmapCallback = NoOpMapUnmapCallback> | |
1012 | class LargeMmapAllocator { | |
1013 | public: | |
1014 | void InitLinkerInitialized(bool may_return_null) { | |
1015 | page_size_ = GetPageSizeCached(); | |
1016 | atomic_store(&may_return_null_, may_return_null, memory_order_relaxed); |
1017 | } | |
1018 | ||
1019 | void Init(bool may_return_null) { | |
1020 | internal_memset(this, 0, sizeof(*this)); | |
1021 | InitLinkerInitialized(may_return_null); | |
1022 | } |
1023 | ||
1024 | void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) { | |
1025 | CHECK(IsPowerOfTwo(alignment)); | |
1026 | uptr map_size = RoundUpMapSize(size); | |
1027 | if (alignment > page_size_) | |
1028 | map_size += alignment; | |
1029 | // Overflow. |
1030 | if (map_size < size) | |
1031 | return ReturnNullOrDie(); | |
1032 | uptr map_beg = reinterpret_cast<uptr>( |
1033 | MmapOrDie(map_size, "LargeMmapAllocator")); | |
1034 | CHECK(IsAligned(map_beg, page_size_)); | |
1035 | MapUnmapCallback().OnMap(map_beg, map_size); |
1036 | uptr map_end = map_beg + map_size; | |
1037 | uptr res = map_beg + page_size_; | |
1038 | if (res & (alignment - 1)) // Align. | |
1039 | res += alignment - (res & (alignment - 1)); | |
1040 | CHECK(IsAligned(res, alignment)); |
1041 | CHECK(IsAligned(res, page_size_)); | |
1042 | CHECK_GE(res + size, map_beg); | |
1043 | CHECK_LE(res + size, map_end); |
1044 | Header *h = GetHeader(res); | |
1045 | h->size = size; | |
1046 | h->map_beg = map_beg; | |
1047 | h->map_size = map_size; | |
1048 | uptr size_log = MostSignificantSetBitIndex(map_size); | |
1049 | CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log)); | |
1050 | { | |
1051 | SpinMutexLock l(&mutex_); | |
1052 | uptr idx = n_chunks_++; | |
1053 | chunks_sorted_ = false; | |
1054 | CHECK_LT(idx, kMaxNumChunks); | |
1055 | h->chunk_idx = idx; | |
1056 | chunks_[idx] = h; | |
1057 | stats.n_allocs++; | |
1058 | stats.currently_allocated += map_size; | |
1059 | stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated); | |
1060 | stats.by_size_log[size_log]++; | |
1061 | stat->Add(AllocatorStatAllocated, map_size); |
1062 | stat->Add(AllocatorStatMapped, map_size); | |
1063 | } |
1064 | return reinterpret_cast<void*>(res); | |
1065 | } | |
1066 | ||
1067 | void *ReturnNullOrDie() { |
1068 | if (atomic_load(&may_return_null_, memory_order_acquire)) | |
1069 | return nullptr; | |
1070 | ReportAllocatorCannotReturnNull(); | |
1071 | } | |
1072 | ||
1073 | void SetMayReturnNull(bool may_return_null) { | |
1074 | atomic_store(&may_return_null_, may_return_null, memory_order_release); | |
1075 | } | |
1076 | ||
1077 | void Deallocate(AllocatorStats *stat, void *p) { |
1078 | Header *h = GetHeader(p); | |
1079 | { | |
1080 | SpinMutexLock l(&mutex_); | |
1081 | uptr idx = h->chunk_idx; | |
1082 | CHECK_EQ(chunks_[idx], h); | |
1083 | CHECK_LT(idx, n_chunks_); | |
1084 | chunks_[idx] = chunks_[n_chunks_ - 1]; | |
1085 | chunks_[idx]->chunk_idx = idx; | |
1086 | n_chunks_--; | |
1087 | chunks_sorted_ = false; | |
1088 | stats.n_frees++; | |
1089 | stats.currently_allocated -= h->map_size; | |
1090 | stat->Sub(AllocatorStatAllocated, h->map_size); |
1091 | stat->Sub(AllocatorStatMapped, h->map_size); | |
1092 | } |
1093 | MapUnmapCallback().OnUnmap(h->map_beg, h->map_size); | |
1094 | UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size); | |
1095 | } | |
1096 | ||
1097 | uptr TotalMemoryUsed() { | |
1098 | SpinMutexLock l(&mutex_); | |
1099 | uptr res = 0; | |
1100 | for (uptr i = 0; i < n_chunks_; i++) { | |
1101 | Header *h = chunks_[i]; | |
1102 | CHECK_EQ(h->chunk_idx, i); | |
1103 | res += RoundUpMapSize(h->size); | |
1104 | } | |
1105 | return res; | |
1106 | } | |
1107 | ||
1108 | bool PointerIsMine(const void *p) { | |
1109 | return GetBlockBegin(p) != nullptr; | |
1110 | } |
1111 | ||
1112 | uptr GetActuallyAllocatedSize(void *p) { | |
1113 | return RoundUpTo(GetHeader(p)->size, page_size_); | |
1114 | } | |
1115 | ||
1116 | // At least page_size_/2 metadata bytes are available. | |
1117 | void *GetMetaData(const void *p) { | |
1118 | // Too slow: CHECK_EQ(p, GetBlockBegin(p)); | |
1119 | if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) { | |
1120 | Printf("%s: bad pointer %p\n", SanitizerToolName, p); | |
1121 | CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_)); | |
1122 | } | |
1123 | return GetHeader(p) + 1; | |
1124 | } | |
1125 | ||
1126 | void *GetBlockBegin(const void *ptr) { | |
1127 | uptr p = reinterpret_cast<uptr>(ptr); | |
1128 | SpinMutexLock l(&mutex_); | |
1129 | uptr nearest_chunk = 0; | |
1130 | // Cache-friendly linear search. | |
1131 | for (uptr i = 0; i < n_chunks_; i++) { | |
1132 | uptr ch = reinterpret_cast<uptr>(chunks_[i]); | |
1133 | if (p < ch) continue; // p is to the left of this chunk; skip it. | |
1134 | if (p - ch < p - nearest_chunk) | |
1135 | nearest_chunk = ch; | |
1136 | } | |
1137 | if (!nearest_chunk) | |
1138 | return nullptr; | |
1139 | Header *h = reinterpret_cast<Header *>(nearest_chunk); |
1140 | CHECK_GE(nearest_chunk, h->map_beg); | |
1141 | CHECK_LT(nearest_chunk, h->map_beg + h->map_size); | |
1142 | CHECK_LE(nearest_chunk, p); | |
1143 | if (h->map_beg + h->map_size <= p) | |
1144 | return nullptr; | |
1145 | return GetUser(h); |
1146 | } | |
1147 | ||
1148 | // This function does the same as GetBlockBegin, but is much faster. | |
1149 | // Must be called with the allocator locked. | |
1150 | void *GetBlockBeginFastLocked(void *ptr) { | |
1151 | mutex_.CheckLocked(); | |
1152 | uptr p = reinterpret_cast<uptr>(ptr); | |
1153 | uptr n = n_chunks_; | |
1154 | if (!n) return nullptr; | |
1155 | if (!chunks_sorted_) { |
1156 | // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate. | |
1157 | SortArray(reinterpret_cast<uptr*>(chunks_), n); | |
1158 | for (uptr i = 0; i < n; i++) | |
1159 | chunks_[i]->chunk_idx = i; | |
1160 | chunks_sorted_ = true; | |
1161 | min_mmap_ = reinterpret_cast<uptr>(chunks_[0]); | |
1162 | max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) + | |
1163 | chunks_[n - 1]->map_size; | |
1164 | } | |
1165 | if (p < min_mmap_ || p >= max_mmap_) | |
1166 | return nullptr; | |
1167 | uptr beg = 0, end = n - 1; |
1168 | // This loop is a log(n) lower_bound. It does not check for the exact match | |
1169 | // to avoid expensive cache-thrashing loads. | |
1170 | while (end - beg >= 2) { | |
1171 | uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1 | |
1172 | if (p < reinterpret_cast<uptr>(chunks_[mid])) | |
1173 | end = mid - 1; // We are not interested in chunks_[mid]. | |
1174 | else | |
1175 | beg = mid; // chunks_[mid] may still be what we want. | |
1176 | } | |
1177 | ||
1178 | if (beg < end) { | |
1179 | CHECK_EQ(beg + 1, end); | |
1180 | // There are 2 chunks left, choose one. | |
1181 | if (p >= reinterpret_cast<uptr>(chunks_[end])) | |
1182 | beg = end; | |
1183 | } | |
1184 | ||
1185 | Header *h = chunks_[beg]; | |
1186 | if (h->map_beg + h->map_size <= p || p < h->map_beg) | |
1187 | return nullptr; | |
1188 | return GetUser(h); |
1189 | } | |
1190 | ||
1191 | void PrintStats() { | |
1192 | Printf("Stats: LargeMmapAllocator: allocated %zd times, " | |
1193 | "remains %zd (%zd K) max %zd M; by size logs: ", | |
1194 | stats.n_allocs, stats.n_allocs - stats.n_frees, | |
1195 | stats.currently_allocated >> 10, stats.max_allocated >> 20); | |
1196 | for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) { | |
1197 | uptr c = stats.by_size_log[i]; | |
1198 | if (!c) continue; | |
1199 | Printf("%zd:%zd; ", i, c); | |
1200 | } | |
1201 | Printf("\n"); | |
1202 | } | |
1203 | ||
1204 | // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone | |
1205 | // introspection API. | |
1206 | void ForceLock() { | |
1207 | mutex_.Lock(); | |
1208 | } | |
1209 | ||
1210 | void ForceUnlock() { | |
1211 | mutex_.Unlock(); | |
1212 | } | |
1213 | ||
1214 | // Iterate over all existing chunks. | |
1215 | // The allocator must be locked when calling this function. | |
1216 | void ForEachChunk(ForEachChunkCallback callback, void *arg) { | |
1217 | for (uptr i = 0; i < n_chunks_; i++) | |
1218 | callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg); | |
1219 | } | |
1220 | ||
1221 | private: | |
1222 | static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18); | |
1223 | struct Header { | |
1224 | uptr map_beg; | |
1225 | uptr map_size; | |
1226 | uptr size; | |
1227 | uptr chunk_idx; | |
1228 | }; | |
1229 | ||
1230 | Header *GetHeader(uptr p) { | |
1231 | CHECK(IsAligned(p, page_size_)); | |
1232 | return reinterpret_cast<Header*>(p - page_size_); | |
1233 | } | |
1234 | Header *GetHeader(const void *p) { | |
1235 | return GetHeader(reinterpret_cast<uptr>(p)); | |
1236 | } | |
1237 | ||
1238 | void *GetUser(Header *h) { | |
1239 | CHECK(IsAligned((uptr)h, page_size_)); | |
1240 | return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_); | |
1241 | } | |
1242 | ||
1243 | uptr RoundUpMapSize(uptr size) { | |
1244 | return RoundUpTo(size, page_size_) + page_size_; | |
1245 | } | |
1246 | ||
1247 | uptr page_size_; | |
1248 | Header *chunks_[kMaxNumChunks]; | |
1249 | uptr n_chunks_; | |
1250 | uptr min_mmap_, max_mmap_; | |
1251 | bool chunks_sorted_; | |
1252 | struct Stats { | |
1253 | uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64]; | |
1254 | } stats; | |
1255 | atomic_uint8_t may_return_null_; | |
1256 | SpinMutex mutex_; |
1257 | }; | |
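// A direct-use sketch (normally this allocator is only reached through the
// CombinedAllocator below when the primary cannot serve a request):
//   LargeMmapAllocator<> secondary;
//   secondary.Init(/*may_return_null*/ false);
//   AllocatorStats stats;
//   stats.Init();
//   void *p = secondary.Allocate(&stats, 1 << 20, /*alignment*/ 4096);
//   secondary.Deallocate(&stats, p);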
1258 | ||
1259 | // This class implements a complete memory allocator by using two | |
1260 | // internal allocators: | |
1261 | // PrimaryAllocator is efficient, but may not allocate some sizes (alignments). | |
1262 | // When allocating 2^x bytes it should return a 2^x-aligned chunk. | |
1263 | // PrimaryAllocator is used via a local AllocatorCache. | |
1264 | // SecondaryAllocator can allocate anything, but is not efficient. | |
1265 | template <class PrimaryAllocator, class AllocatorCache, | |
1266 | class SecondaryAllocator> // NOLINT | |
1267 | class CombinedAllocator { | |
1268 | public: | |
1269 | void InitCommon(bool may_return_null) { | |
1270 | primary_.Init(); | |
1271 | atomic_store(&may_return_null_, may_return_null, memory_order_relaxed); |
1272 | } | |
1273 | ||
1274 | void InitLinkerInitialized(bool may_return_null) { | |
1275 | secondary_.InitLinkerInitialized(may_return_null); | |
1276 | stats_.InitLinkerInitialized(); | |
1277 | InitCommon(may_return_null); | |
1278 | } | |
1279 | ||
1280 | void Init(bool may_return_null) { | |
1281 | secondary_.Init(may_return_null); | |
1282 | stats_.Init(); | |
1283 | InitCommon(may_return_null); | |
1284 | } |
1285 | ||
1286 | void *Allocate(AllocatorCache *cache, uptr size, uptr alignment, | |
1287 | bool cleared = false, bool check_rss_limit = false) { | |
1288 | // Returning 0 on malloc(0) may break a lot of code. |
1289 | if (size == 0) | |
1290 | size = 1; | |
1291 | if (size + alignment < size) | |
1292 | return ReturnNullOrDie(); |
1293 | if (check_rss_limit && RssLimitIsExceeded()) | |
1294 | return ReturnNullOrDie(); | |
1295 | if (alignment > 8) |
1296 | size = RoundUpTo(size, alignment); | |
1297 | void *res; | |
1298 | bool from_primary = primary_.CanAllocate(size, alignment); | |
1299 | if (from_primary) | |
1300 | res = cache->Allocate(&primary_, primary_.ClassID(size)); | |
1301 | else | |
1302 | res = secondary_.Allocate(&stats_, size, alignment); | |
1303 | if (alignment > 8) | |
1304 | CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0); | |
1305 | if (cleared && res && from_primary) | |
1306 | internal_bzero_aligned16(res, RoundUpTo(size, 16)); | |
1307 | return res; | |
1308 | } | |
1309 | ||
1310 | bool MayReturnNull() const { |
1311 | return atomic_load(&may_return_null_, memory_order_acquire); | |
1312 | } | |
1313 | ||
1314 | void *ReturnNullOrDie() { | |
1315 | if (MayReturnNull()) | |
1316 | return nullptr; | |
1317 | ReportAllocatorCannotReturnNull(); | |
1318 | } | |
1319 | ||
1320 | void SetMayReturnNull(bool may_return_null) { | |
1321 | secondary_.SetMayReturnNull(may_return_null); | |
1322 | atomic_store(&may_return_null_, may_return_null, memory_order_release); | |
1323 | } | |
1324 | ||
1325 | bool RssLimitIsExceeded() { | |
1326 | return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire); | |
1327 | } | |
1328 | ||
1329 | void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) { | |
1330 | atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded, | |
1331 | memory_order_release); | |
1332 | } | |
1333 | ||
1334 | void Deallocate(AllocatorCache *cache, void *p) { |
1335 | if (!p) return; | |
1336 | if (primary_.PointerIsMine(p)) | |
1337 | cache->Deallocate(&primary_, primary_.GetSizeClass(p), p); | |
1338 | else | |
1339 | secondary_.Deallocate(&stats_, p); | |
1340 | } | |
1341 | ||
1342 | void *Reallocate(AllocatorCache *cache, void *p, uptr new_size, | |
1343 | uptr alignment) { | |
1344 | if (!p) | |
1345 | return Allocate(cache, new_size, alignment); | |
1346 | if (!new_size) { | |
1347 | Deallocate(cache, p); | |
1348 | return nullptr; | |
1349 | } |
1350 | CHECK(PointerIsMine(p)); | |
1351 | uptr old_size = GetActuallyAllocatedSize(p); | |
1352 | uptr memcpy_size = Min(new_size, old_size); | |
1353 | void *new_p = Allocate(cache, new_size, alignment); | |
1354 | if (new_p) | |
1355 | internal_memcpy(new_p, p, memcpy_size); | |
1356 | Deallocate(cache, p); | |
1357 | return new_p; | |
1358 | } | |
1359 | ||
1360 | bool PointerIsMine(void *p) { | |
1361 | if (primary_.PointerIsMine(p)) | |
1362 | return true; | |
1363 | return secondary_.PointerIsMine(p); | |
1364 | } | |
1365 | ||
1366 | bool FromPrimary(void *p) { | |
1367 | return primary_.PointerIsMine(p); | |
1368 | } | |
1369 | ||
1370 | void *GetMetaData(const void *p) { | |
1371 | if (primary_.PointerIsMine(p)) | |
1372 | return primary_.GetMetaData(p); | |
1373 | return secondary_.GetMetaData(p); | |
1374 | } | |
1375 | ||
1376 | void *GetBlockBegin(const void *p) { | |
1377 | if (primary_.PointerIsMine(p)) | |
1378 | return primary_.GetBlockBegin(p); | |
1379 | return secondary_.GetBlockBegin(p); | |
1380 | } | |
1381 | ||
1382 | // This function does the same as GetBlockBegin, but is much faster. | |
1383 | // Must be called with the allocator locked. | |
1384 | void *GetBlockBeginFastLocked(void *p) { | |
1385 | if (primary_.PointerIsMine(p)) | |
1386 | return primary_.GetBlockBegin(p); | |
1387 | return secondary_.GetBlockBeginFastLocked(p); | |
1388 | } | |
1389 | ||
1390 | uptr GetActuallyAllocatedSize(void *p) { | |
1391 | if (primary_.PointerIsMine(p)) | |
1392 | return primary_.GetActuallyAllocatedSize(p); | |
1393 | return secondary_.GetActuallyAllocatedSize(p); | |
1394 | } | |
1395 | ||
1396 | uptr TotalMemoryUsed() { | |
1397 | return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed(); | |
1398 | } | |
1399 | ||
1400 | void TestOnlyUnmap() { primary_.TestOnlyUnmap(); } | |
1401 | ||
1402 | void InitCache(AllocatorCache *cache) { | |
1403 | cache->Init(&stats_); | |
1404 | } | |
1405 | ||
1406 | void DestroyCache(AllocatorCache *cache) { | |
1407 | cache->Destroy(&primary_, &stats_); | |
1408 | } | |
1409 | ||
1410 | void SwallowCache(AllocatorCache *cache) { | |
1411 | cache->Drain(&primary_); | |
1412 | } | |
1413 | ||
1414 | void GetStats(AllocatorStatCounters s) const { | |
1415 | stats_.Get(s); | |
1416 | } | |
1417 | ||
1418 | void PrintStats() { | |
1419 | primary_.PrintStats(); | |
1420 | secondary_.PrintStats(); | |
1421 | } | |
1422 | ||
1423 | // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone | |
1424 | // introspection API. | |
1425 | void ForceLock() { | |
1426 | primary_.ForceLock(); | |
1427 | secondary_.ForceLock(); | |
1428 | } | |
1429 | ||
1430 | void ForceUnlock() { | |
1431 | secondary_.ForceUnlock(); | |
1432 | primary_.ForceUnlock(); | |
1433 | } | |
1434 | ||
1435 | // Iterate over all existing chunks. | |
1436 | // The allocator must be locked when calling this function. | |
1437 | void ForEachChunk(ForEachChunkCallback callback, void *arg) { | |
1438 | primary_.ForEachChunk(callback, arg); | |
1439 | secondary_.ForEachChunk(callback, arg); | |
1440 | } | |
1441 | ||
1442 | private: | |
1443 | PrimaryAllocator primary_; | |
1444 | SecondaryAllocator secondary_; | |
1445 | AllocatorGlobalStats stats_; | |
1446 | atomic_uint8_t may_return_null_; |
1447 | atomic_uint8_t rss_limit_is_exceeded_; | |
1448 | }; |
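// A sketch of the full composition (mirrors the typical tool setup; Primary64
// and Cache64 are the illustrative typedefs sketched earlier in this file):
//   typedef CombinedAllocator<Primary64, Cache64,
//                             LargeMmapAllocator<> > Allocator;
//   static Allocator allocator;
//   static THREADLOCAL Cache64 cache;
//   allocator.Init(/*may_return_null*/ false);
//   allocator.InitCache(&cache);
//   void *p = allocator.Allocate(&cache, 100, /*alignment*/ 8);
//   allocator.Deallocate(&cache, p);
//   allocator.DestroyCache(&cache);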
1449 | ||
1450 | // Returns true if calloc(size, n) should return 0 due to overflow in size*n. | |
1451 | bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n); | |
1452 | ||
1453 | } // namespace __sanitizer | |
1454 | ||
1455 | #endif // SANITIZER_ALLOCATOR_H | |