//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return 2^x aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
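//
// A minimal instantiation sketch (illustrative only; the concrete primary,
// cache, and secondary types below are assumptions -- each tool defines its
// own typedefs in its allocator header):
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
//   typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
//                             LargeMmapAllocator<>> Allocator;
//   static Allocator allocator;
//   allocator.Init(/*may_return_null=*/false, kReleaseToOSIntervalNever);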
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
    primary_.Init(release_to_os_interval_ms);
    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
  }

  void InitLinkerInitialized(
      bool may_return_null, s32 release_to_os_interval_ms) {
    secondary_.InitLinkerInitialized(may_return_null);
    stats_.InitLinkerInitialized();
    InitCommon(may_return_null, release_to_os_interval_ms);
  }

  void Init(bool may_return_null, s32 release_to_os_interval_ms) {
    secondary_.Init(may_return_null);
    stats_.Init();
    InitCommon(may_return_null, release_to_os_interval_ms);
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false, bool check_rss_limit = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
    if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
    uptr original_size = size;
    // If alignment requirements are to be fulfilled by the frontend allocator
    // rather than by the primary or secondary, passing an alignment lower than
    // or equal to 8 will prevent any further rounding up, as well as the later
    // alignment check.
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
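    // For example (illustrative): a request of size 24 with alignment 32 is
    // rounded up to size 32, so a primary chunk of that power-of-two size
    // class is 32-byte aligned by the guarantee documented above.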
    void *res;
    bool from_primary = primary_.CanAllocate(size, alignment);
    // The primary allocator should return a 2^x aligned allocation when
    // requested 2^x bytes, hence using the rounded up 'size' when being
    // serviced by the primary (this is no longer true when the primary is
    // using a non-fixed base address). The secondary takes care of the
    // alignment without such requirement, and allocating 'size' would use
    // extraneous memory, so we employ 'original_size'.
    if (from_primary)
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(&stats_, original_size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    // When serviced by the secondary, the chunk comes from a mmap allocation
    // and will be zero'd out anyway. We only need to clear out the chunk if
    // it was serviced by the primary, hence using the rounded up 'size'.
    if (cleared && res && from_primary)
      internal_bzero_aligned16(res, RoundUpTo(size, 16));
    return res;
  }

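  // A typical call (sketch; an initialized 'allocator' and a cache registered
  // via InitCache are assumed):
  //   void *p = allocator.Allocate(&cache, /*size=*/128, /*alignment=*/8,
  //                                /*cleared=*/true);
  //   allocator.Deallocate(&cache, p);
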
  bool MayReturnNull() const {
    return atomic_load(&may_return_null_, memory_order_acquire);
  }

  void *ReturnNullOrDieOnBadRequest() {
    if (MayReturnNull())
      return nullptr;
    ReportAllocatorCannotReturnNull(false);
  }

  void *ReturnNullOrDieOnOOM() {
    if (MayReturnNull()) return nullptr;
    ReportAllocatorCannotReturnNull(true);
  }

  void SetMayReturnNull(bool may_return_null) {
    secondary_.SetMayReturnNull(may_return_null);
    atomic_store(&may_return_null_, may_return_null, memory_order_release);
  }

  s32 ReleaseToOSIntervalMs() const {
    return primary_.ReleaseToOSIntervalMs();
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
  }

  bool RssLimitIsExceeded() {
    return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
  }

  void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
    atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
                 memory_order_release);
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(&stats_, p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return nullptr;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

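  // Reallocate mirrors realloc semantics (sketch; 'allocator' and 'cache' are
  // assumed):
  //   void *p = allocator.Reallocate(&cache, nullptr, 64, 8);  // allocates
  //   p = allocator.Reallocate(&cache, p, 128, 8);  // grows, copies old data
  //   allocator.Reallocate(&cache, p, 0, 8);        // frees, returns nullptr
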
  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  bool FromPrimary(void *p) {
    return primary_.PointerIsMine(p);
  }

  void *GetMetaData(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void InitCache(AllocatorCache *cache) {
    cache->Init(&stats_);
  }

  void DestroyCache(AllocatorCache *cache) {
    cache->Destroy(&primary_, &stats_);
  }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

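  // Typical per-thread cache lifecycle (sketch): call InitCache when a thread
  // starts and DestroyCache when it exits; SwallowCache only drains a cache's
  // chunks back to the primary without destroying the cache.
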
  void GetStats(AllocatorStatCounters s) const {
    stats_.Get(s);
  }

  void PrintStats() {
    primary_.PrintStats();
    secondary_.PrintStats();
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() {
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

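  // Note: ForceUnlock releases the two locks in the reverse of the order
  // ForceLock acquires them, the usual LIFO discipline for nested locks.
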
  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    primary_.ForEachChunk(callback, arg);
    secondary_.ForEachChunk(callback, arg);
  }

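  // Example callback (sketch; assumes the ForEachChunkCallback signature
  // 'void (*)(uptr chunk, void *arg)' declared elsewhere in this allocator):
  //   static void CountChunk(uptr chunk, void *arg) {
  //     ++*reinterpret_cast<uptr *>(arg);
  //   }
  //   uptr n = 0;
  //   allocator.ForceLock();
  //   allocator.ForEachChunk(CountChunk, &n);
  //   allocator.ForceUnlock();
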
 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
  AllocatorGlobalStats stats_;
  atomic_uint8_t may_return_null_;
  atomic_uint8_t rss_limit_is_exceeded_;
};