//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return a 2^x-aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
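//
// A minimal wiring sketch (illustrative only, not part of this header): the
// typedefs below mirror how sanitizer tools typically instantiate this
// template; 'PrimaryAllocator64' stands for a tool-specific
// SizeClassAllocator64 instantiation whose parameters are assumed here.
//
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator64> AllocatorCache;
//   typedef LargeMmapAllocator<> SecondaryAllocator;
//   typedef CombinedAllocator<PrimaryAllocator64, AllocatorCache,
//                             SecondaryAllocator> Allocator;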
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
    primary_.Init(release_to_os_interval_ms);
    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
  }

  void InitLinkerInitialized(
      bool may_return_null, s32 release_to_os_interval_ms) {
    secondary_.InitLinkerInitialized(may_return_null);
    stats_.InitLinkerInitialized();
    InitCommon(may_return_null, release_to_os_interval_ms);
  }

  void Init(bool may_return_null, s32 release_to_os_interval_ms) {
    secondary_.Init(may_return_null);
    stats_.Init();
    InitCommon(may_return_null, release_to_os_interval_ms);
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false, bool check_rss_limit = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
    if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
    uptr original_size = size;
    // If alignment requirements are to be fulfilled by the frontend allocator
    // rather than by the primary or secondary, passing an alignment lower than
    // or equal to 8 will prevent any further rounding up, as well as the later
    // alignment check.
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    bool from_primary = primary_.CanAllocate(size, alignment);
    // The primary allocator should return a 2^x aligned allocation when
    // requested 2^x bytes, hence using the rounded up 'size' when being
    // serviced by the primary (this is no longer true when the primary is
    // using a non-fixed base address). The secondary takes care of the
    // alignment without such requirement, and allocating 'size' would use
    // extraneous memory, so we employ 'original_size'.
    if (from_primary)
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(&stats_, original_size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    // When serviced by the secondary, the chunk comes from a mmap allocation
    // and will be zeroed out anyway. We only need to clear out the chunk if
    // it was serviced by the primary, hence using the rounded up 'size'.
    if (cleared && res && from_primary)
      internal_bzero_aligned16(res, RoundUpTo(size, 16));
    return res;
  }
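  // Worked example of the alignment handling above (illustrative only):
  // Allocate(cache, /*size=*/24, /*alignment=*/32) rounds 'size' up to 32, so
  // a primary-serviced request comes from the 32-byte size class and, by the
  // "2^x bytes => 2^x-aligned" invariant noted above for a fixed-base primary,
  // satisfies the 32-byte alignment that the CHECK_EQ then verifies. With
  // alignment <= 8, neither the rounding nor the alignment check is performed.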

  bool MayReturnNull() const {
    return atomic_load(&may_return_null_, memory_order_acquire);
  }

  void *ReturnNullOrDieOnBadRequest() {
    if (MayReturnNull())
      return nullptr;
    ReportAllocatorCannotReturnNull(false);
  }

  void *ReturnNullOrDieOnOOM() {
    if (MayReturnNull()) return nullptr;
    ReportAllocatorCannotReturnNull(true);
  }

  void SetMayReturnNull(bool may_return_null) {
    secondary_.SetMayReturnNull(may_return_null);
    atomic_store(&may_return_null_, may_return_null, memory_order_release);
  }

  s32 ReleaseToOSIntervalMs() const {
    return primary_.ReleaseToOSIntervalMs();
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
  }

  bool RssLimitIsExceeded() {
    return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
  }

  void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
    atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
                 memory_order_release);
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(&stats_, p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return nullptr;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  bool FromPrimary(void *p) {
    return primary_.PointerIsMine(p);
  }

  void *GetMetaData(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void InitCache(AllocatorCache *cache) {
    cache->Init(&stats_);
  }

  void DestroyCache(AllocatorCache *cache) {
    cache->Destroy(&primary_, &stats_);
  }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

  void GetStats(AllocatorStatCounters s) const {
    stats_.Get(s);
  }

  void PrintStats() {
    primary_.PrintStats();
    secondary_.PrintStats();
  }

  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
  // zone introspection API.
  void ForceLock() {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() {
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    primary_.ForEachChunk(callback, arg);
    secondary_.ForEachChunk(callback, arg);
  }
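
  // Typical locked-iteration pattern (sketch; 'MyChunkCallback', 'arg' and
  // 'addr' are placeholders, not names defined by this header):
  //   allocator.ForceLock();
  //   allocator.ForEachChunk(MyChunkCallback, &arg);
  //   void *begin = allocator.GetBlockBeginFastLocked(addr);
  //   allocator.ForceUnlock();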

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
  AllocatorGlobalStats stats_;
  atomic_uint8_t may_return_null_;
  atomic_uint8_t rss_limit_is_exceeded_;
};

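// End-to-end usage sketch (illustrative only; 'Allocator' and 'AllocatorCache'
// refer to the hypothetical typedefs from the comment near the top of this
// file, and the parameter values are arbitrary):
//
//   static Allocator allocator;
//   static AllocatorCache cache;
//   allocator.Init(/*may_return_null=*/false,
//                  /*release_to_os_interval_ms=*/-1);
//   allocator.InitCache(&cache);
//   void *p = allocator.Allocate(&cache, /*size=*/100, /*alignment=*/8);
//   p = allocator.Reallocate(&cache, p, /*new_size=*/200, /*alignment=*/8);
//   allocator.Deallocate(&cache, p);
//   allocator.DestroyCache(&cache);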