//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache
    : SizeClassAllocator::AllocatorCache {
};
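
// A minimal usage sketch (illustrative, not part of this header; the names
// PrimaryAllocator and global_stats are hypothetical stand-ins for whatever
// a tool defines):
//
//   // PrimaryAllocator is some SizeClassAllocator64<...> instantiation.
//   typedef SizeClassAllocator64LocalCache<PrimaryAllocator> AllocatorCache;
//   static THREADLOCAL AllocatorCache cache;  // POD, so plain TLS is enough.
//
//   cache.Init(&global_stats);                       // once per thread
//   uptr class_id = SizeClassMap::ClassID(size);
//   void *p = cache.Allocate(&allocator, class_id);  // usually stays local
//   cache.Deallocate(&allocator, class_id, p);
//   cache.Destroy(&allocator, &global_stats);        // drains at thread exit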

// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(c, allocator, class_id);
    CHECK_GT(c->count, 0);
    CompactPtrT chunk = c->chunks[--c->count];
    void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(c, allocator, class_id, c->count);
    }
  }

  // private:
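  // chunks[] is sized at twice the cache hint: Refill() fetches
  // MaxCachedHint(class_id) chunks and an overflowing Deallocate() drains
  // max_count / 2 chunks, so both operations leave the cache half full and
  // provide hysteresis against bouncing chunks to and from the allocator.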
  struct PerClass {
    u32 count;
    u32 max_count;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
    }
  }

  NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache();
    uptr num_requested_chunks = SizeClassMap::MaxCachedHint(class_id);
    allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                num_requested_chunks);
    c->count = num_requested_chunks;
  }

  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    InitCache();
    CHECK_GE(c->count, count);
    uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};
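
// Worked example of the refill/drain hysteresis above (numbers illustrative,
// not taken from any real SizeClassMap): if MaxCachedHint(c) == 8 for some
// class c, then max_count == 16. An empty cache Refill()s 8 chunks, so the
// next 8 Allocate() calls are served locally; a full cache Drain()s
// max_count / 2 == 8 chunks, so the next 8 Deallocate() calls are absorbed
// locally. Either round trip leaves the cache half full, so a thread that
// alternates allocations and frees rarely touches the shared allocator state.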

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(allocator, class_id);
    void *res = c->batch[--c->count];
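    // Prefetch the next chunk in the cache to warm it for the next Allocate().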
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(allocator, class_id);
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  struct PerClass {
    uptr count;
    uptr max_count;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * TransferBatch::MaxCached(i);
    }
  }

  // TransferBatch class is declared in SizeClassAllocator.
  // We transfer chunks between central and thread-local free lists in batches.
  // For small size classes we allocate batches separately.
  // For large size classes we may use one of the chunks to store the batch.
  // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
  static uptr SizeClassForTransferBatch(uptr class_id) {
    if (Allocator::ClassIdToSize(class_id) <
        TransferBatch::AllocationSizeRequiredForNElements(
            TransferBatch::MaxCached(class_id)))
      return SizeClassMap::ClassID(sizeof(TransferBatch));
    return 0;
  }
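
  // Worked example (sizes illustrative, not from any real SizeClassMap):
  // suppose MaxCached(class_id) == 4 and a TransferBatch needs ~48 bytes to
  // hold 4 elements. For a 32-byte class the chunks are too small to hold the
  // batch, so batches come from ClassID(sizeof(TransferBatch)); for a 512-byte
  // class a single chunk can store the batch itself, and 0 ("store the batch
  // in one of the transferred chunks") is returned.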

  // Returns a TransferBatch suitable for class_id.
  // For small size classes allocates the batch from the allocator.
  // For large size classes simply returns b.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  // For small size classes deallocates b to the allocator.
  // Does nothing for large size classes.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
      Deallocate(allocator, batch_class_id, b);
  }

  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
  }

  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    uptr cnt = Min(c->max_count / 2, c->count);
    uptr first_idx_to_drain = c->count - cnt;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
                    &c->batch[first_idx_to_drain], cnt);
    c->count -= cnt;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};