//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache
    : SizeClassAllocator::AllocatorCache {
};

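// A minimal usage sketch (illustrative only: Allocator64, allocator, cache and
// class_id are hypothetical names, and real sanitizers wire the cache through
// their thread registries rather than a bare THREADLOCAL):
//
//   typedef SizeClassAllocatorLocalCache<Allocator64> Cache;
//   static THREADLOCAL Cache cache;            // POD, so plain TLS works.
//   cache.Init(&allocator_global_stats);       // register per-thread stats
//   void *p = cache.Allocate(&allocator, class_id);
//   cache.Deallocate(&allocator, class_id, p);
//   cache.Destroy(&allocator, &allocator_global_stats);  // drain + unregister
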
// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(c, allocator, class_id);
    CHECK_GT(c->count, 0);
    CompactPtrT chunk = c->chunks[--c->count];
    void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
    return res;
  }

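  // Chunks are held as CompactPtrT values (region-relative, typically 32-bit)
  // rather than raw pointers; the encoding is owned by the companion
  // SizeClassAllocator64 (CompactPtrToPointer / PointerToCompactPtr) and is
  // treated as opaque here, the point being a smaller per-class array than an
  // array of void*.
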
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
  }

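  // Returns every cached chunk in every class to the central allocator;
  // Destroy uses this so a dying thread's cache leaks nothing.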
  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(c, allocator, class_id, c->count);
    }
  }

  // private:
  struct PerClass {
    uptr count;
    uptr max_count;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
    }
  }

  NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache();
    uptr num_requested_chunks = SizeClassMap::MaxCachedHint(class_id);
    allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                num_requested_chunks);
    c->count = num_requested_chunks;
  }

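  // Note the asymmetry: Refill requests only MaxCachedHint(class_id) chunks
  // while max_count is twice that, so a refill followed by a burst of frees
  // has headroom before the max_count / 2 drain in Deallocate kicks in
  // (hysteresis; an inference from the constants above, not a documented
  // guarantee).
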
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    InitCache();
    CHECK_GE(c->count, count);
    uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

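// Design note: the two caches share an interface but differ in representation.
// The 64-bit cache above stores compact pointers and exchanges plain arrays
// with the allocator (GetFromAllocator / ReturnToAllocator); the 32-bit cache
// below stores raw void* and exchanges TransferBatch objects, whose own
// storage is size-classed via SizeClassForTransferBatch.
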
// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(allocator, class_id);
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(allocator, class_id);
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  struct PerClass {
    uptr count;
    uptr max_count;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * TransferBatch::MaxCached(i);
    }
  }

  // TransferBatch class is declared in SizeClassAllocator.
  // We transfer chunks between central and thread-local free lists in batches.
  // For small size classes we allocate batches separately.
  // For large size classes we may use one of the chunks to store the batch.
  // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
  static uptr SizeClassForTransferBatch(uptr class_id) {
    if (Allocator::ClassIdToSize(class_id) <
        TransferBatch::AllocationSizeRequiredForNElements(
            TransferBatch::MaxCached(class_id)))
      return SizeClassMap::ClassID(sizeof(TransferBatch));
    return 0;
  }

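  // Worked example (sizes hypothetical, not from any concrete SizeClassMap):
  // a 32-byte chunk cannot hold a batch header plus MaxCached(class_id)
  // pointers, so such classes get a nonzero batch class id and their batches
  // are allocated separately; a 64 KiB chunk fits the whole batch, so such
  // classes return 0 and one of their own chunks doubles as the batch (see
  // CreateBatch / DestroyBatch below).
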
  // Returns a TransferBatch suitable for class_id.
  // For small size classes allocates the batch from the allocator.
  // For large size classes simply returns b.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
      return (TransferBatch *)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  // For small size classes deallocates b to the allocator.
  // Does nothing for large size classes.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
      Deallocate(allocator, batch_class_id, b);
  }

  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
  }

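  // Once CopyToArray has copied the chunks into the flat array the batch
  // object is dead weight: DestroyBatch recycles its memory for small size
  // classes and is a no-op for large ones, where the header lives inside one
  // of the chunks (per the comment above SizeClassForTransferBatch).
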
  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    uptr cnt = Min(c->max_count / 2, c->count);
    uptr first_idx_to_drain = c->count - cnt;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
                    &c->batch[first_idx_to_drain], cnt);
    c->count -= cnt;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};