//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine holds a specified amount of memory in per-thread caches and
// evicts it to a global FIFO queue; when the queue grows past a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//
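
// Informal sketch of the flow implemented below:
//
//   Put() -> per-thread QuarantineCache (batches of pointers)
//   Drain() -> global FIFO of batches inside Quarantine
//   Recycle()/DoRecycle() -> Callback::Recycle() on the oldest memory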

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Callback> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.
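// With 8-byte pointers the three header fields plus kSize slots give
// sizeof(QuarantineBatch) == (3 + 1021) * 8 == 8192 bytes, so the batch
// fills the 8Kb bound above exactly.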

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *Callback::Allocate(uptr size);
// void Callback::Deallocate(void *ptr);
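//
// Illustration only (a hypothetical sketch, not part of this interface): a
// minimal callback for some concrete node type might look like
//
//   struct ExampleCallback {
//     void Recycle(ExampleNode *ptr);  // return the memory to the allocator
//     void *Allocate(uptr size);       // e.g. via InternalAlloc()
//     void Deallocate(void *ptr);      // e.g. via InternalFree()
//   };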
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    // The thread-local quarantine size can be zero only when the global
    // quarantine size is zero (this lets Put() get by with a single atomic
    // read per call).
    CHECK((size == 0 && cache_size == 0) || cache_size != 0);
    atomic_store(&max_size_, size, memory_order_relaxed);
    atomic_store(&min_size_, size / 10 * 9,
                 memory_order_relaxed);  // 90% of max size.
    atomic_store(&max_cache_size_, cache_size, memory_order_relaxed);
  }

  uptr GetSize() const {
    return atomic_load(&max_size_, memory_order_relaxed);
  }
  uptr GetCacheSize() const {
    return atomic_load(&max_cache_size_, memory_order_relaxed);
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    uptr cache_size = GetCacheSize();
    if (cache_size) {
      c->Enqueue(cb, ptr, size);
    } else {
      // cache_size == 0 only when size == 0 (see Init).
      cb.Recycle(ptr);
    }
    // Check the cache size anyway to accommodate a runtime cache_size change.
    if (c->Size() > cache_size)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    // TryLock keeps Drain() non-blocking: if another thread is already
    // recycling, this one simply leaves the work to it.
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(cb);
  }

  void PrintStats() const {
    // Assumes that the world is stopped, just like the allocator's
    // PrintStats() does.
    cache_.PrintStats();
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  atomic_uintptr_t max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    uptr min_size = atomic_load(&min_size_, memory_order_relaxed);
    {
      SpinMutexLock l(&cache_mutex_);
      // Move batches to a temporary cache so that the actual recycling
      // happens outside of cache_mutex_.
      while (cache_.Size() > min_size) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      // Prefetch pointers ahead of the recycle loop to hide cache misses.
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
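
// Illustrative usage (hypothetical names; ASan's allocator follows a similar
// pattern):
//
//   typedef Quarantine<ExampleCallback, ExampleNode> ExampleQuarantine;
//   static ExampleQuarantine quarantine(LINKER_INITIALIZED);
//   ...
//   quarantine.Init(kMaxQuarantineBytes, kPerThreadQuarantineBytes);
//   quarantine.Put(GetThreadLocalCache(), ExampleCallback(), node, size);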

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  // A default-constructed cache serves as the temporary holder for batches
  // taken off the global list in Quarantine::Recycle().
  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

  void PrintStats() const {
    uptr batch_count = 0;
    uptr total_quarantine_bytes = 0;
    uptr total_quarantine_chunks = 0;
    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
      batch_count++;
      total_quarantine_bytes += (*it).size;
      total_quarantine_chunks += (*it).count;
    }
    Printf("Global quarantine stats: batches: %zd; bytes: %zd; chunks: %zd "
           "(capacity: %zd chunks)\n",
           batch_count, total_quarantine_bytes, total_quarantine_chunks,
           batch_count * QuarantineBatch::kSize);
  }

 private:
  typedef IntrusiveList<QuarantineBatch> List;

  List list_;
  atomic_uintptr_t size_;

  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch *AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};

}  // namespace __sanitizer

#endif  // SANITIZER_QUARANTINE_H