//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches a specified amount of memory in per-thread caches and
// then evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//
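// A minimal usage sketch (ToolCallback, ToolChunk, and GetLocalCache() are
// hypothetical names, not defined in this header): the owning tool Init()s a
// linker-initialized Quarantine with its global and per-thread byte limits,
// then calls Put() on every deallocation:
//
//   static Quarantine<ToolCallback, ToolChunk> quarantine(LINKER_INITIALIZED);
//   quarantine.Init(/*size=*/16 << 20, /*cache_size=*/256 << 10);
//   ...
//   quarantine.Put(GetLocalCache(), ToolCallback(), chunk, chunk_size);
//
// Put() batches pointers in the per-thread cache; once that cache exceeds
// cache_size it is drained into the global list, and once the global list
// exceeds size the oldest batches are handed back through the callback.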

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8KB.

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *cb.Allocate(uptr size);
// void cb.Deallocate(void *ptr);
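//
// A hypothetical callback satisfying this interface might look like the
// following sketch (FooChunk and the helper functions are illustrative names,
// not provided by sanitizer_common):
//
//   struct FooCallback {
//     void Recycle(FooChunk *ptr) { ReturnChunkToAllocator(ptr); }
//     void *Allocate(uptr size) { return AllocateMetadata(size); }
//     void Deallocate(void *ptr) { FreeMetadata(ptr); }
//   };
//
// Allocate()/Deallocate() are used only for QuarantineBatch bookkeeping
// blocks; Recycle() is invoked for every quarantined pointer when it is
// evicted (or immediately, if the quarantine size is zero).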
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    // Thread local quarantine size can be zero only when global quarantine size
    // is zero (it allows us to perform just one atomic read per Put() call).
    CHECK((size == 0 && cache_size == 0) || cache_size != 0);

    atomic_store(&max_size_, size, memory_order_relaxed);
    atomic_store(&min_size_, size / 10 * 9,
                 memory_order_relaxed);  // 90% of max size.
    atomic_store(&max_cache_size_, cache_size, memory_order_relaxed);
  }

  uptr GetSize() const { return atomic_load(&max_size_, memory_order_relaxed); }
  uptr GetCacheSize() const {
    return atomic_load(&max_cache_size_, memory_order_relaxed);
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    uptr cache_size = GetCacheSize();
    if (cache_size) {
      c->Enqueue(cb, ptr, size);
    } else {
      // cache_size == 0 only when size == 0 (see Init).
      cb.Recycle(ptr);
    }
    // Check cache size anyway to accommodate runtime cache_size changes.
    if (c->Size() > cache_size)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(cb);
  }

  void PrintStats() const {
    // Assumes the world is stopped, just like the allocator's PrintStats.
    cache_.PrintStats();
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  atomic_uintptr_t max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    uptr min_size = atomic_load(&min_size_, memory_order_relaxed);
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    CHECK(b);
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

  void PrintStats() const {
    uptr batch_count = 0;
    uptr total_quarantine_bytes = 0;
    uptr total_quarantine_chunks = 0;
    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
      batch_count++;
      total_quarantine_bytes += (*it).size;
      total_quarantine_chunks += (*it).count;
    }
    Printf("Global quarantine stats: batches: %zd; bytes: %zd; chunks: %zd "
           "(capacity: %zd chunks)\n",
           batch_count, total_quarantine_bytes, total_quarantine_chunks,
           batch_count * QuarantineBatch::kSize);
  }

 private:
  typedef IntrusiveList<QuarantineBatch> List;

  List list_;
  atomic_uintptr_t size_;

  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    CHECK(b);
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};

}  // namespace __sanitizer

#endif  // SANITIZER_QUARANTINE_H