//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine holds a specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue exceeds a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//
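//
// A typical lifecycle, for illustration only (ToolCallback and ToolChunk are
// hypothetical placeholder names, not part of this header):
//
//   static Quarantine<ToolCallback, ToolChunk> quarantine(LINKER_INITIALIZED);
//   quarantine.Init(/*size=*/1 << 26, /*cache_size=*/1 << 20);
//   ...
//   // On deallocation: stash the chunk instead of releasing its memory.
//   quarantine.Put(&thread_local_cache, ToolCallback(), chunk, chunk_size);
//   ...
//   // On thread teardown: flush the thread-local cache into the global one.
//   quarantine.Drain(&thread_local_cache, ToolCallback());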

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  // kSize is chosen so that the whole struct occupies exactly 1024 words:
  // 8 KB on 64-bit targets, 4 KB on 32-bit targets.
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;   // Total size of the chunks in this batch, including the batch
               // header itself (see Enqueue below).
  uptr count;  // Number of pointers currently stored in batch[].
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8 KB.

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *Callback::Allocate(uptr size);
// void Callback::Deallocate(void *ptr);
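//
// For illustration only, a minimal callback might be sketched as follows
// (MyCallback and MyChunk are hypothetical names; the callback is passed by
// value, so it should be stateless or cheap to copy):
//
//   struct MyChunk;  // Tool-specific chunk header.
//   struct MyCallback {
//     void Recycle(MyChunk *ptr) { /* Actually release ptr's memory. */ }
//     void *Allocate(uptr size) { return InternalAlloc(size); }
//     void Deallocate(void *ptr) { InternalFree(ptr); }
//   };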
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    atomic_store(&max_size_, size, memory_order_release);
    atomic_store(&min_size_, size / 10 * 9,
                 memory_order_release);  // 90% of max size.
    max_cache_size_ = cache_size;
  }

  uptr GetSize() const { return atomic_load(&max_size_, memory_order_acquire); }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    uptr min_size = atomic_load(&min_size_, memory_order_acquire);
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      // Prefetch the first chunks, then keep prefetching kPrefetch entries
      // ahead of the recycle loop. Bound every index by b->count so that a
      // partially filled batch is never read out of bounds.
      for (uptr i = 0; i < kPrefetch && i < b->count; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0; i < b->count; i++) {
        if (i + kPrefetch < b->count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      // The batch itself came from cb.Allocate(); release it as well.
      cb.Deallocate(b);
    }
  }
};

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    CHECK(b);
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

  // Not an atomic read-modify-write: that is fine here because a cache is
  // only mutated by its owner thread or under cache_mutex_; size_ is atomic
  // only so Size() can be read concurrently.
  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    CHECK(b);
1a4d82fc
JJ
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};
}  // namespace __sanitizer

#endif  // SANITIZER_QUARANTINE_H