1 //===-- tsan_mman.cc ------------------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is a part of ThreadSanitizer (TSan), a race detector.
12 //===----------------------------------------------------------------------===//
13 #include "sanitizer_common/sanitizer_allocator_interface.h"
14 #include "sanitizer_common/sanitizer_common.h"
15 #include "sanitizer_common/sanitizer_placement_new.h"
16 #include "tsan_mman.h"
18 #include "tsan_report.h"
19 #include "tsan_flags.h"
// May be overridden by front-end.
22 extern "C" void WEAK
__sanitizer_malloc_hook(void *ptr
, uptr size
) {
27 extern "C" void WEAK
__sanitizer_free_hook(void *ptr
) {
33 struct MapUnmapCallback
{
34 void OnMap(uptr p
, uptr size
) const { }
35 void OnUnmap(uptr p
, uptr size
) const {
36 // We are about to unmap a chunk of user memory.
37 // Mark the corresponding shadow memory as not needed.
38 DontNeedShadowFor(p
, size
);
39 // Mark the corresponding meta shadow memory as not needed.
40 // Note the block does not contain any meta info at this point
41 // (this happens after free).
42 const uptr kMetaRatio
= kMetaShadowCell
/ kMetaShadowSize
;
43 const uptr kPageSize
= GetPageSizeCached() * kMetaRatio
;
44 // Block came from LargeMmapAllocator, so must be large.
45 // We rely on this in the calculations below.
46 CHECK_GE(size
, 2 * kPageSize
);
47 uptr diff
= RoundUp(p
, kPageSize
) - p
;
52 diff
= p
+ size
- RoundDown(p
+ size
, kPageSize
);
55 FlushUnneededShadowMemory((uptr
)MemToMeta(p
), size
/ kMetaRatio
);
59 static char allocator_placeholder
[sizeof(Allocator
)] ALIGNED(64);
60 Allocator
*allocator() {
61 return reinterpret_cast<Allocator
*>(&allocator_placeholder
);
64 void InitializeAllocator() {
65 allocator()->Init(common_flags()->allocator_may_return_null
);
68 void AllocatorThreadStart(ThreadState
*thr
) {
69 allocator()->InitCache(&thr
->alloc_cache
);
70 internal_allocator()->InitCache(&thr
->internal_alloc_cache
);
73 void AllocatorThreadFinish(ThreadState
*thr
) {
74 allocator()->DestroyCache(&thr
->alloc_cache
);
75 internal_allocator()->DestroyCache(&thr
->internal_alloc_cache
);
78 void AllocatorPrintStats() {
79 allocator()->PrintStats();
82 static void SignalUnsafeCall(ThreadState
*thr
, uptr pc
) {
83 if (atomic_load_relaxed(&thr
->in_signal_handler
) == 0 ||
84 !flags()->report_signal_unsafe
)
86 VarSizeStackTrace stack
;
87 ObtainCurrentStack(thr
, pc
, &stack
);
88 if (IsFiredSuppression(ctx
, ReportTypeSignalUnsafe
, stack
))
90 ThreadRegistryLock
l(ctx
->thread_registry
);
91 ScopedReport
rep(ReportTypeSignalUnsafe
);
92 rep
.AddStack(stack
, true);
93 OutputReport(thr
, rep
);
96 void *user_alloc(ThreadState
*thr
, uptr pc
, uptr sz
, uptr align
, bool signal
) {
97 if ((sz
>= (1ull << 40)) || (align
>= (1ull << 40)))
98 return allocator()->ReturnNullOrDie();
99 void *p
= allocator()->Allocate(&thr
->alloc_cache
, sz
, align
);
102 if (ctx
&& ctx
->initialized
)
103 OnUserAlloc(thr
, pc
, (uptr
)p
, sz
, true);
105 SignalUnsafeCall(thr
, pc
);
109 void *user_calloc(ThreadState
*thr
, uptr pc
, uptr size
, uptr n
) {
110 if (CallocShouldReturnNullDueToOverflow(size
, n
))
111 return allocator()->ReturnNullOrDie();
112 void *p
= user_alloc(thr
, pc
, n
* size
);
114 internal_memset(p
, 0, n
* size
);
118 void user_free(ThreadState
*thr
, uptr pc
, void *p
, bool signal
) {
119 if (ctx
&& ctx
->initialized
)
120 OnUserFree(thr
, pc
, (uptr
)p
, true);
121 allocator()->Deallocate(&thr
->alloc_cache
, p
);
123 SignalUnsafeCall(thr
, pc
);
126 void OnUserAlloc(ThreadState
*thr
, uptr pc
, uptr p
, uptr sz
, bool write
) {
127 DPrintf("#%d: alloc(%zu) = %p\n", thr
->tid
, sz
, p
);
128 ctx
->metamap
.AllocBlock(thr
, pc
, p
, sz
);
129 if (write
&& thr
->ignore_reads_and_writes
== 0)
130 MemoryRangeImitateWrite(thr
, pc
, (uptr
)p
, sz
);
132 MemoryResetRange(thr
, pc
, (uptr
)p
, sz
);
135 void OnUserFree(ThreadState
*thr
, uptr pc
, uptr p
, bool write
) {
136 CHECK_NE(p
, (void*)0);
137 uptr sz
= ctx
->metamap
.FreeBlock(thr
, pc
, p
);
138 DPrintf("#%d: free(%p, %zu)\n", thr
->tid
, p
, sz
);
139 if (write
&& thr
->ignore_reads_and_writes
== 0)
140 MemoryRangeFreed(thr
, pc
, (uptr
)p
, sz
);
143 void *user_realloc(ThreadState
*thr
, uptr pc
, void *p
, uptr sz
) {
145 // FIXME: Handle "shrinking" more efficiently,
146 // it seems that some software actually does this.
148 p2
= user_alloc(thr
, pc
, sz
);
152 uptr oldsz
= user_alloc_usable_size(p
);
153 internal_memcpy(p2
, p
, min(oldsz
, sz
));
157 user_free(thr
, pc
, p
);
161 uptr
user_alloc_usable_size(const void *p
) {
164 MBlock
*b
= ctx
->metamap
.GetBlock((uptr
)p
);
165 return b
? b
->siz
: 0;
168 void invoke_malloc_hook(void *ptr
, uptr size
) {
169 ThreadState
*thr
= cur_thread();
170 if (ctx
== 0 || !ctx
->initialized
|| thr
->ignore_interceptors
)
172 __sanitizer_malloc_hook(ptr
, size
);
175 void invoke_free_hook(void *ptr
) {
176 ThreadState
*thr
= cur_thread();
177 if (ctx
== 0 || !ctx
->initialized
|| thr
->ignore_interceptors
)
179 __sanitizer_free_hook(ptr
);
182 void *internal_alloc(MBlockType typ
, uptr sz
) {
183 ThreadState
*thr
= cur_thread();
185 thr
->nomalloc
= 0; // CHECK calls internal_malloc().
188 return InternalAlloc(sz
, &thr
->internal_alloc_cache
);
191 void internal_free(void *p
) {
192 ThreadState
*thr
= cur_thread();
194 thr
->nomalloc
= 0; // CHECK calls internal_malloc().
197 InternalFree(p
, &thr
->internal_alloc_cache
);
200 } // namespace __tsan
202 using namespace __tsan
;
205 uptr
__sanitizer_get_current_allocated_bytes() {
206 uptr stats
[AllocatorStatCount
];
207 allocator()->GetStats(stats
);
208 return stats
[AllocatorStatAllocated
];
211 uptr
__sanitizer_get_heap_size() {
212 uptr stats
[AllocatorStatCount
];
213 allocator()->GetStats(stats
);
214 return stats
[AllocatorStatMapped
];
217 uptr
__sanitizer_get_free_bytes() {
221 uptr
__sanitizer_get_unmapped_bytes() {
225 uptr
__sanitizer_get_estimated_allocated_size(uptr size
) {
229 int __sanitizer_get_ownership(const void *p
) {
230 return allocator()->GetBlockBegin(p
) != 0;
233 uptr
__sanitizer_get_allocated_size(const void *p
) {
234 return user_alloc_usable_size(p
);
237 void __tsan_on_thread_idle() {
238 ThreadState
*thr
= cur_thread();
239 allocator()->SwallowCache(&thr
->alloc_cache
);
240 internal_allocator()->SwallowCache(&thr
->internal_alloc_cache
);
241 ctx
->metamap
.OnThreadIdle(thr
);