//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only into platform-specific
//     files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
#include "tsan_stack_trace.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

#ifndef SANITIZER_GO
struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__)
static const uptr kAllocatorSpace = 0;
static const uptr kAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kAllocatorRegionSizeLog = 20;
static const uptr kAllocatorNumRegions =
    kAllocatorSize >> kAllocatorRegionSizeLog;
typedef TwoLevelByteMap<(kAllocatorNumRegions >> 12), 1 << 12,
    MapUnmapCallback> ByteMap;
typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0,
    CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap,
    MapUnmapCallback> PrimaryAllocator;
#else
typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif
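
// Usage sketch (illustrative only; it assumes the CombinedAllocator of this
// sanitizer_common vintage exposes Allocate/Deallocate taking a per-thread
// cache, which is how the runtime's heap functions drive it):
//   void *p = allocator()->Allocate(&thr->alloc_cache, size, align);
//   ...
//   allocator()->Deallocate(&thr->alloc_cache, p);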

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker

// FastState (from most significant bit):
//   ignore        : 1
//   tid           : kTidBits
//   unused        : -
//   history_size  : 3
//   epoch         : kClkBits
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = x_ & ((1ull << kClkBits) - 1);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }

  ALWAYS_INLINE
  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  }

  void ClearHistorySize() {
    SetHistorySize(0);
  }

  ALWAYS_INLINE
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
  u64 x_;
};
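
// Usage sketch (illustrative only; the tid/epoch values are hypothetical).
// FastState packs tid and epoch into one word, so the round-trip below holds
// by construction, and an epoch increment is a plain add on the low kClkBits:
//   FastState s(/*tid=*/5, /*epoch=*/100);
//   DCHECK_EQ(s.tid(), 5);
//   DCHECK_EQ(s.epoch(), 100);
//   s.IncrementEpoch();
//   DCHECK_EQ(s.epoch(), 101);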

// Shadow (from most significant bit):
//   freed     : 1
//   tid       : kTidBits
//   is_atomic : 1
//   is_read   : 1
//   size_log  : 2
//   addr0     : 3
//   epoch     : kClkBits
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static ALWAYS_INLINE
  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }

  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + size1) > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }

  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When memory is freed (or otherwise becomes inaccessible), we write shadow
  // values with the tid/epoch of the free and with the freed bit set.
  // During memory access processing the freed bit is treated as the msb of
  // the tid, so any access races with a shadow value that has the freed bit
  // set (it is as if the write came from a thread with which we have never
  // synchronized). This lets us detect accesses to freed memory without
  // additional overhead in memory access processing, and at the same time
  // restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
        | (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift = 5 + kClkBits;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
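
// Usage sketch (illustrative only; thr_fast_state stands in for a real
// FastState taken from the current thread). Encoding a 4-byte plain write at
// byte offset 2 within an 8-byte shadow cell:
//   Shadow cur(thr_fast_state);
//   cur.SetAddr0AndSizeLog(/*addr0=*/2, /*kAccessSizeLog=*/kSizeLog4);
//   cur.SetWrite(true);
//   cur.SetAtomic(false);
// Two such accesses can conflict only if, among other things, their byte
// ranges overlap: Shadow::TwoRangesIntersect(old, cur, cur.size()).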

struct ThreadSignalContext;

struct JmpBuf {
  uptr sp;
  uptr mangled_sp;
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
  // if we are processing a write to X from the same thread at epoch=200,
  // we do nothing, because both writes happen in the same 'synch epoch'.
  // That is, if another memory access does not race with the former write,
  // it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  // Go does not support ignores.
#ifndef SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  // C/C++ uses a fixed-size shadow stack embedded into Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  MutexSet mset;
  ThreadClock clock;
#ifndef SANITIZER_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
#endif
#if TSAN_COLLECT_STATS
  u64 stat[StatCnt];
#endif
  const int tid;
  const int unique_id;
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_inited;
  bool is_dead;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;
  ThreadContext *tctx;

#if SANITIZER_DEBUG && !SANITIZER_GO
  InternalDeadlockDetector internal_deadlock_detector;
#endif
  DDPhysicalThread *dd_pt;
  DDLogicalThread *dd_lt;

  atomic_uintptr_t in_signal_handler;
  ThreadSignalContext *signal_ctx;

  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DenseSlabAllocCache clock_cache;

#ifndef SANITIZER_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

#ifndef SANITIZER_GO
#if SANITIZER_MAC
ThreadState *cur_thread();
void cur_thread_finalize();
#else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
INLINE void cur_thread_finalize() { }
#endif  // SANITIZER_MAC
#endif  // SANITIZER_GO
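
// Usage sketch (illustrative only; `func` and the interceptor body are
// hypothetical). Interceptors typically begin by fetching the per-thread
// state through cur_thread():
//   ThreadState *thr = cur_thread();
//   if (thr->ignore_interceptors)
//     return REAL(func)(arg);  // skip TSan processing entirely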

class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
  u32 creation_stack_id;
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared its tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc_or_addr;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;
  bool after_multithreaded_fork;

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  ClockAlloc clock_alloc;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

extern Context *ctx;  // The one and only global runtime context.

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#ifndef SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#ifndef SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                       const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(int unique_tid, bool suppressable = false);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset);

template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
}
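
// Usage sketch (illustrative only): materializing the current shadow stack,
// e.g. while constructing a report. VarSizeStackTrace comes from
// tsan_stack_trace.h and is the same type RestoreStack above fills in for
// stacks reconstructed from the event trace:
//   VarSizeStackTrace trace;
//   ObtainCurrentStack(thr, pc, &trace);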

#if TSAN_COLLECT_STATS
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
#endif

void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] += n;
#endif
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] = n;
#endif
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
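
// Illustrative mapping (sketch): compiler instrumentation for a plain 8-byte
// store such as "*p = v;" (with u64 *p) boils down to a call equivalent to
//   MemoryWrite(thr, pc, (uptr)p, kSizeLog8);
// i.e. MemoryAccess(thr, pc, (uptr)p, kSizeLog8,
//                   /*kAccessIsWrite=*/true, /*kIsAtomic=*/false).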

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
               bool try_lock = false);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr,
                   bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD

void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of the happens-before relation, it draws a HB edge from all
// threads (from wherever they happen to be executing right now) to the
// current thread. We use it to handle Go finalizers. Namely, the finalizer
// goroutine executes AcquireGlobal right before executing finalizers.
// This provides a coarse, but simple approximation of the actual required
// synchronization.
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif
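
// Illustrative note (sketch): a hot function keeps its fast path free of call
// overhead by hiding the slow path behind HACKY_CALL. TraceAddEvent below is
// the canonical user:
//   if (UNLIKELY((pos % kTracePartSize) == 0))
//     HACKY_CALL(__tsan_trace_switch);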

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  if (!kCollectHistory)
    return;
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, 61), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#ifndef SANITIZER_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}

#ifndef SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
  return kHeapMemEnd + PrimaryAllocator::AdditionalSize();
}
#endif

}  // namespace __tsan

#endif  // TSAN_RTL_H